|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T13:04:50.562560Z" |
|
}, |
|
"title": "Audio-Visual Understanding of Passenger Intents for In-Cabin Conversational Agents", |
|
"authors": [ |
|
{ |
|
"first": "Eda", |
|
"middle": [], |
|
"last": "Okur", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Anticipatory Computing Lab", |
|
"institution": "Intel Labs", |
|
"location": { |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Shachi", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Kumar", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Anticipatory Computing Lab", |
|
"institution": "Intel Labs", |
|
"location": { |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Saurav", |
|
"middle": [], |
|
"last": "Sahay", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Anticipatory Computing Lab", |
|
"institution": "Intel Labs", |
|
"location": { |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Lama", |
|
"middle": [], |
|
"last": "Nachman", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Anticipatory Computing Lab", |
|
"institution": "Intel Labs", |
|
"location": { |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Building multimodal dialogue understanding capabilities situated in the in-cabin context is crucial to enhance passenger comfort in autonomous vehicle (AV) interaction systems. To this end, understanding passenger intents from spoken interactions and vehicle vision systems is an important building block for developing contextual and visually grounded conversational agents for AV. Towards this goal, we explore AMIE (Automated-vehicle Multimodal In-cabin Experience), the in-cabin agent responsible for handling multimodal passenger-vehicle interactions. In this work, we discuss the benefits of multimodal understanding of in-cabin utterances by incorporating verbal/language input together with the non-verbal/acoustic and visual input from inside and outside the vehicle. Our experimental results outperformed text-only baselines as we achieved improved performances for intent detection with multimodal approach.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Building multimodal dialogue understanding capabilities situated in the in-cabin context is crucial to enhance passenger comfort in autonomous vehicle (AV) interaction systems. To this end, understanding passenger intents from spoken interactions and vehicle vision systems is an important building block for developing contextual and visually grounded conversational agents for AV. Towards this goal, we explore AMIE (Automated-vehicle Multimodal In-cabin Experience), the in-cabin agent responsible for handling multimodal passenger-vehicle interactions. In this work, we discuss the benefits of multimodal understanding of in-cabin utterances by incorporating verbal/language input together with the non-verbal/acoustic and visual input from inside and outside the vehicle. Our experimental results outperformed text-only baselines as we achieved improved performances for intent detection with multimodal approach.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Understanding passenger intents from spoken interactions and visual cues (both from inside and outside the vehicle) is an important building block towards developing contextual and scene-aware dialogue systems for autonomous vehicles. When the passengers give instructions to the in-cabin agent AMIE, the agent should parse commands properly considering three modalities (i.e., verbal/language/text, vocal/audio, visual/video) and trigger the appropriate functionality of the AV system.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "For in-cabin dialogue between car assistants and driver/passengers, recent studies explore creating a public dataset using a WoZ approach (Eric et al., 2017) and improving ASR for passenger speech recognition (Fukui et al., 2018) . Another recent work (Zheng et al., 2017) attempts to classify sentences as navigation-related or not using the CU-Move in-vehicle speech corpus (Hansen et al., 2001 ), a relatively old and large corpus focusing on route navigation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 138, |
|
"end": 157, |
|
"text": "(Eric et al., 2017)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 209, |
|
"end": 229, |
|
"text": "(Fukui et al., 2018)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 252, |
|
"end": 272, |
|
"text": "(Zheng et al., 2017)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 376, |
|
"end": 396, |
|
"text": "(Hansen et al., 2001", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We collected a multimodal in-cabin dataset with multi-turn dialogues between the passengers and AMIE using a Wizard-of-Oz (WoZ) scheme via realistic scavenger hunt game. In previous work (Okur et al., 2019) , we experimented with various RNN-based models to detect the utterancelevel intents (i.e., set-destination, change-route, gofaster, go-slower, stop, park, pull-over, drop-off, open-door, other) along with the intent keywords and relevant slots (i.e., location, position/direction, object, gesture/gaze, time-guidance, person) associated with these intents.", |
|
"cite_spans": [ |
|
{ |
|
"start": 187, |
|
"end": 206, |
|
"text": "(Okur et al., 2019)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 340, |
|
"end": 401, |
|
"text": "go-slower, stop, park, pull-over, drop-off, open-door, other)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this work, we discuss the benefits of a multimodal understanding of in-cabin utterances by incorporating verbal/language input together with the non-verbal/acoustic and visual cues, both from inside and outside the vehicle (e.g., passenger gestures and gaze from in-cabin video stream, referred objects outside of the vehicle from the road view camera stream).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Our AMIE in-cabin dataset includes 30 hours of multimodal data collected from 30 passengers (15 female, 15 male) in a total of 20 sessions. In 10 sessions, a single passenger was present, whereas the remaining 10 sessions include two passengers interacting with the vehicle. Participants sit in the back of the vehicle, separated from the driver and the human acting as an agent at the front. The vehicle is modified to hide the operator and the WoZ AMIE agent from the passengers, using a variation of the WoZ approach (Wang et al., 2017) . In each ride/session, which lasted about 1 hour or more, the participants were playing a realistic scavenger hunt game on the streets of Richmond, BC, Canada. Passengers treat the vehicle as AV and communicate with the WoZ AMIE agent mainly via speech commands. Game objectives require passengers to interact naturally with the agent to go to certain destinations, update routes, give specific directions regarding where to pull over or park (sometimes with gestures), find landmarks (refer to outside objects), stop the vehicle, change speed, get in and out of the vehicle, etc. Further details of the data collection protocol and dataset statistics can be found in (Sherry et al., 2018; Okur et al., 2019) . See Fig. 1 for the vehicle instrumentation to enable multimodal data collection setup.", |
|
"cite_spans": [ |
|
{ |
|
"start": 520, |
|
"end": 539, |
|
"text": "(Wang et al., 2017)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 1209, |
|
"end": 1230, |
|
"text": "(Sherry et al., 2018;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 1231, |
|
"end": 1249, |
|
"text": "Okur et al., 2019)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1256, |
|
"end": 1262, |
|
"text": "Fig. 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Multimodal AMIE dataset consists of in-cabin conversations between the passengers and the AV agent, with 10590 utterances in total. 1331 of these utterances have commands to the WoZ agent, hence they are associated with passenger intents. Utterance-level intent and word-level slot annotations are obtained on the transcribed utterances by majority voting of 3 annotators. The annotation results for utterance-level intent types, slots and intent keywords can be found in Table 1 and Table 2 ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 472, |
|
"end": 491, |
|
"text": "Table 1 and Table 2", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Dataset Statistics", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "We explored leveraging multimodality for the Natural Language Understanding (NLU) module in the Spoken Dialogue System (SDS) pipeline. As our AMIE in-cabin dataset has audio and video recordings, we investigated three modalities for the NLU: text, audio, and visual. For text (verbal/language) modality, we employed the Hierarchical & Joint Bi-LSTM model (Schuster and Paliwal, 1997; Hakkani-Tur et al., 2016; Zhang and Wang, 2016; Wen et al., 2018) , namely H-Joint-2.", |
|
"cite_spans": [ |
|
{ |
|
"start": 355, |
|
"end": 383, |
|
"text": "(Schuster and Paliwal, 1997;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 384, |
|
"end": 409, |
|
"text": "Hakkani-Tur et al., 2016;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 410, |
|
"end": 431, |
|
"text": "Zhang and Wang, 2016;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 432, |
|
"end": 449, |
|
"text": "Wen et al., 2018)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "\u2022 Hierarchical & Joint Model (H-Joint-2):", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "This is a 2-level hierarchical joint learning model that detects/extracts intent keywords & slots using sequence-to-sequence Bi-LSTMs first (Level-1), then only the words that are predicted as intent keywords & valid slots are fed into the Joint-2 model (Level-2), which is another sequence-to-sequence Bi-LSTM network for utterance-level intent detection, jointly trained with slots & intent keywords.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "3" |
|
}, |
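To make the two-level flow above concrete, the following is a minimal PyTorch sketch of the idea behind H-Joint-2, not the authors' implementation: the vocabulary size, the three-way tag set, the ten intent classes, the mean-pooling over kept tokens, and the convention that tag id 0 means "other" are all illustrative assumptions.

```python
import torch
import torch.nn as nn

class Level1Tagger(nn.Module):
    """Level-1: seq2seq Bi-LSTM that labels each token (intent keyword / slot / other)."""
    def __init__(self, vocab_size, embed_dim=100, hidden=64, num_tags=3):
        super().__init__()
        self.embed = nn.Embedding(vocab_size, embed_dim)
        self.bilstm = nn.LSTM(embed_dim, hidden, batch_first=True, bidirectional=True)
        self.out = nn.Linear(2 * hidden, num_tags)

    def forward(self, token_ids):                  # (batch, seq_len)
        h, _ = self.bilstm(self.embed(token_ids))  # (batch, seq_len, 2*hidden)
        return self.out(h)                         # per-token tag logits

class Level2JointModel(nn.Module):
    """Level-2: Bi-LSTM over the kept tokens, jointly predicting tags and the utterance intent."""
    def __init__(self, vocab_size, embed_dim=100, hidden=64, num_tags=3, num_intents=10):
        super().__init__()
        self.embed = nn.Embedding(vocab_size, embed_dim)
        self.bilstm = nn.LSTM(embed_dim, hidden, batch_first=True, bidirectional=True)
        self.tag_out = nn.Linear(2 * hidden, num_tags)        # joint slot/keyword head
        self.intent_out = nn.Linear(2 * hidden, num_intents)  # utterance-level intent head

    def forward(self, kept_token_ids):
        h, _ = self.bilstm(self.embed(kept_token_ids))
        return self.tag_out(h), self.intent_out(h.mean(dim=1))

# Toy usage: Level-1 filters the tokens, Level-2 classifies the utterance intent.
tokens = torch.randint(0, 1000, (1, 12))
level1 = Level1Tagger(vocab_size=1000)
keep = level1(tokens).argmax(dim=-1) != 0          # assume tag id 0 means "other"
kept = tokens[keep].unsqueeze(0) if keep.any() else tokens
level2 = Level2JointModel(vocab_size=1000)
tag_logits, intent_logits = level2(kept)           # (1, k, 3), (1, 10)
```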
|
{ |
|
"text": "This architecture was chosen based on the bestperforming uni-modal results presented in previous work (Okur et al., 2019) for utterance-level intent recognition and slot filling on our AMIE dataset. These initial uni-modal results were obtained on the transcribed text with pre-trained GloVe word embeddings (Pennington et al., 2014) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 102, |
|
"end": 121, |
|
"text": "(Okur et al., 2019)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 308, |
|
"end": 333, |
|
"text": "(Pennington et al., 2014)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "In this study, we explore the following multimodal features to better assess passenger intents for conversational agents in self-driving cars: word embeddings for text, speech embeddings and acoustic features for audio, and visual features for the video modality. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "We incorporated pre-trained speech embeddings, called Speech2Vec 1 , as additional audio-related features. These Speech2Vec embeddings (Chung and Glass, 2018) are trained on a corpus of 500 hours of speech from LibriSpeech. Speech2Vec can be considered as a speech version of Word2Vec embeddings (Mikolov et al., 2013) , where the idea is that learning the representations directly from speech can capture the information carried by speech that may not exist in plain text.", |
|
"cite_spans": [ |
|
{ |
|
"start": 135, |
|
"end": 158, |
|
"text": "(Chung and Glass, 2018)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 296, |
|
"end": 318, |
|
"text": "(Mikolov et al., 2013)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word and Speech Embeddings", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "We experimented with concatenating word and speech vectors using GloVe embeddings (6B tokens, 400K vocab, 100-dim), Speech2Vec embeddings (37.6K vocab, 100-dim), and its Word2Vec (37.6K vocab, 100-dim) counterpart, in which the Word2Vec embeddings are trained on the transcript of the same LibriSpeech corpus.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word and Speech Embeddings", |
|
"sec_num": "3.1" |
|
}, |
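As a small illustration of this concatenation (a sketch with toy lookup tables standing in for the real pre-trained GloVe/Speech2Vec files, so the function name and the zero-vector OOV fallback are assumptions), each token is represented by stacking its 100-dim GloVe and 100-dim Speech2Vec vectors:

```python
import numpy as np

def concat_embedding(word, glove, speech2vec, dim=100):
    """Return the 200-dim [GloVe ; Speech2Vec] vector for a token,
    falling back to zeros for words missing from either vocabulary."""
    g = glove.get(word, np.zeros(dim, dtype=np.float32))
    s = speech2vec.get(word, np.zeros(dim, dtype=np.float32))
    return np.concatenate([g, s])                     # shape: (2 * dim,)

# Toy lookup tables; in practice these come from the pre-trained vector files.
glove = {"stop": np.random.rand(100).astype(np.float32)}
speech2vec = {"stop": np.random.rand(100).astype(np.float32)}
features = np.stack([concat_embedding(w, glove, speech2vec)
                     for w in ["please", "stop", "here"]])
print(features.shape)   # (3, 200); OOV tokens get zero halves
```

The zero-vector fallback matters here because the Speech2Vec vocabulary (37.6K words) is much smaller than the GloVe vocabulary (400K words).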
|
{ |
|
"text": "Using openSMILE 2 audio feature extraction toolkit (Eyben et al., 2013) , 1582 acoustic features are extracted for each utterance using the segmented audio clips from AMIE dataset. These are the INTERSPEECH 2010 Paralinguistic Challenge (IS10) features (Schuller et al., 2010) including PCM (pulse-code modulation) loudness, MFCC (Mel-frequency cepstral coefficients), log Mel Freq. Band, LSP (line spectral pairs) Frequency, etc.", |
|
"cite_spans": [ |
|
{ |
|
"start": 51, |
|
"end": 71, |
|
"text": "(Eyben et al., 2013)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 253, |
|
"end": 276, |
|
"text": "(Schuller et al., 2010)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acoustic Features", |
|
"sec_num": "3.2" |
|
}, |
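As a hedged sketch of this per-utterance extraction step: the IS10 Paralinguistic configuration (1582 features) is typically run through the SMILExtract command-line tool; the maintained opensmile Python wrapper shown below does not ship that exact config, so a built-in functionals set is used purely as a stand-in, and the clip file name is hypothetical.

```python
import opensmile

# One functionals vector per segmented audio clip (stand-in for the IS10 set).
smile = opensmile.Smile(
    feature_set=opensmile.FeatureSet.ComParE_2016,
    feature_level=opensmile.FeatureLevel.Functionals,
)
features = smile.process_file("utterance_0001.wav")  # hypothetical segmented clip
print(features.shape)  # (1, num_features): one acoustic vector per utterance
```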
|
{ |
|
"text": "1 https://github.com/iamyuanchung/ speech2vec-pretrained-vectors 2 https://www.audeering.com/opensmile/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acoustic Features", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Intermediate CNN features 3 are extracted from each video clip segmented per utterance from the AMIE dataset. Using the feature extraction process described in (Kordopatis-Zilos et al., 2017), one frame per second is sampled for any given input video clip and its visual descriptors are extracted from the activations of the intermediate convolution layers of a pre-trained CNN. We used the pre-trained Inception-ResNet-v2 model 4 (Szegedy et al., 2016) and generated 4096-dim features for each sample. We experimented with utilizing two sources of visual information: (i) cabin/passenger view from the back-driver RGB camera recordings, (ii) road/outside view from the dash-cam RGB video streams.", |
|
"cite_spans": [ |
|
{ |
|
"start": 431, |
|
"end": 453, |
|
"text": "(Szegedy et al., 2016)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Visual Features", |
|
"sec_num": "3.3" |
|
}, |
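The sketch below illustrates the sampling-and-encoding flow only, under stated assumptions: it samples roughly one frame per second with OpenCV and computes a pooled Inception-ResNet-v2 descriptor per frame via Keras, whereas the pipeline referenced above aggregates intermediate convolutional activations into 4096-dim vectors; the clip file name is hypothetical.

```python
import cv2
import numpy as np
from tensorflow.keras.applications.inception_resnet_v2 import (
    InceptionResNetV2, preprocess_input)

model = InceptionResNetV2(weights="imagenet", include_top=False, pooling="avg")

def frame_features(path):
    """Sample ~1 frame per second from a clip and return one CNN descriptor per frame."""
    cap = cv2.VideoCapture(path)
    fps = cap.get(cv2.CAP_PROP_FPS) or 30
    feats, idx = [], 0
    while True:
        ok, frame = cap.read()
        if not ok:
            break
        if idx % int(round(fps)) == 0:                          # ~1 fps sampling
            rgb = cv2.cvtColor(cv2.resize(frame, (299, 299)), cv2.COLOR_BGR2RGB)
            x = preprocess_input(np.expand_dims(rgb.astype("float32"), 0))
            feats.append(model.predict(x, verbose=0)[0])
        idx += 1
    cap.release()
    return np.stack(feats) if feats else np.zeros((0, 1536))

video_feats = frame_features("clip_0001.mp4")   # (num_sampled_frames, 1536)
```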
|
{ |
|
"text": "Performance results of the utterance-level intent recognition models with varying modality and feature concatenations can be found in Table 3 , using hierarchical joint learning (H-Joint-2). For text and speech embeddings experiments, we observe that using Word2Vec or Speech2Vec representations achieve comparable F1-score performances, which are significantly below the GloVe embeddings performance. This was expected as the pretrained Speech2Vec vectors have lower vocabulary coverage than the GloVe vectors. On the other hand, we observe that concatenating GloVe + Speech2Vec embeddings, and further GloVe + Word2Vec + Speech2Vec yields higher F1-scores for intent recognition. These results show that the speech embeddings indeed can capture useful semantic information carried by speech only, which may not exist in plain text. We also investigate incorporating the audiovisual features on top of text-only and text + speech embedding models. Including openSMILE/IS10 acoustic features from audio as well as intermediate CNN/Inception-ResNet-v2 features from video brings slight improvements to our intent recognition models, achieving 0.92 F1-score. These initial results may require further explorations for specific intents such as stop (e.g., audio intensity & loudness could have helped), or for relevant slots such as passenger gesture/gaze (e.g., cabin-view features) and outside objects (e.g., road-view features).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 134, |
|
"end": 141, |
|
"text": "Table 3", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experimental Results", |
|
"sec_num": "4" |
|
}, |
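For clarity, the simple concatenation evaluated above amounts to stacking the per-utterance modality vectors before intent classification. A minimal sketch with assumed dimensions follows; the linear head is only a stand-in for the H-Joint-2 intent layer:

```python
import torch
import torch.nn as nn

# Assumed per-utterance feature sizes for illustration.
text_vec = torch.randn(1, 200)       # e.g., pooled GloVe + Speech2Vec features
acoustic_vec = torch.randn(1, 1582)  # openSMILE IS10-style utterance vector
visual_vec = torch.randn(1, 4096)    # pooled CNN features from the video clip

fused = torch.cat([text_vec, acoustic_vec, visual_vec], dim=-1)
intent_head = nn.Linear(fused.shape[-1], 10)   # 10 utterance-level intent types
intent_logits = intent_head(fused)             # (1, 10)
```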
|
{ |
|
"text": "In this work, we briefly present our initial explorations towards the multimodal understanding of passenger utterances in autonomous vehicles. We show that our experimental results outperformed the uni-modal text-only baseline results, and with multimodality, we achieved improved performances for passenger intent detection in AV. This ongoing research has the potential impact of exploring real-world challenges with humanvehicle-scene interactions for autonomous driving support via spoken utterances.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future Work", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "There exist various exciting recent work on improved multimodal fusion techniques Liang et al., 2019a; Pham et al., 2019; Baltru\u0161aitis et al., 2019) . In addition to the simplified feature and modality concatenations, we plan to explore some of these promising tensor-based multimodal fusion networks (Liu et al., 2018; Liang et al., 2019b; Tsai et al., 2019) for more robust intent classification on AMIE dataset as future work.", |
|
"cite_spans": [ |
|
{ |
|
"start": 82, |
|
"end": 102, |
|
"text": "Liang et al., 2019a;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 103, |
|
"end": 121, |
|
"text": "Pham et al., 2019;", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 122, |
|
"end": 148, |
|
"text": "Baltru\u0161aitis et al., 2019)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 301, |
|
"end": 319, |
|
"text": "(Liu et al., 2018;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 320, |
|
"end": 340, |
|
"text": "Liang et al., 2019b;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 341, |
|
"end": 359, |
|
"text": "Tsai et al., 2019)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future Work", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "https://github.com/MKLab-ITI/ intermediate-cnn-features 4 https://github.com/tensorflow/models/ tree/master/research/slim", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Multimodal machine learning: A survey and taxonomy", |
|
"authors": [ |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Baltru\u0161aitis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Ahuja", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Morency", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "IEEE Transactions on Pattern Analysis and Machine Intelligence", |
|
"volume": "41", |
|
"issue": "2", |
|
"pages": "423--443", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "T. Baltru\u0161aitis, C. Ahuja, and L. Morency. 2019. Mul- timodal machine learning: A survey and taxonomy. IEEE Transactions on Pattern Analysis and Machine Intelligence, 41(2):423-443.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Speech2vec: A sequence-to-sequence framework for learning word embeddings from speech", |
|
"authors": [ |
|
{ |
|
"first": "Yu-An", |
|
"middle": [], |
|
"last": "Chung", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Glass", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proc. INTERSPEECH 2018", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "811--815", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.21437/Interspeech.2018-2341" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yu-An Chung and James Glass. 2018. Speech2vec: A sequence-to-sequence framework for learning word embeddings from speech. In Proc. INTERSPEECH 2018, pages 811-815.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Key-value retrieval networks for task-oriented dialogue", |
|
"authors": [ |
|
{ |
|
"first": "Mihail", |
|
"middle": [], |
|
"last": "Eric", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lakshmi", |
|
"middle": [], |
|
"last": "Krishnan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Francois", |
|
"middle": [], |
|
"last": "Charette", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 18th Annual SIGdial Meeting on Discourse and Dialogue", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "37--49", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W17-5506" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mihail Eric, Lakshmi Krishnan, Francois Charette, and Christopher D. Manning. 2017. Key-value retrieval networks for task-oriented dialogue. In Proceedings of the 18th Annual SIGdial Meeting on Discourse and Dialogue, pages 37-49. Association for Com- putational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Recent developments in opensmile, the munich open-source multimedia feature extractor", |
|
"authors": [ |
|
{ |
|
"first": "Florian", |
|
"middle": [], |
|
"last": "Eyben", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Felix", |
|
"middle": [], |
|
"last": "Weninger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Florian", |
|
"middle": [], |
|
"last": "Gross", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bj\u00f6rn", |
|
"middle": [], |
|
"last": "Schuller", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proc. ACM International Conference on Multimedia, MM '13", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "835--838", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/2502081.2502224" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Florian Eyben, Felix Weninger, Florian Gross, and Bj\u00f6rn Schuller. 2013. Recent developments in opensmile, the munich open-source multimedia fea- ture extractor. In Proc. ACM International Confer- ence on Multimedia, MM '13, pages 835-838.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Sound source separation for plural passenger speech recognition in smart mobility system", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Fukui", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Watanabe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Kanazawa", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "IEEE Transactions on Consumer Electronics", |
|
"volume": "64", |
|
"issue": "3", |
|
"pages": "399--405", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/TCE.2018.2867801" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "M. Fukui, T. Watanabe, and M. Kanazawa. 2018. Sound source separation for plural passenger speech recognition in smart mobility system. IEEE Trans- actions on Consumer Electronics, 64(3):399-405.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Multi-domain joint semantic frame parsing using bi-directional rnn-lstm", |
|
"authors": [ |
|
{ |
|
"first": "Dilek", |
|
"middle": [], |
|
"last": "Hakkani-Tur", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gokhan", |
|
"middle": [], |
|
"last": "Tur", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Asli", |
|
"middle": [], |
|
"last": "Celikyilmaz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yun-Nung Vivian", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianfeng", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Deng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ye-Yi", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dilek Hakkani-Tur, Gokhan Tur, Asli Celikyilmaz, Yun-Nung Vivian Chen, Jianfeng Gao, Li Deng, and Ye-Yi Wang. 2016. Multi-domain joint semantic frame parsing using bi-directional rnn-lstm. ISCA.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Cumove: Analysis & corpus development for interactive in-vehicle speech systems", |
|
"authors": [ |
|
{ |
|
"first": "H", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "John", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pongtep", |
|
"middle": [], |
|
"last": "Hansen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jay", |
|
"middle": [], |
|
"last": "Angkititrakul", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Plucienkowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Umit", |
|
"middle": [], |
|
"last": "Gallant", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bryan", |
|
"middle": [], |
|
"last": "Yapanel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wayne", |
|
"middle": [], |
|
"last": "Pellom", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ron", |
|
"middle": [], |
|
"last": "Ward", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Cole", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Seventh European Conference on Speech Communication and Technology", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John HL Hansen, Pongtep Angkititrakul, Jay Plu- cienkowski, Stephen Gallant, Umit Yapanel, Bryan Pellom, Wayne Ward, and Ron Cole. 2001. Cu- move: Analysis & corpus development for interac- tive in-vehicle speech systems. In Seventh European Conference on Speech Communication and Technol- ogy.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Near-duplicate video retrieval by aggregating intermediate cnn layers", |
|
"authors": [], |
|
"year": 2017, |
|
"venue": "Giorgos Kordopatis-Zilos, Symeon Papadopoulos, Ioannis Patras, and Yiannis Kompatsiaris", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "251--263", |
|
"other_ids": { |
|
"DOI": [ |
|
"https://rd.springer.com/chapter/10.1007/978-3-319-51811-4_21" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Giorgos Kordopatis-Zilos, Symeon Papadopoulos, Ioannis Patras, and Yiannis Kompatsiaris. 2017. Near-duplicate video retrieval by aggregating inter- mediate cnn layers. In International Conference on Multimedia Modeling, pages 251-263. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Strong and simple baselines for multimodal utterance embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Paul Pu", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yao", |
|
"middle": [ |
|
"Chong" |
|
], |
|
"last": "Lim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yao-Hung Hubert", |
|
"middle": [], |
|
"last": "Tsai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruslan", |
|
"middle": [], |
|
"last": "Salakhutdinov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Louis-Philippe", |
|
"middle": [], |
|
"last": "Morency", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "2599--2609", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1267" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Paul Pu Liang, Yao Chong Lim, Yao-Hung Hubert Tsai, Ruslan Salakhutdinov, and Louis-Philippe Morency. 2019a. Strong and simple baselines for multimodal utterance embeddings. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 2599-2609, Minneapolis, Minnesota. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Learning representations from imperfect time series data via tensor rank regularization", |
|
"authors": [ |
|
{ |
|
"first": "Zhun", |
|
"middle": [], |
|
"last": "Paul Pu Liang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yao-Hung Hubert", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qibin", |
|
"middle": [], |
|
"last": "Tsai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruslan", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Louis-Philippe", |
|
"middle": [], |
|
"last": "Salakhutdinov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Morency", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1569--1576", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-1152" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Paul Pu Liang, Zhun Liu, Yao-Hung Hubert Tsai, Qibin Zhao, Ruslan Salakhutdinov, and Louis-Philippe Morency. 2019b. Learning representations from im- perfect time series data via tensor rank regulariza- tion. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 1569-1576, Florence, Italy. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Efficient lowrank multimodal fusion with modality-specific factors", |
|
"authors": [ |
|
{ |
|
"first": "Zhun", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ying", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Varun", |
|
"middle": [], |
|
"last": "Bharadhwaj Lakshminarasimhan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [ |
|
"Pu" |
|
], |
|
"last": "Liang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amirali", |
|
"middle": [], |
|
"last": "Bagher Zadeh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Louis-Philippe", |
|
"middle": [], |
|
"last": "Morency", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/p18-1209" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhun Liu, Ying Shen, Varun Bharadhwaj Lakshmi- narasimhan, Paul Pu Liang, AmirAli Bagher Zadeh, and Louis-Philippe Morency. 2018. Efficient low- rank multimodal fusion with modality-specific fac- tors. Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers).", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Distributed representations of words and phrases and their compositionality", |
|
"authors": [ |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Greg", |
|
"middle": [], |
|
"last": "Corrado", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Dean", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 26th International Conference on Neural Information Processing Systems", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "3111--3119", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg Cor- rado, and Jeffrey Dean. 2013. Distributed represen- tations of words and phrases and their composition- ality. In Proceedings of the 26th International Con- ference on Neural Information Processing Systems - Volume 2, NIPS'13, pages 3111-3119, USA.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Natural language interactions in autonomous vehicles: Intent detection and slot filling from passenger utterances", |
|
"authors": [ |
|
{ |
|
"first": "Eda", |
|
"middle": [], |
|
"last": "Okur", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Shachi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Saurav", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Sahay", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "20th International Conference on Computational Linguistics and Intelligent Text Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eda Okur, Shachi H Kumar, Saurav Sahay, Asli Arslan Esme, and Lama Nachman. 2019. Natural language interactions in autonomous vehicles: Intent detec- tion and slot filling from passenger utterances. 20th International Conference on Computational Linguis- tics and Intelligent Text Processing (CICLing 2019).", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Glove: Global vectors for word representation", |
|
"authors": [ |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Pennington", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Empirical Methods in Natural Language Processing", |
|
"volume": "14", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeffrey Pennington, Richard Socher, and Christopher D. Manning. 2014. Glove: Global vectors for word rep- resentation. In Empirical Methods in Natural Lan- guage Processing (EMNLP'14).", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Found in translation: Learning robust joint representations by cyclic translations between modalities", |
|
"authors": [ |
|
{ |
|
"first": "Hai", |
|
"middle": [], |
|
"last": "Pham", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [ |
|
"Pu" |
|
], |
|
"last": "Liang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Manzini", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the AAAI Conference on Artificial Intelligence", |
|
"volume": "33", |
|
"issue": "", |
|
"pages": "6892--6899", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hai Pham, Paul Pu Liang, Thomas Manzini, Louis- Philippe Morency, and Barnab\u00e1s P\u00f3czos. 2019. Found in translation: Learning robust joint represen- tations by cyclic translations between modalities. In Proceedings of the AAAI Conference on Artificial In- telligence, volume 33, pages 6892-6899.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "The interspeech 2010 paralinguistic challenge", |
|
"authors": [ |
|
{ |
|
"first": "Bj\u00f6rn", |
|
"middle": [], |
|
"last": "Schuller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stefan", |
|
"middle": [], |
|
"last": "Steidl", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anton", |
|
"middle": [], |
|
"last": "Batliner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Felix", |
|
"middle": [], |
|
"last": "Burkhardt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Laurence", |
|
"middle": [], |
|
"last": "Devillers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christian", |
|
"middle": [], |
|
"last": "M\u00fcller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shrikanth S", |
|
"middle": [], |
|
"last": "Narayanan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proc. INTER-SPEECH", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bj\u00f6rn Schuller, Stefan Steidl, Anton Batliner, Felix Burkhardt, Laurence Devillers, Christian M\u00fcller, and Shrikanth S Narayanan. 2010. The interspeech 2010 paralinguistic challenge. In Proc. INTER- SPEECH 2010.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Bidirectional recurrent neural networks", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Schuster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Paliwal", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Trans. Sig. Proc", |
|
"volume": "45", |
|
"issue": "11", |
|
"pages": "2673--2681", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/78.650093" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "M. Schuster and K.K. Paliwal. 1997. Bidirectional recurrent neural networks. Trans. Sig. Proc., 45(11):2673-2681.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Getting things done in an autonomous vehicle", |
|
"authors": [ |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Sherry", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Beckwith", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cagri", |
|
"middle": [], |
|
"last": "Asli Arslan Esme", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Tanriover", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Social Robots in the Wild Workshop, 13th ACM/IEEE International Conference on Human-Robot Interaction", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John Sherry, Richard Beckwith, Asli Arslan Esme, and Cagri Tanriover. 2018. Getting things done in an autonomous vehicle. In Social Robots in the Wild Workshop, 13th ACM/IEEE International Con- ference on Human-Robot Interaction (HRI 2018).", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Inception-v4, inception-resnet and the impact of residual connections on learning", |
|
"authors": [ |
|
{ |
|
"first": "Christian", |
|
"middle": [], |
|
"last": "Szegedy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sergey", |
|
"middle": [], |
|
"last": "Ioffe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vincent", |
|
"middle": [], |
|
"last": "Vanhoucke", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Christian Szegedy, Sergey Ioffe, and Vincent Van- houcke. 2016. Inception-v4, inception-resnet and the impact of residual connections on learning. CoRR, abs/1602.07261.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Multimodal transformer for unaligned multimodal language sequences", |
|
"authors": [ |
|
{ |
|
"first": "Yao-Hung Hubert", |
|
"middle": [], |
|
"last": "Tsai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shaojie", |
|
"middle": [], |
|
"last": "Bai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [ |
|
"Pu" |
|
], |
|
"last": "Liang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"Zico" |
|
], |
|
"last": "Kolter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Louis-Philippe", |
|
"middle": [], |
|
"last": "Morency", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruslan", |
|
"middle": [], |
|
"last": "Salakhutdinov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yao-Hung Hubert Tsai, Shaojie Bai, Paul Pu Liang, J. Zico Kolter, Louis-Philippe Morency, and Ruslan Salakhutdinov. 2019. Multimodal transformer for unaligned multimodal language sequences. In Pro- ceedings of the 57th Annual Meeting of the Associa- tion for Computational Linguistics (Volume 1: Long Papers), Florence, Italy. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Marionette: Enabling on-road wizard-ofoz autonomous driving studies", |
|
"authors": [ |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Srinath", |
|
"middle": [], |
|
"last": "Sibi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brian", |
|
"middle": [], |
|
"last": "Mok", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wendy", |
|
"middle": [], |
|
"last": "Ju", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2017 ACM/IEEE International Conference on Human-Robot Interaction, HRI '17", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "234--243", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/2909824.3020256" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peter Wang, Srinath Sibi, Brian Mok, and Wendy Ju. 2017. Marionette: Enabling on-road wizard-of- oz autonomous driving studies. In Proceedings of the 2017 ACM/IEEE International Conference on Human-Robot Interaction, HRI '17, pages 234-243, New York, NY, USA. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Jointly modeling intent identification and slot filling with contextual and hierarchical information", |
|
"authors": [ |
|
{ |
|
"first": "Liyun", |
|
"middle": [], |
|
"last": "Wen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaojie", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhenjiang", |
|
"middle": [], |
|
"last": "Dong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hong", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Natural Language Processing and Chinese Computing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3--15", |
|
"other_ids": { |
|
"DOI": [ |
|
"https://rd.springer.com/chapter/10.1007/978-3-319-73618-1_1" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Liyun Wen, Xiaojie Wang, Zhenjiang Dong, and Hong Chen. 2018. Jointly modeling intent identification and slot filling with contextual and hierarchical infor- mation. In Natural Language Processing and Chi- nese Computing, pages 3-15, Cham. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Soujanya Poria, Erik Cambria, and Louis-Philippe Morency", |
|
"authors": [ |
|
{ |
|
"first": "Amir", |
|
"middle": [], |
|
"last": "Zadeh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [ |
|
"Pu" |
|
], |
|
"last": "Liang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Navonil", |
|
"middle": [], |
|
"last": "Mazumder", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Thirty-Second AAAI Conference on Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Amir Zadeh, Paul Pu Liang, Navonil Mazumder, Soujanya Poria, Erik Cambria, and Louis-Philippe Morency. 2018. Memory fusion network for multi- view sequential learning. Proceedings of the Thirty- Second AAAI Conference on Artificial Intelligence.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "A joint model of intent determination and slot filling for spoken language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Xiaodong", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Houfeng", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the Twenty-Fifth International Joint Conference on Artificial Intelligence, IJCAI'16", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2993--2999", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiaodong Zhang and Houfeng Wang. 2016. A joint model of intent determination and slot filling for spo- ken language understanding. In Proceedings of the Twenty-Fifth International Joint Conference on Arti- ficial Intelligence, IJCAI'16, pages 2993-2999.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Navigation-orientated natural spoken language understanding for intelligent vehicle dialogue", |
|
"authors": [ |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Zheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"H L" |
|
], |
|
"last": "Hansen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "2017 IEEE Intelligent Vehicles Symposium (IV)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "559--564", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/IVS.2017.7995777" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Y. Zheng, Y. Liu, and J. H. L. Hansen. 2017. Navigation-orientated natural spoken language un- derstanding for intelligent vehicle dialogue. In 2017 IEEE Intelligent Vehicles Symposium (IV), pages 559-564.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"text": "AMIE In-cabin Data Collection Setup", |
|
"uris": null, |
|
"num": null, |
|
"type_str": "figure" |
|
}, |
|
"TABREF0": { |
|
"html": null, |
|
"num": null, |
|
"text": ".", |
|
"content": "<table><tr><td>AMIE Scenario</td><td>Intent Type</td><td>Utterance Count</td></tr><tr><td>Set/Change</td><td>SetDestination</td><td>311</td></tr><tr><td>Destination/Route</td><td>SetRoute</td><td>507</td></tr><tr><td/><td>Park</td><td>151</td></tr><tr><td>Finishing the Trip</td><td>PullOver</td><td>34</td></tr><tr><td/><td>Stop</td><td>27</td></tr><tr><td>Set/Change</td><td>GoFaster</td><td>73</td></tr><tr><td>Driving Behavior/Speed</td><td>GoSlower</td><td>41</td></tr><tr><td>Others</td><td>OpenDoor</td><td>136</td></tr><tr><td>(Door, Music, A/C, etc.)</td><td>Other</td><td>51</td></tr><tr><td/><td>Total</td><td>1331</td></tr></table>", |
|
"type_str": "table" |
|
}, |
|
"TABREF1": { |
|
"html": null, |
|
"num": null, |
|
"text": "", |
|
"content": "<table/>", |
|
"type_str": "table" |
|
}, |
|
"TABREF3": { |
|
"html": null, |
|
"num": null, |
|
"text": "", |
|
"content": "<table><tr><td>57</td></tr></table>", |
|
"type_str": "table" |
|
}, |
|
"TABREF4": { |
|
"html": null, |
|
"num": null, |
|
"text": "F1-scores of Intent Recognition with Multimodal Features", |
|
"content": "<table/>", |
|
"type_str": "table" |
|
} |
|
} |
|
} |
|
} |