|
{ |
|
"paper_id": "N19-1011", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T14:03:18.206334Z" |
|
}, |
|
"title": "AudioCaps: Generating Captions for Audios in The Wild", |
|
"authors": [ |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Dongjoo", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Seoul National University", |
|
"location": { |
|
"settlement": "Seoul", |
|
"country": "Korea" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Kim", |
|
"middle": [], |
|
"last": "Byeongchang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Seoul National University", |
|
"location": { |
|
"settlement": "Seoul", |
|
"country": "Korea" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Kim", |
|
"middle": [ |
|
"Hyunmin" |
|
], |
|
"last": "Lee", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Seoul National University", |
|
"location": { |
|
"settlement": "Seoul", |
|
"country": "Korea" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Gunhee", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Seoul National University", |
|
"location": { |
|
"settlement": "Seoul", |
|
"country": "Korea" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "We explore the problem of audio captioning 1 : generating natural language description for any kind of audio in the wild, which has been surprisingly unexplored in previous research. We contribute a large-scale dataset of 46K audio clips with human-written text pairs collected via crowdsourcing on the AudioSet dataset (Gemmeke et al., 2017). Our thorough empirical studies not only show that our collected captions are indeed loyal to the audio inputs but also discover what forms of audio representation and captioning models are effective for audio captioning. From extensive experiments, we also propose two novel components that are integrable with any attentionbased captioning model to help improve audio captioning performance: the top-down multiscale encoder and aligned semantic attention.", |
|
"pdf_parse": { |
|
"paper_id": "N19-1011", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "We explore the problem of audio captioning 1 : generating natural language description for any kind of audio in the wild, which has been surprisingly unexplored in previous research. We contribute a large-scale dataset of 46K audio clips with human-written text pairs collected via crowdsourcing on the AudioSet dataset (Gemmeke et al., 2017). Our thorough empirical studies not only show that our collected captions are indeed loyal to the audio inputs but also discover what forms of audio representation and captioning models are effective for audio captioning. From extensive experiments, we also propose two novel components that are integrable with any attentionbased captioning model to help improve audio captioning performance: the top-down multiscale encoder and aligned semantic attention.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Captioning, the task of translating a multimedia input source into natural language, has been substantially studied over the past few years. The vast majority of the journey has been through the visual senses ranging from static images to videos. Yet, the exploration into the auditory sense has been circumscribed to human speech transcription (Panayotov et al., 2015; Nagrani et al., 2017) , leaving the basic natural form of sound in an uncharted territory of the captioning research.", |
|
"cite_spans": [ |
|
{ |
|
"start": 345, |
|
"end": 369, |
|
"text": "(Panayotov et al., 2015;", |
|
"ref_id": "BIBREF57" |
|
}, |
|
{ |
|
"start": 370, |
|
"end": 391, |
|
"text": "Nagrani et al., 2017)", |
|
"ref_id": "BIBREF55" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Recently, sound event detection has gained much attention such as DCASE challenges (Mesaros et al., 2017) along with the release of a large scale AudioSet dataset . However, sound classification (e.g. predicting multiple labels for a given sound) and event detection (e.g. localizing the sound of interest in a clip) may not be sufficient for a full understanding of the sound. Instead, a natural sen- 1 For a live demo and details, https://audiocaps.github.io tence offers a greater freedom to express a sound, because it allows to characterize objects along with their states, properties, actions and interactions. For example, suppose that suddenly sirens are ringing in the downtown area. As a natural reaction, people may notice the presence of an emergency vehicle, even though they are unable to see any flashing lights nor feel the rush of wind from a passing vehicle. Instead of simply tagging this sound as ambulance or siren, it is more informative to describe which direction the sound is coming from or whether the source of the sound is moving closer or further away, as shown in Figure 1. To that end, we address the audio captioning problem for audios in the wild, which has not been studied yet, to the best of our knowledge. This work focuses on one of the most important bases toward this research direction, contributing a large-scale dataset. The overarching sources of in-the-wild sounds are grounded on the Au-dioSet , so far the largest collection of sound events collected from Youtube videos. We newly collect human-written sentences for a subset of AudioSet audio clips via crowdsourcing on Amazon Mechanical Turk (section 3). We also develop two simple yet effective techniques to generate captions through the joint use of multi-level pretrained features and better attention mechanism named aligned-semantic attention (section 4). Lastly, we perform experiments contrasting between video-based captions and audio-focused captions by employing a variety of features and captioning models (section 5).", |
|
"cite_spans": [ |
|
{ |
|
"start": 83, |
|
"end": 105, |
|
"text": "(Mesaros et al., 2017)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 402, |
|
"end": 403, |
|
"text": "1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1094, |
|
"end": 1103, |
|
"text": "Figure 1.", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The contributions of this work are as follows.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "1. To the best of our knowledge, this work is the first attempt to address the audio captioning task for sound in the wild. We contribute its first large-scale dataset named AudioCaps, which consists of 46K pairs of audio clips and text description.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "2. We perform thorough empirical studies not only to show that our collected captions are indeed true to the audio inputs and but also to discover what forms of audio representations and captioning models are effective. For example, we observe that the embeddings from large-scale pretrained VGGish are powerful in describing the audio input, and both temporal and semantic attention are helpful to enhance captioning performance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "3. From extensive experiments, we propose two simple yet effective technical components that further improve audio captioning performance: the top-down multi-scale encoder that enables the joint use of multi-level features and aligned semantic attention that advances the consistency between semantic attention and spatial/temporal attention.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Speech recognition and separation. One of the most eminent tasks for audio understanding may be speech recognition, the task of recognizing and translating human spoken language into text with less emphasis on background sound that may coexist. A multitude of datasets exist for such task e.g. Speech Commands dataset (Warden, 2018) , Common Voice dataset (Mozilla, 2017) , Librispeech (Panayotov et al., 2015) , LS Speech (Ito, 2017) . As one of similar lineage, automatic speech separation forks an input audio signal into several individual speech sources (Hershey et al., 2016; Ephrat et al., 2018) . To most of these tasks, in the wild sound is deemed as background noise to be removed as an obstructer of speech recognition. On the other hand, our work puts the spotlight on these neglected sounds and express them through natural language.", |
|
"cite_spans": [ |
|
{ |
|
"start": 318, |
|
"end": 332, |
|
"text": "(Warden, 2018)", |
|
"ref_id": "BIBREF73" |
|
}, |
|
{ |
|
"start": 356, |
|
"end": 371, |
|
"text": "(Mozilla, 2017)", |
|
"ref_id": "BIBREF54" |
|
}, |
|
{ |
|
"start": 386, |
|
"end": 410, |
|
"text": "(Panayotov et al., 2015)", |
|
"ref_id": "BIBREF57" |
|
}, |
|
{ |
|
"start": 423, |
|
"end": 434, |
|
"text": "(Ito, 2017)", |
|
"ref_id": "BIBREF41" |
|
}, |
|
{ |
|
"start": 559, |
|
"end": 581, |
|
"text": "(Hershey et al., 2016;", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 582, |
|
"end": 602, |
|
"text": "Ephrat et al., 2018)", |
|
"ref_id": "BIBREF30" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Audio classification and sound event detection. This line of tasks emphasizes categorizing a sound into a set of predefined classes. There exist a number of datasets to aid in achieving this goal, including DCASE series (Stowell et al., 2015; Mesaros et al., 2016 Mesaros et al., , 2017 , UrbanSound8k (Salamon et al., 2014) , ESC (Piczak, 2015) . Au-dioSet (Gemmeke et al., 2017) is an audio event dataset collected from Youtube that is unsurpassed in terms of coverage and size, structured with an ontology containing 527 classes. Another predominant large-scale dataset is Freesound (Fonseca et al., 2017) . It consists of audio samples from freesound.org recordings based on the preceding AudioSet ontology. In contrast to audio classification, which uniquely map the audio to a set of labels, our task generates a descriptive sentence. Hence, it needs to not only detect salient sounds of classes but also explores their states, properties, actions or interactions.", |
|
"cite_spans": [ |
|
{ |
|
"start": 220, |
|
"end": 242, |
|
"text": "(Stowell et al., 2015;", |
|
"ref_id": "BIBREF69" |
|
}, |
|
{ |
|
"start": 243, |
|
"end": 263, |
|
"text": "Mesaros et al., 2016", |
|
"ref_id": "BIBREF53" |
|
}, |
|
{ |
|
"start": 264, |
|
"end": 286, |
|
"text": "Mesaros et al., , 2017", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 302, |
|
"end": 324, |
|
"text": "(Salamon et al., 2014)", |
|
"ref_id": "BIBREF63" |
|
}, |
|
{ |
|
"start": 327, |
|
"end": 345, |
|
"text": "ESC (Piczak, 2015)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 348, |
|
"end": 380, |
|
"text": "Au-dioSet (Gemmeke et al., 2017)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 586, |
|
"end": 608, |
|
"text": "(Fonseca et al., 2017)", |
|
"ref_id": "BIBREF31" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Captioning tasks and datasets. The vast majority of captioning tasks and datasets focus on the visual domain. Image captioning generates text description of an image, and numerous datasets are proposed, such as Flickr 8k (Rashtchian et al., 2010) , Flickr 30k (Young et al., 2014) , MS COCO (Lin et al., 2014) , DenseCap and Conceptual Captions (Sharma et al., 2018) . Akin to the image captioning is video captioning, for which there are many datasets too, including MSVD (Guadarrama et al., 2013) , MSR-VTT (Xu et al., 2016) , LSMDC (Rohrbach et al., 2017) and ActivityNet Captions (Krishna et al., 2017) .Compared to previous captioning tasks and datasets, our work confines the problem by focusing on in the wild audio inputs.", |
|
"cite_spans": [ |
|
{ |
|
"start": 221, |
|
"end": 246, |
|
"text": "(Rashtchian et al., 2010)", |
|
"ref_id": "BIBREF60" |
|
}, |
|
{ |
|
"start": 260, |
|
"end": 280, |
|
"text": "(Young et al., 2014)", |
|
"ref_id": "BIBREF78" |
|
}, |
|
{ |
|
"start": 291, |
|
"end": 309, |
|
"text": "(Lin et al., 2014)", |
|
"ref_id": "BIBREF48" |
|
}, |
|
{ |
|
"start": 345, |
|
"end": 366, |
|
"text": "(Sharma et al., 2018)", |
|
"ref_id": "BIBREF66" |
|
}, |
|
{ |
|
"start": 473, |
|
"end": 498, |
|
"text": "(Guadarrama et al., 2013)", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 509, |
|
"end": 526, |
|
"text": "(Xu et al., 2016)", |
|
"ref_id": "BIBREF74" |
|
}, |
|
{ |
|
"start": 535, |
|
"end": 558, |
|
"text": "(Rohrbach et al., 2017)", |
|
"ref_id": "BIBREF61" |
|
}, |
|
{ |
|
"start": 584, |
|
"end": 606, |
|
"text": "(Krishna et al., 2017)", |
|
"ref_id": "BIBREF45" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Recently, there have been some efforts to solve video captioning with audio input (Hori et al., 2017 (Hori et al., , 2018 . However, the audio input merely serves as auxiliary features for video captioning, and as a result, it only marginally improves the performance (e.g. BLEU-4 score: 39.6 (video only) vs. 40.3 (video + MFCC) ). These results are partly culpable to dataset collection, where the annotators mostly rely on the video input. On the contrary, our collection induces the annotators to mainly abide to audio, hence, increasing the dependency of written text on the audio input as can be shown in our survey analysis in Figure 5 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 82, |
|
"end": 100, |
|
"text": "(Hori et al., 2017", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 101, |
|
"end": 121, |
|
"text": "(Hori et al., , 2018", |
|
"ref_id": "BIBREF39" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 634, |
|
"end": 642, |
|
"text": "Figure 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Our AudioCaps dataset entails 46K audio caption pairs. Table 1 outlines its key statistics. The audio sources are rooted in AudioSet (Gemmeke et al., 2017), a large-scale audio event dataset, from which we draft the AudioCaps, as discussed below. We present more details of data collection and statistics in the Appendix.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 55, |
|
"end": 62, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "The Audio Captioning Dataset", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "It is important to select qualified audio clips as the first step of dataset collection. The chosen categories of clips must be well-rounded in coverage of naturally occurring audios, be relevant to practical applications and appear with high frequency. To that end, we tailor the AudioSet dataset (Gemmeke et al., 2017) that comprises 1,789,621 human-labeled 10 second YouTube excerpts with an ontology of 527 audio event categories. However, an immediate collection of captions from these audios pose several difficulties: (i) too many audio clips, (ii) inconsistent level of abstraction among the classes, (iii) distribution bias of some labels and (iv) noisy labels that are only noticeable from visual cues. We circumvent these issues through a controlled sampling process as described below.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "AudioSet Tailoring", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Among 527 audio event categories of AudioSet, we first exclude all the labels whose number of clips are less than 1,000 to promote a balanced distribution within the dataset. We also remove all 151 labels in the music super-category, because they are often indiscernible even for a human. For example, a human with no expertise can hardly discriminate the sound of Guitar from Banjo. Thus, we set aside the musical territory for future exploration. We further discard categories if they do not satisfy the following two constraints. The word labels should be identifiable solely from sound (i) without requiring visuals (e.g. remove the category inside small room) and (ii) without requiring any expertise (e.g. remove power windows and electric windows because their distinction may be possible only for car experts). Fi- : Some statistics of AudioCaps dataset. We also show average and median (in parentheses) values. labels refer to the semantic attributes. nally, we select 75 word labels derived from 7 augmented super-categories as avoiding the sharp skewness in the word labels (e.g. 48.5% clips include speech label). We limit the number of instances per category to 2,000 by sampling with preference to audio clips associated with more word labels to prioritize the audios with diverse content. The final number of audio clips is about 115K, from which we obtain captions for 46K as the first version.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "AudioSet Tailoring", |
|
"sec_num": "3.1" |
|
}, |
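
{

"text": "As an illustration of the sampling procedure above, the following Python sketch filters and caps AudioSet-style labels; the data containers, the exclusion list and the exact ordering heuristic are assumptions, not the authors' released code:

from collections import Counter

def tailor_audioset(clips, excluded_labels, min_clips_per_label=1000, max_per_label=2000):
    # clips: list of (clip_id, set_of_labels) pairs from AudioSet (hypothetical input);
    # excluded_labels: labels to drop, e.g. the music super-category and labels that
    # need visual or expert knowledge. Thresholds follow Sec. 3.1; other details are assumptions.
    counts = Counter(label for _, labels in clips for label in labels)
    kept = {l for l, c in counts.items() if c >= min_clips_per_label and l not in excluded_labels}

    # Prefer clips carrying more surviving labels so that diverse content is prioritized.
    ranked = sorted(clips, key=lambda item: -len(item[1] & kept))

    per_label, selected = Counter(), []
    for clip_id, labels in ranked:
        labels = labels & kept
        # Skip unlabeled clips and clips that would push any label over the cap.
        if not labels or any(per_label[l] >= max_per_label for l in labels):
            continue
        selected.append((clip_id, labels))
        per_label.update(labels)
    return selected",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "AudioSet Tailoring",

"sec_num": "3.1"

},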
|
{ |
|
"text": "The collected captions should be precise, specific, diverse, expressive, large-scale and correlated with the paired audios with minimal visual presumptions. Such complex nature of our requirements necessitates employing crowdworkers through Amazon Mechanical Turk (AMT). Some qualification measures are set for the crowdworkers, such as they should hold a +95% HIT approval rate and the total number of approved HITs that are greater than 1,000 and be located at one of [AU, CA, GB, NZ, US]. In total, 108 caption writing workers and 3 caption reviewing workers participate and are compensated at 10 cents per clip.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Audio Annotation", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Annotation Interface. Figure 2 shows our annotation interface, which is designed to minimize the visual presumption while maintaining diversity. Each task page consists of an audio clip of about 10 seconds, word hints and video hints.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 22, |
|
"end": 30, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Audio Annotation", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "The word hints are the word labels that are provided by AudioSet for the clip and are employed A train is approaching with a low rumble and rhythmic click and squeal Below officers creep toward the entrance the door and points a gun as hints to the crowdworkers. Even to humans, recognizing the true identity of a sound can be ambiguous, and thus the word hints act as a precursor to accurately guide the crowdworkers during the description process, while staying aloof from visual bias. Another benefit is that the diversity of the word labels may also enrich the expressiveness of the description. Also derived from Au-dioSet, the video hints are provided as a stronger hint for sounds that are too difficult even to the human ear or for clips associated with some erroneous or missing word hints (weak labels). We advise the workers to use them as a last resort measure.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Audio Annotation", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Some instructions 2 are also provided to demarcate crowdworkers' descriptions as follows. (i) Do not include the words for visuals in the video that are not present in the sound. (ii) Ignore speech semantics. (iii) When applicable, be detailed and expressive. (iv) Do not be imaginative and be literal and present with the descriptions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Audio Annotation", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Quality Control. We use a qualification test to discern many crowdworkers who frequently violate the given instructions (e.g. transcribing instead of describing, just enumerating provided word hints or writing visual captions). Interested crowdworkers must participate in the test and submit a response, which the authors manually check and approve if they are eligible. We employ three additional workers to verify the data in accordance to our guidelines. In order to maintain high approval rates, we periodically blacklist malicious crowdworkers while granting reasonable incentives to benevolent workers.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Audio Annotation", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "2 https://audiocaps.github.io/ instruction_only.html.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Audio Annotation", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "We exclude the period symbol from all the captions, convert numbers to words using num2words 3 and correct grammar errors by languagetool 4 . We then tokenize words with spacy 5 . Finally, we build a dictionary V with a size of 4506 by choosing all the unique tokens. Figure 3 qualitatively compares some caption examples between our AudioCaps and two captioning datasets with audio: LSMDC (Rohrbach et al., 2017) and MSR-VTT (Xu et al., 2016) . Since both LSMDC and MSR-VTT focus more on describing videos than audios, their captions are characterized by visually grounded vocabularies (blue). On the other hand, the captions of AudioCaps accompany sound-based vocabularies (red).", |
|
"cite_spans": [ |
|
{ |
|
"start": 390, |
|
"end": 413, |
|
"text": "(Rohrbach et al., 2017)", |
|
"ref_id": "BIBREF61" |
|
}, |
|
{ |
|
"start": 426, |
|
"end": 443, |
|
"text": "(Xu et al., 2016)", |
|
"ref_id": "BIBREF74" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 268, |
|
"end": 276, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Post-processing", |
|
"sec_num": "3.3" |
|
}, |
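
{

"text": "The post-processing pipeline can be sketched as below (assuming spaCy and num2words are installed; the languagetool grammar-correction step is only noted in a comment, and this is not the authors' actual script):

from collections import Counter

import spacy
from num2words import num2words

nlp = spacy.load('en_core_web_sm')  # any English spaCy pipeline works for tokenization

def preprocess(caption):
    # Drop the period symbol and spell out digits with num2words; the grammar
    # correction done with the external languagetool is omitted in this sketch.
    caption = caption.replace('.', '')
    tokens = [tok.text.lower() for tok in nlp(caption)]
    return [num2words(int(t)) if t.isdigit() else t for t in tokens]

def build_vocab(captions, specials=('<pad>', '<sos>', '<eos>', '<unk>')):
    # Collect every unique token into a word-to-index dictionary V.
    counts = Counter(tok for cap in captions for tok in preprocess(cap))
    return {w: i for i, w in enumerate(list(specials) + sorted(counts))}",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Post-processing",

"sec_num": "3.3"

},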
|
{ |
|
"text": "We present a hierarchical captioning model that can attend to the fine details of the audio. The backbone of our model is an LSTM (Hochreiter and Schmidhuber, 1997) that we fortify with two novel components which are easily integrable with any attention-based captioning model. The topdown multi-scale encoder enables the contextual use of multi-level features, and the aligned semantic attention enhances the consistency between semantic attention and temporal attention (see Figure 4) . Our experiments in section 5.3 show that these two techniques lead to non-trivial performance improvement.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 477, |
|
"end": 486, |
|
"text": "Figure 4)", |
|
"ref_id": "FIGREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Approach", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "The input to our model are mel-frequency cepstral coefficient (MFCC) audio features (Davis and Mermelstein, 1980) and the output is a sequence of words {y m } M m=1 , each of which is a symbol from the dictionary. For text representation, we use fastText (Bojanowski et al., 2016) trained on the Common Crawl corpus to initialize the word embedding matrix W emb , which is finetuned with the model during training. We represent word sequences (e.g. attribute words for semantic attention and output words for answer captions) in a distributional space as {d n } N n=1 with d n = W emb w n where w n is a one-hot vector for n-th word in the word sequence and d n \u2208 R 300 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 84, |
|
"end": 113, |
|
"text": "(Davis and Mermelstein, 1980)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 255, |
|
"end": 280, |
|
"text": "(Bojanowski et al., 2016)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Approach", |
|
"sec_num": "4" |
|
}, |
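
{

"text": "A minimal sketch of the fastText-initialized, trainable word embedding described above (assuming PyTorch; the vocabulary and vector containers are hypothetical):

import torch
import torch.nn as nn

def init_word_embedding(vocab, fasttext_vectors, dim=300):
    # vocab: word -> index dict; fasttext_vectors: word -> 300-d vector mapping
    # (e.g. loaded from the Common Crawl fastText release); both are hypothetical
    # containers, not objects defined in the paper.
    weight = 0.01 * torch.randn(len(vocab), dim)
    for word, idx in vocab.items():
        if word in fasttext_vectors:
            weight[idx] = torch.as_tensor(fasttext_vectors[word], dtype=torch.float32)
    # freeze=False keeps W_emb trainable so it can be fine-tuned with the model.
    return nn.Embedding.from_pretrained(weight, freeze=False)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Approach",

"sec_num": "4"

},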
|
{ |
|
"text": "Unlike speech data, sound in the wild is not always continuous. It can be often brief, noisy, occluded, in-the-distance and randomly sparsed throughout the audio. Hence, the lower-level features can be useful to capture such characteristics of natural sound, although they may lack the semantics of the higher-level features. Thus, the joint use of these two levels of features can be mutually beneficial.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Top-down Multi-scale Encoder", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "The top-down multi-scale encoder takes as input the two-level audio embedding {f t } T t=1 , {c t } T t=1 and generates the fused encoding vector, where T is the sequence length of the audio. For input, we use the features from the two layers of the pretrained VGGish network : the fc2 vector {f t } T t=1 as a high-level semantic feature, and the conv4 vector {c t } T t=1 as a mid-level feature.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Top-down Multi-scale Encoder", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "The first level of hierarchy encodes high-level features {f t } T t=1 using a bi-directional LSTM. We regard the last hidden state as the global audio embedding h ctxt \u2208 R I :", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Top-down Multi-scale Encoder", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "\u2190 \u2192 h a1 t = biLSTM(f t , \u2212 \u2192 h a1 t\u22121 , \u2190 \u2212 h a1 t+1 ),", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Top-down Multi-scale Encoder", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "h ctxt = W c [ \u2212 \u2192 h a1 T ; \u2190 \u2212 h a1 1 ] + b c ,", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "Top-down Multi-scale Encoder", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "where W c \u2208 R I\u00d7D 1 and b c \u2208 R I are parameters, I is the dimension of input to the next layer and D 1 is the dimension of the first layer hidden states.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Top-down Multi-scale Encoder", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "We then reshape and encode mid-level features {c t } T t=1 \u2208 R 512 using another bi-directional LSTM. In order to inject the global semantics, we perform an element-wise addition of h ctxt to the mid-level feature along the time axis, and feed them into the bi-directional LSTM one at a time, producing a hidden state \u2190 \u2192 h a2 t \u2208 R D 2 at each step:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Top-down Multi-scale Encoder", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "\u2190 \u2192 h a2 t = biLSTM(c t + h ctxt , \u2212 \u2192 h a2 t\u22121 , \u2190 \u2212 h a2 t+1 ). (3)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Top-down Multi-scale Encoder", |
|
"sec_num": "4.1" |
|
}, |
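
{

"text": "For concreteness, a minimal PyTorch sketch of the top-down multi-scale encoder of Eqs. (1)-(3) follows; the hidden sizes and the VGGish fc2/conv4 feature shapes are illustrative assumptions rather than the authors' exact configuration:

import torch
import torch.nn as nn

class TopDownEncoder(nn.Module):
    # Sketch of Eqs. (1)-(3): a biLSTM over high-level (fc2) features yields a global
    # context h_ctxt that is added to mid-level (conv4) features before a second biLSTM.
    def __init__(self, high_dim=128, mid_dim=512, d1=256, d2=256):
        super().__init__()
        self.high_rnn = nn.LSTM(high_dim, d1, batch_first=True, bidirectional=True)
        self.ctxt_proj = nn.Linear(2 * d1, mid_dim)   # W_c, b_c of Eq. (2); here I = mid_dim
        self.mid_rnn = nn.LSTM(mid_dim, d2, batch_first=True, bidirectional=True)

    def forward(self, f, c):
        # f: (B, T, high_dim) fc2 features; c: (B, T, mid_dim) conv4 features.
        h1, _ = self.high_rnn(f)                                          # Eq. (1)
        d1 = self.high_rnn.hidden_size
        last_fwd, first_bwd = h1[:, -1, :d1], h1[:, 0, d1:]
        h_ctxt = self.ctxt_proj(torch.cat([last_fwd, first_bwd], dim=-1))  # Eq. (2)
        h2, _ = self.mid_rnn(c + h_ctxt.unsqueeze(1))                      # Eq. (3), broadcast over time
        return h2                                                          # (B, T, 2 * d2)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Top-down Multi-scale Encoder",

"sec_num": "4.1"

},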
|
{ |
|
"text": "In many captioning models Yu et al., 2017; Laokulrat et al., 2018; Long et al., 2018) , semantic attention has been independently used from temporal/spatial attention. However, it can be troublesome because there may exist some discrepancies between the two attentions i.e. they do not attend to the same part of the input. For instance, given an audio of a cat meowing and a baby crying, temporal attention may attend to the crying baby while semantic attention attends to the word cat. We propose a simple yet effective approach that implicitly forces both semantic and temporal/spatial attention to be correctly aligned to one another to maximize the mutual consistency. For semantic attention, we extract a set of N attribute words for each audio: following You et al. (2016), we retrieve the nearest training audio from the subset of AudioSet and transfer its labels as attribute words. We encode each attribute word vector using a bi-directional LSTM (named semantic encoder):", |
|
"cite_spans": [ |
|
{ |
|
"start": 26, |
|
"end": 42, |
|
"text": "Yu et al., 2017;", |
|
"ref_id": "BIBREF79" |
|
}, |
|
{ |
|
"start": 43, |
|
"end": 66, |
|
"text": "Laokulrat et al., 2018;", |
|
"ref_id": "BIBREF46" |
|
}, |
|
{ |
|
"start": 67, |
|
"end": 85, |
|
"text": "Long et al., 2018)", |
|
"ref_id": "BIBREF49" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Aligned Semantic Attention", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "\u2190 \u2192 h w n = biLSTM(d n , \u2212 \u2192 h w n\u22121 , \u2190 \u2212 h w n+1 ),", |
|
"eq_num": "(4)" |
|
} |
|
], |
|
"section": "Aligned Semantic Attention", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "where d n is the input text representation of the attribute word sequence. We then align these semantic word features \u2190 \u2192 h w n to the temporal axis of the audio features \u2190 \u2192 h a2 t via the attention flow layer (Seo et al., 2017) . For notational simplicity, we omit the bidirectional arrow in the following.", |
|
"cite_spans": [ |
|
{ |
|
"start": 211, |
|
"end": 229, |
|
"text": "(Seo et al., 2017)", |
|
"ref_id": "BIBREF65" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Aligned Semantic Attention", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Attention flow layer. We first compute the similarity matrix, S \u2208 R T \u00d7N between each pair of audio and word features using the score function \u03b1(h a2 t , h w n ) \u2208 R:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Aligned Semantic Attention", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "\u03b1(h a2 t , h w n ) = W \u03b1 [h a2 t ; h w n ; h a2 t \u2022 h w n ],", |
|
"eq_num": "(5)" |
|
} |
|
], |
|
"section": "Aligned Semantic Attention", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "S tn = \u03b1(h a2 t , h w n ),", |
|
"eq_num": "(6)" |
|
} |
|
], |
|
"section": "Aligned Semantic Attention", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "where \u2022 is element-wise multiplication.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Aligned Semantic Attention", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "We then use S to obtain the attentions and the attended vectors in two directions: word-to-audio {h w t } T t=1 \u2208 R D 2 and audio-to-wordh a2 \u2208 R D 2 :", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Aligned Semantic Attention", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "a t = softmax(S t: ),h w t = n a tn h w n ,", |
|
"eq_num": "(7)" |
|
} |
|
], |
|
"section": "Aligned Semantic Attention", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "b = softmax(max row (S)),h a2 = t b t h a2 t ,", |
|
"eq_num": "(8)" |
|
} |
|
], |
|
"section": "Aligned Semantic Attention", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "where", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Aligned Semantic Attention", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "a t \u2208 R N , b \u2208 R T .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Aligned Semantic Attention", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Lastly, we concatenate them into {h f low t } T t=1 \u2208 R 4D 2 , while keeping the temporal axis intact:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Aligned Semantic Attention", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "h f low t = [h a2 t ;h w t ; h a2 t \u2022h w t ; h a2 t \u2022h a2 ]. (9)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Aligned Semantic Attention", |
|
"sec_num": "4.2" |
|
}, |
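
{

"text": "A minimal PyTorch sketch of the attention flow computation in Eqs. (5)-(9) follows; it mirrors the BiDAF-style formulation of Seo et al. (2017) used above, with illustrative dimensions, and is not the authors' released implementation:

import torch
import torch.nn as nn
import torch.nn.functional as F

class AttentionFlow(nn.Module):
    # Sketch of the aligned semantic attention flow (Eqs. 5-9).
    def __init__(self, d2=512):
        super().__init__()
        self.w_alpha = nn.Linear(3 * d2, 1, bias=False)   # W_alpha of Eq. (5)

    def forward(self, h_a, h_w):
        # h_a: (B, T, d2) audio states; h_w: (B, N, d2) attribute-word states.
        T, N = h_a.size(1), h_w.size(1)
        ha = h_a.unsqueeze(2).expand(-1, -1, N, -1)
        hw = h_w.unsqueeze(1).expand(-1, T, -1, -1)
        S = self.w_alpha(torch.cat([ha, hw, ha * hw], dim=-1)).squeeze(-1)  # Eqs. (5)-(6): (B, T, N)

        a = F.softmax(S, dim=-1)                            # Eq. (7): word-to-audio attention
        h_w_att = torch.bmm(a, h_w)                         # (B, T, d2)
        b = F.softmax(S.max(dim=-1).values, dim=-1)         # Eq. (8): audio-to-word attention
        h_a_att = torch.bmm(b.unsqueeze(1), h_a)            # (B, 1, d2)

        # Eq. (9): concatenate along the feature axis while keeping the time axis intact.
        return torch.cat([h_a, h_w_att, h_a * h_w_att, h_a * h_a_att.expand_as(h_a)], dim=-1)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Aligned Semantic Attention",

"sec_num": "4.2"

},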
|
{ |
|
"text": "Temporal attention over attention flow. We now have an embedding that aligns the semantic features of words with the time steps of audio features. Subsequently, we apply temporal attention over it; the attention weight is calculated as in . Specifically, we use the global method for each t in {h f low t } T t=1 :", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Aligned Semantic Attention", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "\u03b1 m = align(h dec m , h f low t ),", |
|
"eq_num": "(10)" |
|
} |
|
], |
|
"section": "Aligned Semantic Attention", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "c m = t \u03b1 mt h f low t ,", |
|
"eq_num": "(11)" |
|
} |
|
], |
|
"section": "Aligned Semantic Attention", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "a m = tanh(W dec [c m ; h dec m ]),", |
|
"eq_num": "(12)" |
|
} |
|
], |
|
"section": "Aligned Semantic Attention", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "where h dec m \u2208 R D o is the state of the decoder LSTM, c m \u2208 R 4D 2 is the context vector, \u03b1 m \u2208 R T is the attention mask, and", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Aligned Semantic Attention", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "W dec \u2208 R D o \u00d7(4D 2 +D o ) is a parameter.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Aligned Semantic Attention", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Next, we obtain the output word probability:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Aligned Semantic Attention", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "s m = softmax(W o a m )", |
|
"eq_num": "(13)" |
|
} |
|
], |
|
"section": "Aligned Semantic Attention", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "where", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Aligned Semantic Attention", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "W o \u2208 R V \u00d7D o .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Aligned Semantic Attention", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Finally, we select the output word as y m+1 = argmax s\u2208V (s m ). We repeat this process until y m+1 reaches an EOS token. The model is trained to maximize the loglikelihood assigned to the target labels via the softmax as done in most captioning models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Aligned Semantic Attention", |
|
"sec_num": "4.2" |
|
}, |
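
{

"text": "The decoding step of Eqs. (10)-(13) can be sketched as below (assuming PyTorch and a bilinear align() score, which is an assumption); at inference the most probable word is fed back until EOS, and training maximizes the log-likelihood as stated above:

import torch
import torch.nn as nn
import torch.nn.functional as F

class AttentiveDecoderStep(nn.Module):
    # Sketch of one decoding step (Eqs. 10-13) with Luong-style global attention
    # over the aligned features h_flow; sizes are illustrative.
    def __init__(self, d_flow, d_o, vocab_size):
        super().__init__()
        self.score = nn.Linear(d_o, d_flow, bias=False)
        self.w_dec = nn.Linear(d_flow + d_o, d_o)     # W_dec of Eq. (12)
        self.w_out = nn.Linear(d_o, vocab_size)       # W_o of Eq. (13)

    def forward(self, h_dec, h_flow):
        # h_dec: (B, d_o) decoder LSTM state at step m; h_flow: (B, T, d_flow).
        scores = torch.bmm(h_flow, self.score(h_dec).unsqueeze(-1)).squeeze(-1)
        alpha = F.softmax(scores, dim=-1)                                 # Eq. (10): attention mask
        context = torch.bmm(alpha.unsqueeze(1), h_flow).squeeze(1)        # Eq. (11): context vector
        a = torch.tanh(self.w_dec(torch.cat([context, h_dec], dim=-1)))   # Eq. (12)
        return F.softmax(self.w_out(a), dim=-1)                           # Eq. (13): word probabilities",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Aligned Semantic Attention",

"sec_num": "4.2"

},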
|
{ |
|
"text": "We perform several quantitative evaluations to provide more insights about our AudioCaps dataset. Specifically, our experiments are designed to answer the following questions:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "1. Are the collected captions indeed faithful to the audio inputs?", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "2. Which audio features are useful for audio captioning on our dataset?", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "3. What techniques can improve the performance of audio captioning?", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We present further implementation details and more experimental results in the Appendix. Some resulting audio-caption pairs can be found at https://audiocaps.github.io/supp.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Before presenting the results of our experiments on these three questions, we first explain the experimental setting and baseline models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Evaluation metrics. Audio captioning can be quantitatively evaluated by the language similarity between the predicted sentences and the groundtruths (GTs) such as BLEU (Papineni et al., 2002) , CIDEr (Vedantam et al., 2015) , METEOR (Banerjee and Lavie, 2005), ROUGE-L (Lin, 2004) and SPICE (Anderson et al., 2016) . In all metrics, higher scores indicate better performance.", |
|
"cite_spans": [ |
|
{ |
|
"start": 168, |
|
"end": 191, |
|
"text": "(Papineni et al., 2002)", |
|
"ref_id": "BIBREF58" |
|
}, |
|
{ |
|
"start": 200, |
|
"end": 223, |
|
"text": "(Vedantam et al., 2015)", |
|
"ref_id": "BIBREF71" |
|
}, |
|
{ |
|
"start": 269, |
|
"end": 280, |
|
"text": "(Lin, 2004)", |
|
"ref_id": "BIBREF47" |
|
}, |
|
{ |
|
"start": 291, |
|
"end": 314, |
|
"text": "(Anderson et al., 2016)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setting", |
|
"sec_num": "5.1" |
|
}, |
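
{

"text": "As a lightweight illustration of sentence-level scoring (the reported numbers use the standard caption-evaluation tooling, not this snippet), a per-sentence BLEU-4 against the five ground-truth captions can be computed with NLTK:

from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction

def bleu4(hypothesis, references):
    # hypothesis: token list; references: list of token lists (e.g. the five GT captions).
    smoother = SmoothingFunction().method1
    return sentence_bleu(references, hypothesis, weights=(0.25, 0.25, 0.25, 0.25),
                         smoothing_function=smoother)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Experimental Setting",

"sec_num": "5.1"

},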
|
{ |
|
"text": "Audio features. Audios are resampled to 16kHz, and stereo is converted into mono by averaging both channels. We zero-pad clips that are shorter than 10 seconds and extract three levels of audio features. For the low-level audio feature, the lengthy raw audios are average-pooled by the WaveNet encoder as in . For the mid-level feature, mel-frequency cepstral coefficients (MFCC) ( Davis and Mermelstein, 1980) are extracted using librosa (McFee et al., 2015) with a window size of 1024, an overlap of 360 and the number of frames at 240, and encoded further with a bi-directional LSTM followed by a gated convolutional encoder . Lastly, we use two high-level features: the 24th output layer of SoundNet 6 (Aytar et al., 2016) with a (10 \u00d7 1024) dimension and the final output embedding of VGGish 7 ) with a (10 \u00d7 128) dimension of (time \u00d7 embedding).", |
|
"cite_spans": [ |
|
{ |
|
"start": 382, |
|
"end": 410, |
|
"text": "Davis and Mermelstein, 1980)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 439, |
|
"end": 459, |
|
"text": "(McFee et al., 2015)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setting", |
|
"sec_num": "5.1" |
|
}, |
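
{

"text": "A rough sketch of the MFCC front-end described above, using librosa; the number of cepstral coefficients is an assumption, as it is not stated in the paper:

import numpy as np
import librosa

def extract_mfcc(path, sr=16000, n_mfcc=40, n_fft=1024, overlap=360, n_frames=240):
    # 16 kHz mono audio, a 1024-sample window with a 360-sample overlap,
    # zero-padded/cropped to 240 frames.
    y, _ = librosa.load(path, sr=sr, mono=True)          # resample and downmix
    y = librosa.util.fix_length(y, size=10 * sr)         # zero-pad clips shorter than 10 s
    mfcc = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=n_mfcc, n_fft=n_fft, hop_length=n_fft - overlap)
    return mfcc[:, :n_frames].T.astype(np.float32)       # (time <= 240, n_mfcc)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Experimental Setting",

"sec_num": "5.1"

},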
|
{ |
|
"text": "Video features. To contrast with video captioning datasets, we also extract video features at the frame-level and at the sequence-level from YouTube clips. For frame features, we use VGG16 (Simonyan and Zisserman, 2015) pretrained on the ILSVRC-2014 dataset (Russakovsky et al., 2015) . For sequence features, we use C3D 8 (Tran et al., 2015) pretrained on the Sport1M dataset (Karpathy et al., 2014) . We extract subsequent frames with 50% overlap centered at each time step on the input clips for Au-dioSet videos, while proceeding with no overlap for MSR-VTT clips as in the original paper. We sample videos at 25fps.", |
|
"cite_spans": [ |
|
{ |
|
"start": 189, |
|
"end": 219, |
|
"text": "(Simonyan and Zisserman, 2015)", |
|
"ref_id": "BIBREF67" |
|
}, |
|
{ |
|
"start": 258, |
|
"end": 284, |
|
"text": "(Russakovsky et al., 2015)", |
|
"ref_id": "BIBREF62" |
|
}, |
|
{ |
|
"start": 323, |
|
"end": 342, |
|
"text": "(Tran et al., 2015)", |
|
"ref_id": "BIBREF70" |
|
}, |
|
{ |
|
"start": 377, |
|
"end": 400, |
|
"text": "(Karpathy et al., 2014)", |
|
"ref_id": "BIBREF43" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setting", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Retrieval methods. As straightforward baselines, we test the 1-nearest search with audio features, denoted by 1NN-MFCC, 1NN-SoundNet and 1NN-VGGish. For a query audio, we find its closest training audio using the 2 distance on the features and return its text as a prediction. We mean-pool all the audio features over time, because it empirically leads to a strong performance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baselines", |
|
"sec_num": "5.2" |
|
}, |
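
{

"text": "A minimal sketch of the 1NN retrieval baseline (mean-pooled features, L2 distance); the array containers below are hypothetical:

import numpy as np

def one_nn_caption(query_feat, train_feats, train_captions):
    # query_feat: (T, D) array; train_feats: list of (T_i, D) arrays (e.g. VGGish);
    # train_captions: list of caption strings aligned with train_feats.
    q = query_feat.mean(axis=0)
    pooled = np.stack([f.mean(axis=0) for f in train_feats])
    dists = np.linalg.norm(pooled - q, axis=1)
    return train_captions[int(np.argmin(dists))]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Baselines",

"sec_num": "5.2"

},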
|
{ |
|
"text": "LSTM methods. As simple generative baselines, we test with the LSTM decoder, denoted by -LSTM postfix, where the encoded audio feature is set as the initial state of the LSTM. For instance, WaveNet-LSTM is the model with the WaveNet encoder and the LSTM decoder. We use a singlelayer LSTM with dropout (Srivastava et al., 2014) and layer normalization (Ba et al., 2016) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 302, |
|
"end": 327, |
|
"text": "(Srivastava et al., 2014)", |
|
"ref_id": "BIBREF68" |
|
}, |
|
{ |
|
"start": 352, |
|
"end": 369, |
|
"text": "(Ba et al., 2016)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baselines", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Attention models. We test two popular attention models developed in video captioning research: (i) TempAtt generates captions by selectively attending to audio features over time, and (ii) SemAtt creates text attending to attribute words as secondary information.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baselines", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Our models. We denote our top-down multiscale encoder as the prefix TopDown-and aligned semantic attention as AlignedAtt-.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baselines", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Upper-bounds. Given that each test data has five human-generated captions, we perform cross validation on the five GT captions as an upperbound of performance denoted as Human. We regard one of five human annotations as model prediction and compute the performance metric with the other four as ground-truths. After doing this on each of five, we then average the scores.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baselines", |
|
"sec_num": "5.2" |
|
}, |
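
{

"text": "The Human upper bound can be sketched as a leave-one-out loop over the five ground-truth captions; the metric interface here is hypothetical:

def human_upper_bound(gt_captions, metric):
    # Score each of the five GT captions against the remaining four and average.
    # metric(hypothesis, references) is any caption scorer returning a float.
    scores = []
    for i, hyp in enumerate(gt_captions):
        refs = gt_captions[:i] + gt_captions[i + 1:]
        scores.append(metric(hyp, refs))
    return sum(scores) / len(scores)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Baselines",

"sec_num": "5.2"

},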
|
{ |
|
"text": "8 https://github.com/facebook/C3D.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baselines", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "MSR-VTT AUDIO (a) VISUAL (b) BOTH (c) 33.2% (c) 43.7% (b) 23.1% (a) 4.2% (a) 33.5% (c) 62.3% (b) Figure 5: Comparison of vocabulary tag distribution between AudioCaps and MSR-VTT.", |
|
"cite_spans": [ |
|
{ |
|
"start": 34, |
|
"end": 37, |
|
"text": "(c)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 44, |
|
"end": 47, |
|
"text": "(c)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 54, |
|
"end": 57, |
|
"text": "(b)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 64, |
|
"end": 67, |
|
"text": "(a)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 73, |
|
"end": 76, |
|
"text": "(a)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 83, |
|
"end": 86, |
|
"text": "(c)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 93, |
|
"end": 96, |
|
"text": "(b)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "AudioCaps", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We discuss experimental results in response to the three questions regarding the AudioCaps dataset.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "We first evaluate whether the collected audiobased captions are indeed loyal to the audio clips.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Audio vs Video Captioning", |
|
"sec_num": "5.3.1" |
|
}, |
|
{ |
|
"text": "As one possible method to validate it, we perform comparative experiments with the video-oriented MSR-VTT dataset (Xu et al., 2016) . Note that MSR-VTT and AudioCaps both provide pairs of audio clips and its corresponding videos, allowing us to perform this comparative study. We hypothesize that the captions from MSR-VTT would not coherently map to audio features, because they are written mainly based on the visual information. In contrast, AudioCaps captions would be better aligned to audio features than visual features. The results in Table 4 support our hypothesis. In MSR-VTT, the video-based captioning model C3D-LSTM attains better scores than the preceding three audio-captioning models * -LSTM, while in AudioCaps the video-based model performs far worse than the audio models. This may be due to our collection method of AudioCaps, which encourages turkers to submit the descriptions based on the audio rather than the visual.", |
|
"cite_spans": [ |
|
{ |
|
"start": 114, |
|
"end": 131, |
|
"text": "(Xu et al., 2016)", |
|
"ref_id": "BIBREF74" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 543, |
|
"end": 550, |
|
"text": "Table 4", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Audio vs Video Captioning", |
|
"sec_num": "5.3.1" |
|
}, |
|
{ |
|
"text": "Vocabulary comparison. We also make comparisons between AudioCaps and MSR-VTT in terms of vocabulary usage in the captions. We select the 1,800 most frequent vocabularies of verbs, adjectives and adverbs from each dataset, and run a user study in which three different workers are asked to categorize each sampled word into one of (Audio, Visual, Both, Not Applicable). The category label per word is decided by a majority vote of three workers' opinions. We use AMT once more to collect the unbiased opinions. In or- der to guarantee thoughtful submissions, we ask the workers to provide a description using the word. We compensate $0.05 per word to Englishspeaking workers with a 95% approval rate. Figure 5 shows that AudioCaps has more vocabularies tagged as Audio (e.g. neighs, rustling) by 18.9%p more than MSR-VTT. Furthermore, 56.3% of the total vocabularies in AudioCaps are categorized as audio-related, that is, labeled as Audio or Both (e.g. vibrating, applauds). Hence, this vocabulary comparison result reassures that Au-dioCaps is more audio-oriented than MSR-VTT.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 701, |
|
"end": 709, |
|
"text": "Figure 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Audio vs Video Captioning", |
|
"sec_num": "5.3.1" |
|
}, |
|
{ |
|
"text": "The methods in the second group of Table 2 are compared to investigate which audio features are more suitable for captioning on AudioCaps. The best results are obtained by VGGish-LSTM. This may be because VGGish is pretrained on YouTube audio clips, similar to AudioCaps. Although the topics of YouTube are extremely diverse, the domain proximity may help VGGish learn more utilizable features for AudioCaps. SoundNet-LSTM shows inferior performance compared to VGGish-LSTM, one possible reason being because it is pretrained with Flickr videos, which are rather distant in domain from the source of our dataset, in terms of topic diversity and the amount of possible noise. MFCC-LSTM does not perform as well as VGGish-LSTM, even with the similar convolutional recurrent encoder. This result hints that pretraining with a proper dataset is essential for audio captioning. A comparison between MFCC-LSTM and WaveNet-LSTM reveals that using MFCC is better than directly taking raw waveform as input. The raw waveform is relatively long (>500\u00d7 longer than MFCC); hence, it may pose a difficulty for RNN-based encoders to precisely represent the whole audio context.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 35, |
|
"end": 42, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Comparison of Audio Features", |
|
"sec_num": "5.3.2" |
|
}, |
|
{ |
|
"text": "Temporal attention consistently boosts the captioning performance of the LSTM decoder in all audio features, as shown in the models with TempAtt-prefix in Table 2 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 155, |
|
"end": 162, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Comparison of Models", |
|
"sec_num": "5.3.3" |
|
}, |
|
{ |
|
"text": "No-(Ours) a man and woman talking, then a baby crying (1NN-VGGish) an engine runs, and people speak (SemTempAtt) a dog barks and a woman laughs (GT) a child cries followed by a man and woman speaking and then the child resumes sobbing (Ours) a truck engine is running, a siren is occurring, and an adult male speaks (1NN-VGGish) a loud fire engine is followed by an emergency siren (SemTempAtt) emergency sirens and a siren blaring (GT) a large engine passes as people speak followed by a siren (Ours) a large explosion followed by a loud pop (1NN-VGGish) a man speaking followed by a loud bang (SemTempAtt) a large explosion followed by a loud splash and thunder (GT) a whooshing noise followed by an explosion (Ours) a small motor is running, whirring occurs, and a high-pitched whine is present (1NN-VGGish) a drill is operated, then a man speaks and restarts the drill (SemTempAtt) a small motor running and a man speaking (GT) a drone whirring followed by a crashing sound Figure 6 : Four examples of audio captioning with captured video frames, grouthtruths (GT), and generated captions by our method (Ours) and baselines. They can be heard at https://audiocaps.github.io/supp.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 978, |
|
"end": 986, |
|
"text": "Figure 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Comparison of Models", |
|
"sec_num": "5.3.3" |
|
}, |
|
{ |
|
"text": "tably, a large performance gain is observed for TempAtt-MFCC-LSTM. This may be because MFCC features are transformed to temporally longer features than SoundNet and VGGish features (240 > 10), and thus allow temporal attention to better aid the model and bypass the vanishing gradient problem.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparison of Models", |
|
"sec_num": "5.3.3" |
|
}, |
|
{ |
|
"text": "The semantic attention is also favorable for captioning performance, as SemTempAtt(1NN)-VGGish-LSTM in Table 3 slightly outperforms TempAtt-VGGish(FC2)-LSTM in Table 2 . That is, the additional use of semantic attention enhances the temporal attention model. Obviously, when using GT labels instead of 1NN retrieved labels as attribute words, the performance increases much, hinting that better semantic attributes are more synergetic with the aligned attention.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 103, |
|
"end": 110, |
|
"text": "Table 3", |
|
"ref_id": "TABREF2" |
|
}, |
|
{ |
|
"start": 160, |
|
"end": 167, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Comparison of Models", |
|
"sec_num": "5.3.3" |
|
}, |
|
{ |
|
"text": "The comparison between different layers (C4, C3, FC2) confirms the effectiveness of jointly using multi-level features. The fused features by the top-down multi-scale encoder (i.e. TopDown-) prove the most beneficial as they outperform their counterparts in Table 2 . However, a stack of (FC2,C4) layers performs the best, while the three layer stack is slightly inferior, presumably due to overfitting and weak information flow between the upper and lower levels of the stacks. Finally, our best performing model is TopDown-AlignedAtt where both the topdown multi-scale encoder and aligned semantic attention are jointly used. We postulate that the two techniques synergize well thanks to rich information provided by TopDown allowing for better attention alignment. Figure 6 shows selected examples of audio captioning. In each set, we show a video frame, GT and text descriptions generated by our method and baselines. Many audio clips consist of sounds with multiple sources in sequence, for which baselines often omit some details or mistakenly order the event sequence, whereas our model is better at capturing the details in the correct order.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 258, |
|
"end": 265, |
|
"text": "Table 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 768, |
|
"end": 776, |
|
"text": "Figure 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Comparison of Models", |
|
"sec_num": "5.3.3" |
|
}, |
|
{ |
|
"text": "We addressed a new problem of audio captioning for sound in the wild. Via Amazon Mechanical Turk, we contributed a large-scale dataset named AudioCaps, consisting of 46K pairs of audio clips and human-written text. In our experiments, we showed that the collected captions were indeed faithful to the audio inputs as well as improve the captions by two newly proposed components: the top-down multi-scale encoder and aligned semantic attention.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "There are several possible directions beyond this work. First, we can further expand the scope of AudioCaps. Second, our model is integrable with speech counterparts to achieve more complete auditory captioning tasks.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Clips Sentences Unique clips Tokens Vocabs Nouns Verbs Adjectives Adverbs Duration(h) MSR-VTT 10,000 200,000 7,180 1,856,523 29,316 16,437 6,379 3,761 872 41.2 LSMDC 128,085 128,118 200 1,157,155 22,500 12,181 3,394 5,633 1,292 147 AudioCaps 39,106 43,022 39,106 567,927 4,506 2,747 1,825 766 353 108.6 Table 5 : Comparison of AudioCaps with MSR-VTT (Xu et al., 2016) , LSMDC (Rohrbach et al., 2017) . ", |
|
"cite_spans": [ |
|
{ |
|
"start": 370, |
|
"end": 387, |
|
"text": "(Xu et al., 2016)", |
|
"ref_id": "BIBREF74" |
|
}, |
|
{ |
|
"start": 396, |
|
"end": 419, |
|
"text": "(Rohrbach et al., 2017)", |
|
"ref_id": "BIBREF61" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 109, |
|
"end": 330, |
|
"text": "7,180 1,856,523 29,316 16,437 6,379 3,761 872 41.2 LSMDC 128,085 128,118 200 1,157,155 22,500 12,181 3,394 5,633 1,292 147 AudioCaps 39,106 43,022 39,106 567,927 4,506 2,747 1,825 766 353 108.6 Table 5", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Dataset", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://github.com/savoirfairelinux/ num2words.4 https://github.com/languagetool-org/ languagetool.5 https://spacy.io.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://github.com/cvondrick/soundnet. 7 https://github.com/tensorflow/models/ tree/master/research/audioset.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We would like to thank SNU Vision & Learning Lab members and Yunseok Jang for the helpful comments and discussions. This work is supported by Kakao and Kakao Brain corporations and the international cooperation program by the NRF of Korea (NRF-2018K2A9A2A11080927). Gunhee Kim is the corresponding author.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In the supplemental material, we enlist the following which may shed further insights:\u2022 Additional related work [section A]\u2022 Additional dataset analysis [section B] \u2022 Training Details [section C]", |
|
"cite_spans": [ |
|
{ |
|
"start": 153, |
|
"end": 164, |
|
"text": "[section B]", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Appendix", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Audio-Visual correspondence. Over the past year, a great interest has been shone to the interconnection of auditory and visual senses. The task of localizing the sound source within the visual input have been actively explored (Nagrani et al., 2017; Chung et al., 2018; Senocak et al., 2018; Afouras et al., 2018; Gao et al., 2018; Arandjelovic and Zisserman, 2018; Zhao et al., 2018) , along with blind source separation aided by visual features (Ephrat et al., 2018) and learning of audio-visual multisensory representation (Owens and Efros, 2018) . These previous studies compensate the lack of information in the auditory input with visual information, whereas this work focuses solely on the auditory input to generate informative descriptions.", |
|
"cite_spans": [ |
|
{ |
|
"start": 227, |
|
"end": 249, |
|
"text": "(Nagrani et al., 2017;", |
|
"ref_id": "BIBREF55" |
|
}, |
|
{ |
|
"start": 250, |
|
"end": 269, |
|
"text": "Chung et al., 2018;", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 270, |
|
"end": 291, |
|
"text": "Senocak et al., 2018;", |
|
"ref_id": "BIBREF64" |
|
}, |
|
{ |
|
"start": 292, |
|
"end": 313, |
|
"text": "Afouras et al., 2018;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 314, |
|
"end": 331, |
|
"text": "Gao et al., 2018;", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 332, |
|
"end": 365, |
|
"text": "Arandjelovic and Zisserman, 2018;", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 366, |
|
"end": 384, |
|
"text": "Zhao et al., 2018)", |
|
"ref_id": "BIBREF80" |
|
}, |
|
{ |
|
"start": 447, |
|
"end": 468, |
|
"text": "(Ephrat et al., 2018)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 526, |
|
"end": 549, |
|
"text": "(Owens and Efros, 2018)", |
|
"ref_id": "BIBREF56" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A Related Work", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The full ontology of selected labels is outlined in Figure 7 . Figure 8 shows the number of clips per word label. The original AudioSet has an extreme label bias. For instance, a difference of 660,282 between the average of top 3 most common and average of top 3 most uncommon classes. Whereas our dataset at the moment has a difference of 971. Notice the label bias is significantly reduced in comparison to the original AudioSet. We plan to reduce this further in the upcoming releases. Table 5 compares our audio captioning dataset with some representative benchmarks of video captioning: MSR-VTT (Xu et al., 2016) and LSMDC (Rohrbach et al., 2017) . One interesting Source Ambiguous property of our dataset is that the portion of verbs in the vocabularies are larger than the others. This may imply that the captions describe what is happening rather than what is in the content.", |
|
"cite_spans": [ |
|
{ |
|
"start": 600, |
|
"end": 617, |
|
"text": "(Xu et al., 2016)", |
|
"ref_id": "BIBREF74" |
|
}, |
|
{ |
|
"start": 628, |
|
"end": 651, |
|
"text": "(Rohrbach et al., 2017)", |
|
"ref_id": "BIBREF61" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 52, |
|
"end": 60, |
|
"text": "Figure 7", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 63, |
|
"end": 71, |
|
"text": "Figure 8", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 489, |
|
"end": 496, |
|
"text": "Table 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "B Dataset", |
|
"sec_num": null |
|
}, |
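The label-bias figure quoted above (660,282 for AudioSet versus 971 for this dataset) is the gap between the average clip count of the three most common and the three least common classes. A minimal sketch of that measure, assuming a hypothetical label_counts mapping from class label to number of clips, follows.

```python
# Minimal sketch of the label-bias measure described above: the gap between
# the average clip count of the 3 most common classes and that of the 3 least
# common classes. `label_counts` (label -> number of clips) is a hypothetical
# input; the section reports a gap of 660,282 for AudioSet vs. 971 for AudioCaps.
def label_bias_gap(label_counts, k=3):
    counts = sorted(label_counts.values(), reverse=True)
    top_mean = sum(counts[:k]) / k
    bottom_mean = sum(counts[-k:]) / k
    return top_mean - bottom_mean


if __name__ == "__main__":
    toy = {"Speech": 900, "Music": 700, "Dog": 50, "Gunshot": 20, "Owl": 5}
    print(label_bias_gap(toy))  # (900+700+50)/3 - (50+20+5)/3 = 525.0
```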
|
{ |
|
"text": "All the parameters are initialized with Xavier method (Glorot and Bengio, 2010) . We apply the Adam optimizer (Kingma and Ba, 2015) with \u03b2 1 = 0.9, \u03b2 2 = 0.999 and = 1e \u2212 8. Do not describe events that may have happened in the past or future. i.e., describe the audio clip as it is (all instruction examples do this in the link above). Use Present Tense. We provide Word-labels. Feel free to actively use them in your description. Their purpose is to aid you in choosing the vocab of the sound sources. (Hover over them to obtain their definitions) Do not give speaker proper names, but rather give gender and maybe approximate age if salient. e.g., old; young; little; adult; kid; she; he; male; female. They cannot be presenters; broadcasters; announcers. Try to be Detailed and Expressive (Instruction example 3). If video hint is used, DO NOT include visuals in the video that are not present in the sound (Instruction example 1). Do not start the caption containing \"this is\", \"there is\", \"this is the sound of\", \"this sounds like\", \"you can hear\", \"in this video\".. etc. Get straight to the point. Ignore speech semantics (Instruction example 4). This includes no direction of speech!(Instruction example 4.2) If youtube link is broken, notify us via email, or type \"video unavailable\" and submit.Experts will be checking through each of your answers to block and or reject any malicious workers.Common mistake: Simply separating the sounds by multiple commas. It needs to be a connected coherent sentence! try conjunctions(immediately, shortly after, leading up to, followed by, and, along with, together with, concurrently, etc!). for Higher Acceptance Rate: Distance, Frequency (if sound is repeated Instruction 7), Speed, Volume of the sounds included in the descriptions are some of the best ways for the experts to accept the Hit. You must ACCEPT the HIT before you can submit the results. ", |
|
"cite_spans": [ |
|
{ |
|
"start": 54, |
|
"end": 79, |
|
"text": "(Glorot and Bengio, 2010)", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 110, |
|
"end": 131, |
|
"text": "(Kingma and Ba, 2015)", |
|
"ref_id": "BIBREF44" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "C Training Details", |
|
"sec_num": null |
|
} |
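A minimal sketch of the stated training setup, written in PyTorch purely for illustration (the framework, model, and learning rate are assumptions; only the Xavier initialization and the Adam hyperparameters are taken from the text above):

```python
# Sketch of the training setup described above: Xavier initialization for
# weight matrices and Adam with beta1=0.9, beta2=0.999, eps=1e-8.
# The toy model, output size, and learning rate are illustrative assumptions.
import torch
import torch.nn as nn


def xavier_init(module):
    # Apply Xavier (Glorot) initialization to linear layers.
    if isinstance(module, nn.Linear):
        nn.init.xavier_uniform_(module.weight)
        if module.bias is not None:
            nn.init.zeros_(module.bias)


# output size chosen to match the AudioCaps vocabulary (4,506 words) for illustration
model = nn.Sequential(nn.Linear(128, 256), nn.Tanh(), nn.Linear(256, 4506))
model.apply(xavier_init)

optimizer = torch.optim.Adam(
    model.parameters(),
    lr=1e-3,               # learning rate is an assumption, not stated above
    betas=(0.9, 0.999),
    eps=1e-8,
)
```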
|
], |
|
"bib_entries": { |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Table 2: Captioning results of different methods on AudioCaps measured by language similarity metrics. Methods B-1 B-2 B-3 B-4 METEOR CIDEr ROUGE-L SPICE", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Table 2: Captioning results of different methods on AudioCaps measured by language similarity metrics. Methods B-1 B-2 B-3 B-4 METEOR CIDEr ROUGE-L SPICE", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "SemTempAtt(1NN)-VGGish-LSTM (You et", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Al", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "SemTempAtt(1NN)-VGGish-LSTM (You et al., 2016) 62", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "AlignedAtt(1NN)-VGGish-LSTM", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "AlignedAtt(1NN)-VGGish-LSTM", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "AlignedAtt(GT)-VGGish-LSTM", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "AlignedAtt(GT)-VGGish-LSTM", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "The Conversation: Deep Audio-Visual Speech Enhancement", |
|
"authors": [], |
|
"year": 2018, |
|
"venue": "References Triantafyllos Afouras", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "References Triantafyllos Afouras, Joon Son Chung, and Andrew Zisserman. 2018. The Conversation: Deep Audio- Visual Speech Enhancement. In Interspeech.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "SPICE: Semantic Propositional Image Caption Evaluation", |
|
"authors": [ |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Anderson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Basura", |
|
"middle": [], |
|
"last": "Fernando", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Johnson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Gould", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "ECCV", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peter Anderson, Basura Fernando, Mark Johnson, and Stephen Gould. 2016. SPICE: Semantic Proposi- tional Image Caption Evaluation. In ECCV.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Objects that Sound", |
|
"authors": [ |
|
{ |
|
"first": "Relja", |
|
"middle": [], |
|
"last": "Arandjelovic", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Zisserman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "ECCV", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Relja Arandjelovic and Andrew Zisserman. 2018. Ob- jects that Sound. In ECCV.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "SoundNet: Learning Sound Representations from Unlabeled Video", |
|
"authors": [ |
|
{ |
|
"first": "Yusuf", |
|
"middle": [], |
|
"last": "Aytar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Carl", |
|
"middle": [], |
|
"last": "Vondrick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antonio", |
|
"middle": [], |
|
"last": "Torralba", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "NIPS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yusuf Aytar, Carl Vondrick, and Antonio Torralba. 2016. SoundNet: Learning Sound Representations from Unlabeled Video. In NIPS.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Layer Normalization. In Stat", |
|
"authors": [ |
|
{ |
|
"first": "Jimmy", |
|
"middle": [ |
|
"Lei" |
|
], |
|
"last": "Ba", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jamie", |
|
"middle": [ |
|
"Ryan" |
|
], |
|
"last": "Kiros", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Hinton", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jimmy Lei Ba, Jamie Ryan Kiros, and Geoffrey E Hin- ton. 2016. Layer Normalization. In Stat.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "METEOR: An Automatic Metric for MT Evaluation with Improved Correlation with Human Judgments", |
|
"authors": [ |
|
{ |
|
"first": "Satanjeev", |
|
"middle": [], |
|
"last": "Banerjee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alon", |
|
"middle": [], |
|
"last": "Lavie", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "ACL Workshop MTSumm", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Satanjeev Banerjee and Alon Lavie. 2005. METEOR: An Automatic Metric for MT Evaluation with Im- proved Correlation with Human Judgments. In ACL Workshop MTSumm.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Enriching Word Vectors with Subword Information", |
|
"authors": [ |
|
{ |
|
"first": "Piotr", |
|
"middle": [], |
|
"last": "Bojanowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edouard", |
|
"middle": [], |
|
"last": "Grave", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Armand", |
|
"middle": [], |
|
"last": "Joulin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "TACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Piotr Bojanowski, Edouard Grave, Armand Joulin, and Tomas Mikolov. 2016. Enriching Word Vectors with Subword Information. In TACL.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Arsha Nagrani, and Andrew Zisserman", |
|
"authors": [ |
|
{ |
|
"first": "Chung", |
|
"middle": [], |
|
"last": "Joon Son", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joon Son Chung, Arsha Nagrani, and Andrew Zisser- man. 2018. VoxCeleb2: Deep Speaker Recognition. In Interspeech.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Comparison of Parametric Representations for Monosyllabic Word Recognition in Continuously Spoken Sentences", |
|
"authors": [ |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Steven", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Davis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Mermelstein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1980, |
|
"venue": "TASSP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Steven B Davis and Paul Mermelstein. 1980. Com- parison of Parametric Representations for Mono- syllabic Word Recognition in Continuously Spoken Sentences. In TASSP.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Neural Audio Synthesis of Musical Notes with WaveNet Autoencoders", |
|
"authors": [ |
|
{ |
|
"first": "Jesse", |
|
"middle": [], |
|
"last": "Engel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cinjon", |
|
"middle": [], |
|
"last": "Resnick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Roberts", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sander", |
|
"middle": [], |
|
"last": "Dieleman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohammad", |
|
"middle": [], |
|
"last": "Norouzi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Douglas", |
|
"middle": [], |
|
"last": "Eck", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Karen", |
|
"middle": [], |
|
"last": "Simonyan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "ICML", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jesse Engel, Cinjon Resnick, Adam Roberts, Sander Dieleman, Mohammad Norouzi, Douglas Eck, and Karen Simonyan. 2017. Neural Audio Synthesis of Musical Notes with WaveNet Autoencoders. In ICML.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Looking to Listen at the Cocktail Party: A Speaker-Independent Audio-Visual Model for Speech Separation", |
|
"authors": [ |
|
{ |
|
"first": "Ariel", |
|
"middle": [], |
|
"last": "Ephrat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Inbar", |
|
"middle": [], |
|
"last": "Mosseri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oran", |
|
"middle": [], |
|
"last": "Lang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tali", |
|
"middle": [], |
|
"last": "Dekel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Wilson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Avinatan", |
|
"middle": [], |
|
"last": "Hassidim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "William", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Freeman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Rubinstein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ariel Ephrat, Inbar Mosseri, Oran Lang, Tali Dekel, Kevin Wilson, Avinatan Hassidim, William T Free- man, and Michael Rubinstein. 2018. Looking to Listen at the Cocktail Party: A Speaker-Independent Audio-Visual Model for Speech Separation. In SIG- GRAPH.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Freesound Datasets: A Platform for the Creation of Open Audio Datasets", |
|
"authors": [ |
|
{ |
|
"first": "Eduardo", |
|
"middle": [], |
|
"last": "Fonseca", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jordi", |
|
"middle": [], |
|
"last": "Pons Puig", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xavier", |
|
"middle": [], |
|
"last": "Favory", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Frederic", |
|
"middle": [ |
|
"Font" |
|
], |
|
"last": "Corbera", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dmitry", |
|
"middle": [], |
|
"last": "Bogdanov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andres", |
|
"middle": [], |
|
"last": "Ferraro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sergio", |
|
"middle": [], |
|
"last": "Oramas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alastair", |
|
"middle": [], |
|
"last": "Porter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xavier", |
|
"middle": [], |
|
"last": "Serra", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "ISMIR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eduardo Fonseca, Jordi Pons Puig, Xavier Favory, Frederic Font Corbera, Dmitry Bogdanov, Andres Ferraro, Sergio Oramas, Alastair Porter, and Xavier Serra. 2017. Freesound Datasets: A Platform for the Creation of Open Audio Datasets. In ISMIR.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Learning to Separate Object Sounds by Watching Unlabeled Video", |
|
"authors": [ |
|
{ |
|
"first": "Ruohan", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rogerio", |
|
"middle": [], |
|
"last": "Feris", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristen", |
|
"middle": [], |
|
"last": "Grauman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "ECCV", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ruohan Gao, Rogerio Feris, and Kristen Grauman. 2018. Learning to Separate Object Sounds by Watching Unlabeled Video. In ECCV.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Audio Set: An Ontology and Human-labeled Dataset for Audio Events", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Jort F Gemmeke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Daniel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dylan", |
|
"middle": [], |
|
"last": "Ellis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aren", |
|
"middle": [], |
|
"last": "Freedman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wade", |
|
"middle": [], |
|
"last": "Jansen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Channing", |
|
"middle": [], |
|
"last": "Lawrence", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manoj", |
|
"middle": [], |
|
"last": "Moore", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marvin", |
|
"middle": [], |
|
"last": "Plakal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Ritter", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "ICASSP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jort F Gemmeke, Daniel PW Ellis, Dylan Freedman, Aren Jansen, Wade Lawrence, R Channing Moore, Manoj Plakal, and Marvin Ritter. 2017. Audio Set: An Ontology and Human-labeled Dataset for Audio Events. In ICASSP.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Understanding the Difficulty of Training Deep Feedforward Neural Networks", |
|
"authors": [ |
|
{ |
|
"first": "Xavier", |
|
"middle": [], |
|
"last": "Glorot", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "AISTATS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xavier Glorot and Yoshua Bengio. 2010. Understand- ing the Difficulty of Training Deep Feedforward Neural Networks. In AISTATS.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "YouTube2Text: Recognizing and Describing Arbitrary Activities Using Semantic Hierarchies and Zero-Shot Recognition", |
|
"authors": [ |
|
{ |
|
"first": "Sergio", |
|
"middle": [], |
|
"last": "Guadarrama", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Niveda", |
|
"middle": [], |
|
"last": "Krishnamoorthy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Girish", |
|
"middle": [], |
|
"last": "Malkarnenkar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Subhashini", |
|
"middle": [], |
|
"last": "Venugopalan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Raymond", |
|
"middle": [], |
|
"last": "Mooney", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Trevor", |
|
"middle": [], |
|
"last": "Darrell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kate", |
|
"middle": [], |
|
"last": "Saenko", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "ICCV", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sergio Guadarrama, Niveda Krishnamoorthy, Girish Malkarnenkar, Subhashini Venugopalan, Raymond Mooney, Trevor Darrell, and Kate Saenko. 2013. YouTube2Text: Recognizing and Describing Ar- bitrary Activities Using Semantic Hierarchies and Zero-Shot Recognition. In ICCV.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Deep Clustering: Discriminative Embeddings for Segmentation and Separation", |
|
"authors": [ |
|
{ |
|
"first": "Zhuo", |
|
"middle": [], |
|
"last": "John R Hershey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonathan", |
|
"middle": [ |
|
"Le" |
|
], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shinji", |
|
"middle": [], |
|
"last": "Roux", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Watanabe", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "ICASSP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John R Hershey, Zhuo Chen, Jonathan Le Roux, and Shinji Watanabe. 2016. Deep Clustering: Discrim- inative Embeddings for Segmentation and Separa- tion. In ICASSP.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "CNN Architectures for Large-Scale Audio Classification", |
|
"authors": [ |
|
{ |
|
"first": "Shawn", |
|
"middle": [], |
|
"last": "Hershey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sourish", |
|
"middle": [], |
|
"last": "Chaudhuri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Daniel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Ellis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Jort", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aren", |
|
"middle": [], |
|
"last": "Gemmeke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Channing", |
|
"middle": [], |
|
"last": "Jansen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manoj", |
|
"middle": [], |
|
"last": "Moore", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Devin", |
|
"middle": [], |
|
"last": "Plakal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Platt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Rif", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bryan", |
|
"middle": [], |
|
"last": "Saurous", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Seybold", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "ICASSP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shawn Hershey, Sourish Chaudhuri, Daniel PW Ellis, Jort F Gemmeke, Aren Jansen, R Channing Moore, Manoj Plakal, Devin Platt, Rif A Saurous, Bryan Seybold, et al. 2017. CNN Architectures for Large- Scale Audio Classification. In ICASSP.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "Long Short-Term Memory", |
|
"authors": [ |
|
{ |
|
"first": "Sepp", |
|
"middle": [], |
|
"last": "Hochreiter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J\u00fcrgen", |
|
"middle": [], |
|
"last": "Schmidhuber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Neural Computation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sepp Hochreiter and J\u00fcrgen Schmidhuber. 1997. Long Short-Term Memory. Neural Computation.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "End-to-End Audio Visual Scene-Aware Dialog using Multimodal Attention-based Video Features", |
|
"authors": [ |
|
{ |
|
"first": "Chiori", |
|
"middle": [], |
|
"last": "Hori", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Huda", |
|
"middle": [], |
|
"last": "Alamri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jue", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gordon", |
|
"middle": [], |
|
"last": "Wichern", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Takaaki", |
|
"middle": [], |
|
"last": "Hori", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anoop", |
|
"middle": [], |
|
"last": "Cherian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tim", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Marks", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vincent", |
|
"middle": [], |
|
"last": "Cartillier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Raphael", |
|
"middle": [ |
|
"Gontijo" |
|
], |
|
"last": "Lopes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Abhishek", |
|
"middle": [], |
|
"last": "Das", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Irfan", |
|
"middle": [], |
|
"last": "Essa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dhruv", |
|
"middle": [], |
|
"last": "Batra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Devi", |
|
"middle": [], |
|
"last": "Parikh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1806.08409" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chiori Hori, Huda Alamri, Jue Wang, Gordon Wich- ern, Takaaki Hori, Anoop Cherian, Tim K. Marks, Vincent Cartillier, Raphael Gontijo Lopes, Abhishek Das, Irfan Essa, Dhruv Batra, and Devi Parikh. 2018. End-to-End Audio Visual Scene-Aware Dialog us- ing Multimodal Attention-based Video Features. In arXiv:1806.08409.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "Attention-based Multimodal Fusion for Video Description", |
|
"authors": [ |
|
{ |
|
"first": "Chiori", |
|
"middle": [], |
|
"last": "Hori", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Takaaki", |
|
"middle": [], |
|
"last": "Hori", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Teng-Yok", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ziming", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bret", |
|
"middle": [], |
|
"last": "Harsham", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "John", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tim", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Hershey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kazuhiko", |
|
"middle": [], |
|
"last": "Marks", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Sumi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "ICCV", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chiori Hori, Takaaki Hori, Teng-Yok Lee, Ziming Zhang, Bret Harsham, John R Hershey, Tim K Marks, and Kazuhiko Sumi. 2017. Attention-based Multimodal Fusion for Video Description. In ICCV.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "The LJ Speech Dataset", |
|
"authors": [ |
|
{ |
|
"first": "Keith", |
|
"middle": [], |
|
"last": "Ito", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Keith Ito. 2017. The LJ Speech Dataset. https:// keithito.com/LJ-Speech-Dataset/.", |
|
"links": null |
|
}, |
|
"BIBREF42": { |
|
"ref_id": "b42", |
|
"title": "DenseCap: Fully Convolutional Localization Networks for Dense Captioning", |
|
"authors": [ |
|
{ |
|
"first": "Justin", |
|
"middle": [], |
|
"last": "Johnson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrej", |
|
"middle": [], |
|
"last": "Karpathy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Fei-Fei", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "CVPR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Justin Johnson, Andrej Karpathy, and Li Fei-Fei. 2016. DenseCap: Fully Convolutional Localization Net- works for Dense Captioning. In CVPR.", |
|
"links": null |
|
}, |
|
"BIBREF43": { |
|
"ref_id": "b43", |
|
"title": "Large-Scale Video Classification with Convolutional Neural Networks", |
|
"authors": [ |
|
{ |
|
"first": "Andrej", |
|
"middle": [], |
|
"last": "Karpathy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "George", |
|
"middle": [], |
|
"last": "Toderici", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sanketh", |
|
"middle": [], |
|
"last": "Shetty", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Leung", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rahul", |
|
"middle": [], |
|
"last": "Sukthankar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Fei-Fei", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "CVPR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andrej Karpathy, George Toderici, Sanketh Shetty, Thomas Leung, Rahul Sukthankar, and Li Fei-Fei. 2014. Large-Scale Video Classification with Con- volutional Neural Networks. In CVPR.", |
|
"links": null |
|
}, |
|
"BIBREF44": { |
|
"ref_id": "b44", |
|
"title": "Adam: A Method for Stochastic Optimization", |
|
"authors": [ |
|
{ |
|
"first": "Diederik", |
|
"middle": [], |
|
"last": "Kingma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Ba", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "ICLR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diederik Kingma and Jimmy Ba. 2015. Adam: A Method for Stochastic Optimization. In ICLR.", |
|
"links": null |
|
}, |
|
"BIBREF45": { |
|
"ref_id": "b45", |
|
"title": "Dense-Captioning Events in Videos", |
|
"authors": [ |
|
{ |
|
"first": "Ranjay", |
|
"middle": [], |
|
"last": "Krishna", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenji", |
|
"middle": [], |
|
"last": "Hata", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Frederic", |
|
"middle": [], |
|
"last": "Ren", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Fei-Fei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Juan", |
|
"middle": [ |
|
"Carlos" |
|
], |
|
"last": "Niebles", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "ICCV", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ranjay Krishna, Kenji Hata, Frederic Ren, Li Fei-Fei, and Juan Carlos Niebles. 2017. Dense-Captioning Events in Videos. In ICCV.", |
|
"links": null |
|
}, |
|
"BIBREF46": { |
|
"ref_id": "b46", |
|
"title": "Incorporating Semantic Attention in Video Description Generation", |
|
"authors": [ |
|
{ |
|
"first": "Natsuda", |
|
"middle": [], |
|
"last": "Laokulrat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naoaki", |
|
"middle": [], |
|
"last": "Okazaki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hideki", |
|
"middle": [], |
|
"last": "Nakayama", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "LREC", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Natsuda Laokulrat, Naoaki Okazaki, and Hideki Nakayama. 2018. Incorporating Semantic Attention in Video Description Generation. In LREC.", |
|
"links": null |
|
}, |
|
"BIBREF47": { |
|
"ref_id": "b47", |
|
"title": "ROUGE: A Package for Automatic Evaluation of Summaries", |
|
"authors": [ |
|
{ |
|
"first": "Chin-Yew", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "TSBO", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chin-Yew Lin. 2004. ROUGE: A Package for Auto- matic Evaluation of Summaries. In TSBO.", |
|
"links": null |
|
}, |
|
"BIBREF48": { |
|
"ref_id": "b48", |
|
"title": "Microsoft COCO: Common objects in Context", |
|
"authors": [ |
|
{ |
|
"first": "Tsung-Yi", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Maire", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Serge", |
|
"middle": [], |
|
"last": "Belongie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Hays", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pietro", |
|
"middle": [], |
|
"last": "Perona", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Deva", |
|
"middle": [], |
|
"last": "Ramanan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Piotr", |
|
"middle": [], |
|
"last": "Doll\u00e1r", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C Lawrence", |
|
"middle": [], |
|
"last": "Zitnick", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "ECCV", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Doll\u00e1r, and C Lawrence Zitnick. 2014. Microsoft COCO: Common objects in Context. In ECCV.", |
|
"links": null |
|
}, |
|
"BIBREF49": { |
|
"ref_id": "b49", |
|
"title": "Video Captioning with Multi-Faceted Attention", |
|
"authors": [ |
|
{ |
|
"first": "Xiang", |
|
"middle": [], |
|
"last": "Long", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chuang", |
|
"middle": [], |
|
"last": "Gan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gerard", |
|
"middle": [], |
|
"last": "De Melo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiang Long, Chuang Gan, and Gerard de Melo. 2018. Video Captioning with Multi-Faceted Atten- tion. TACL.", |
|
"links": null |
|
}, |
|
"BIBREF50": { |
|
"ref_id": "b50", |
|
"title": "Effective Approaches to Attentionbased Neural Machine Translation", |
|
"authors": [ |
|
{ |
|
"first": "Thang", |
|
"middle": [], |
|
"last": "Luong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hieu", |
|
"middle": [], |
|
"last": "Pham", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher D", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thang Luong, Hieu Pham, and Christopher D Man- ning. 2015. Effective Approaches to Attention- based Neural Machine Translation. In EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF51": { |
|
"ref_id": "b51", |
|
"title": "Eric Battenberg, and Oriol Nieto. 2015. librosa: Audio and Music Signal Analysis in Python", |
|
"authors": [ |
|
{ |
|
"first": "Brian", |
|
"middle": [], |
|
"last": "Mcfee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Colin", |
|
"middle": [], |
|
"last": "Raffel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dawen", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Ellis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matt", |
|
"middle": [], |
|
"last": "Mcvicar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "SCIPY", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Brian McFee, Colin Raffel, Dawen Liang, Daniel El- lis, Matt McVicar, Eric Battenberg, and Oriol Nieto. 2015. librosa: Audio and Music Signal Analysis in Python. In SCIPY.", |
|
"links": null |
|
}, |
|
"BIBREF52": { |
|
"ref_id": "b52", |
|
"title": "Bhiksha Raj, and Tuomas Virtanen. 2017. DCASE 2017 Challenge Setup: Tasks, Datasets and Baseline System. In DCASE", |
|
"authors": [ |
|
{ |
|
"first": "Annamaria", |
|
"middle": [], |
|
"last": "Mesaros", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Toni", |
|
"middle": [], |
|
"last": "Heittola", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aleksandr", |
|
"middle": [], |
|
"last": "Diment", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Elizalde", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ankit", |
|
"middle": [], |
|
"last": "Shah", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emmanuel", |
|
"middle": [], |
|
"last": "Vincent", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Annamaria Mesaros, Toni Heittola, Aleksandr Diment, Benjamin Elizalde, Ankit Shah, Emmanuel Vincent, Bhiksha Raj, and Tuomas Virtanen. 2017. DCASE 2017 Challenge Setup: Tasks, Datasets and Baseline System. In DCASE.", |
|
"links": null |
|
}, |
|
"BIBREF53": { |
|
"ref_id": "b53", |
|
"title": "TUT Database for Acoustic Scene Classification and Sound Event Detection", |
|
"authors": [ |
|
{ |
|
"first": "Annamaria", |
|
"middle": [], |
|
"last": "Mesaros", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Toni", |
|
"middle": [], |
|
"last": "Heittola", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tuomas", |
|
"middle": [], |
|
"last": "Virtanen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "EUSIPCO", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Annamaria Mesaros, Toni Heittola, and Tuomas Virta- nen. 2016. TUT Database for Acoustic Scene Clas- sification and Sound Event Detection. In EUSIPCO.", |
|
"links": null |
|
}, |
|
"BIBREF54": { |
|
"ref_id": "b54", |
|
"title": "Mozilla Common Voice", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Mozilla", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mozilla. 2017. Mozilla Common Voice. https:// voice.mozilla.org/.", |
|
"links": null |
|
}, |
|
"BIBREF55": { |
|
"ref_id": "b55", |
|
"title": "VoxCeleb: A Large-Scale Speaker Identification Dataset", |
|
"authors": [ |
|
{ |
|
"first": "Arsha", |
|
"middle": [], |
|
"last": "Nagrani", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Arsha Nagrani, Joon Son Chung, and Andrew Zisser- man. 2017. VoxCeleb: A Large-Scale Speaker Iden- tification Dataset. In Interspeech.", |
|
"links": null |
|
}, |
|
"BIBREF56": { |
|
"ref_id": "b56", |
|
"title": "Audio-Visual Scene Analysis with Self-Supervised Multisensory Features", |
|
"authors": [ |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Owens", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexei", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Efros", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "ECCV", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andrew Owens and Alexei A. Efros. 2018. Audio- Visual Scene Analysis with Self-Supervised Multi- sensory Features. In ECCV.", |
|
"links": null |
|
}, |
|
"BIBREF57": { |
|
"ref_id": "b57", |
|
"title": "Librispeech: An ASR Corpus Based on Public Domain Audio Books", |
|
"authors": [ |
|
{ |
|
"first": "Vassil", |
|
"middle": [], |
|
"last": "Panayotov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guoguo", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Povey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sanjeev", |
|
"middle": [], |
|
"last": "Khudanpur", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "ICASSP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vassil Panayotov, Guoguo Chen, Daniel Povey, and Sanjeev Khudanpur. 2015. Librispeech: An ASR Corpus Based on Public Domain Audio Books. In ICASSP.", |
|
"links": null |
|
}, |
|
"BIBREF58": { |
|
"ref_id": "b58", |
|
"title": "BLEU: A Method for Automatic Evaluation of Machine Translation", |
|
"authors": [ |
|
{ |
|
"first": "Kishore", |
|
"middle": [], |
|
"last": "Papineni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Salim", |
|
"middle": [], |
|
"last": "Roukos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Todd", |
|
"middle": [], |
|
"last": "Ward", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei-Jing", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2002. BLEU: A Method for Automatic Evaluation of Machine Translation. In ACL.", |
|
"links": null |
|
}, |
|
"BIBREF59": { |
|
"ref_id": "b59", |
|
"title": "ESC: Dataset for Environmental Sound Classification", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Karol", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Piczak", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "ACM MM", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Karol J Piczak. 2015. ESC: Dataset for Environmental Sound Classification. In ACM MM.", |
|
"links": null |
|
}, |
|
"BIBREF60": { |
|
"ref_id": "b60", |
|
"title": "Collecting Image Annotations Using Amazon's Mechanical Turk", |
|
"authors": [ |
|
{ |
|
"first": "Cyrus", |
|
"middle": [], |
|
"last": "Rashtchian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Young", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Micah", |
|
"middle": [], |
|
"last": "Hodosh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julia", |
|
"middle": [], |
|
"last": "Hockenmaier", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Cyrus Rashtchian, Peter Young, Micah Hodosh, and Julia Hockenmaier. 2010. Collecting Image Annota- tions Using Amazon's Mechanical Turk. In NAACL- HLT.", |
|
"links": null |
|
}, |
|
"BIBREF61": { |
|
"ref_id": "b61", |
|
"title": "Movie Description", |
|
"authors": [ |
|
{ |
|
"first": "Anna", |
|
"middle": [], |
|
"last": "Rohrbach", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Atousa", |
|
"middle": [], |
|
"last": "Torabi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marcus", |
|
"middle": [], |
|
"last": "Rohrbach", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Niket", |
|
"middle": [], |
|
"last": "Tandon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Pal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hugo", |
|
"middle": [], |
|
"last": "Larochelle", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aaron", |
|
"middle": [], |
|
"last": "Courville", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "IJCV", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Anna Rohrbach, Atousa Torabi, Marcus Rohrbach, Niket Tandon, Christopher Pal, Hugo Larochelle, Aaron Courville, and Bernt Schiele. 2017. Movie Description. In IJCV.", |
|
"links": null |
|
}, |
|
"BIBREF62": { |
|
"ref_id": "b62", |
|
"title": "ImageNet Large Scale Visual Recognition Challenge. IJCV", |
|
"authors": [ |
|
{ |
|
"first": "Olga", |
|
"middle": [], |
|
"last": "Russakovsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jia", |
|
"middle": [], |
|
"last": "Deng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hao", |
|
"middle": [], |
|
"last": "Su", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "Krause", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sanjeev", |
|
"middle": [], |
|
"last": "Satheesh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sean", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiheng", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, An- drej Karpathy, Aditya Khosla, Michael Bernstein, Alexander Berg, and Li Fei-Fei. 2015. ImageNet Large Scale Visual Recognition Challenge. IJCV.", |
|
"links": null |
|
}, |
|
"BIBREF63": { |
|
"ref_id": "b63", |
|
"title": "A Dataset and Taxonomy for Urban Sound Research", |
|
"authors": [ |
|
{ |
|
"first": "Justin", |
|
"middle": [], |
|
"last": "Salamon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Jacoby", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Juan", |
|
"middle": [ |
|
"Pablo" |
|
], |
|
"last": "Bello", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "ACM MM", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Justin Salamon, Christopher Jacoby, and Juan Pablo Bello. 2014. A Dataset and Taxonomy for Urban Sound Research. In ACM MM.", |
|
"links": null |
|
}, |
|
"BIBREF64": { |
|
"ref_id": "b64", |
|
"title": "Learning to Localize Sound Source in Visual Scenes", |
|
"authors": [ |
|
{ |
|
"first": "Arda", |
|
"middle": [], |
|
"last": "Senocak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tae-Hyun", |
|
"middle": [], |
|
"last": "Oh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Junsik", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "CVPR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Arda Senocak, Tae-Hyun Oh, Junsik Kim, Ming-Hsuan Yang, and In So Kweon. 2018. Learning to Localize Sound Source in Visual Scenes. In CVPR.", |
|
"links": null |
|
}, |
|
"BIBREF65": { |
|
"ref_id": "b65", |
|
"title": "Bidirectional Attention Flow for Machine Comprehension", |
|
"authors": [ |
|
{ |
|
"first": "Minjoon", |
|
"middle": [], |
|
"last": "Seo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aniruddha", |
|
"middle": [], |
|
"last": "Kembhavi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ali", |
|
"middle": [], |
|
"last": "Farhadi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hannaneh", |
|
"middle": [], |
|
"last": "Hajishirzi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "ICLR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Minjoon Seo, Aniruddha Kembhavi, Ali Farhadi, and Hannaneh Hajishirzi. 2017. Bidirectional Attention Flow for Machine Comprehension. In ICLR.", |
|
"links": null |
|
}, |
|
"BIBREF66": { |
|
"ref_id": "b66", |
|
"title": "Conceptual Captions: A Cleaned, Hypernymed, Image Alt-text Dataset For Automatic Image Captioning", |
|
"authors": [ |
|
{ |
|
"first": "Piyush", |
|
"middle": [], |
|
"last": "Sharma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nan", |
|
"middle": [], |
|
"last": "Ding", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Goodman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Radu", |
|
"middle": [], |
|
"last": "Soricut", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Piyush Sharma, Nan Ding, Sebastian Goodman, and Radu Soricut. 2018. Conceptual Captions: A Cleaned, Hypernymed, Image Alt-text Dataset For Automatic Image Captioning. In ACL.", |
|
"links": null |
|
}, |
|
"BIBREF67": { |
|
"ref_id": "b67", |
|
"title": "Very Deep Convolutional Networks for Large-Scale Image Recognition", |
|
"authors": [ |
|
{ |
|
"first": "Karen", |
|
"middle": [], |
|
"last": "Simonyan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Zisserman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "ICLR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Karen Simonyan and Andrew Zisserman. 2015. Very Deep Convolutional Networks for Large-Scale Im- age Recognition. In ICLR.", |
|
"links": null |
|
}, |
|
"BIBREF68": { |
|
"ref_id": "b68", |
|
"title": "Dropout: A Simple Way to Prevent Neural Networks from Overfitting", |
|
"authors": [ |
|
{ |
|
"first": "Nitish", |
|
"middle": [], |
|
"last": "Srivastava", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [], |
|
"last": "Hinton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Krizhevsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruslan", |
|
"middle": [], |
|
"last": "Salakhutdinov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "JMLR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nitish Srivastava, Geoffrey Hinton, Alex Krizhevsky, Ilya Sutskever, and Ruslan Salakhutdinov. 2014. Dropout: A Simple Way to Prevent Neural Networks from Overfitting. In JMLR.", |
|
"links": null |
|
}, |
|
"BIBREF69": { |
|
"ref_id": "b69", |
|
"title": "Detection and Classification of Acoustic Scenes and Events", |
|
"authors": [ |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Stowell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dimitrios", |
|
"middle": [], |
|
"last": "Giannoulis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emmanouil", |
|
"middle": [], |
|
"last": "Benetos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mathieu", |
|
"middle": [], |
|
"last": "Lagrange", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Plumbley", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "IEEE Transactions on Multimedia", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dan Stowell, Dimitrios Giannoulis, Emmanouil Bene- tos, Mathieu Lagrange, and Mark D Plumbley. 2015. Detection and Classification of Acoustic Scenes and Events. In IEEE Transactions on Multimedia.", |
|
"links": null |
|
}, |
|
"BIBREF70": { |
|
"ref_id": "b70", |
|
"title": "Learning Spatiotemporal Features with 3D Convolutional Networks", |
|
"authors": [ |
|
{ |
|
"first": "Du", |
|
"middle": [], |
|
"last": "Tran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Lubomir", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rob", |
|
"middle": [], |
|
"last": "Bourdev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lorenzo", |
|
"middle": [], |
|
"last": "Fergus", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manohar", |
|
"middle": [], |
|
"last": "Torresani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Paluri", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "ICCV", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Du Tran, Lubomir D Bourdev, Rob Fergus, Lorenzo Torresani, and Manohar Paluri. 2015. Learning Spa- tiotemporal Features with 3D Convolutional Net- works. In ICCV.", |
|
"links": null |
|
}, |
|
"BIBREF71": { |
|
"ref_id": "b71", |
|
"title": "CIDEr: Consensus-based Image Description Evaluation", |
|
"authors": [ |
|
{ |
|
"first": "Ramakrishna", |
|
"middle": [], |
|
"last": "Vedantam", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lawrence", |
|
"middle": [], |
|
"last": "Zitnick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Devi", |
|
"middle": [], |
|
"last": "Parikh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "CVPR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ramakrishna Vedantam, C Lawrence Zitnick, and Devi Parikh. 2015. CIDEr: Consensus-based Image De- scription Evaluation. In CVPR.", |
|
"links": null |
|
}, |
|
"BIBREF72": { |
|
"ref_id": "b72", |
|
"title": "Watch, Listen, and Describe: Globally and Locally Aligned Cross-Modal Attentions for Video Captioning", |
|
"authors": [ |
|
{ |
|
"first": "Xin", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuan-Fang", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "William", |
|
"middle": [ |
|
"Yang" |
|
], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "NAACL-HLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xin Wang, Yuan-Fang Wang, and William Yang Wang. 2018. Watch, Listen, and Describe: Globally and Locally Aligned Cross-Modal Attentions for Video Captioning. In NAACL-HLT.", |
|
"links": null |
|
}, |
|
"BIBREF73": { |
|
"ref_id": "b73", |
|
"title": "Speech Commands: A Dataset for Limited-Vocabulary Speech Recognition", |
|
"authors": [ |
|
{ |
|
"first": "Pete", |
|
"middle": [], |
|
"last": "Warden", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1804.03209" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pete Warden. 2018. Speech Commands: A Dataset for Limited-Vocabulary Speech Recognition. In arXiv:1804.03209.", |
|
"links": null |
|
}, |
|
"BIBREF74": { |
|
"ref_id": "b74", |
|
"title": "MSR-VTT: A Large Video Description Dataset for Bridging Video and Language", |
|
"authors": [ |
|
{ |
|
"first": "Jun", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tao", |
|
"middle": [], |
|
"last": "Mei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ting", |
|
"middle": [], |
|
"last": "Yao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yong", |
|
"middle": [], |
|
"last": "Rui", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "CVPR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jun Xu, Tao Mei, Ting Yao, and Yong Rui. 2016. MSR-VTT: A Large Video Description Dataset for Bridging Video and Language. In CVPR.", |
|
"links": null |
|
}, |
|
"BIBREF75": { |
|
"ref_id": "b75", |
|
"title": "Large-Scale Weakly Supervised Audio Classification using Gated Convolutional Neural Network", |
|
"authors": [ |
|
{ |
|
"first": "Yong", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qiuqiang", |
|
"middle": [], |
|
"last": "Kong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wenwu", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Plumbley", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "ICASSP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yong Xu, Qiuqiang Kong, Wenwu Wang, and Mark D Plumbley. 2018. Large-Scale Weakly Supervised Audio Classification using Gated Convolutional Neural Network. In ICASSP.", |
|
"links": null |
|
}, |
|
"BIBREF76": { |
|
"ref_id": "b76", |
|
"title": "Describing Videos by Exploiting Temporal Structure", |
|
"authors": [ |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Yao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Atousa", |
|
"middle": [], |
|
"last": "Torabi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nicolas", |
|
"middle": [], |
|
"last": "Ballas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Pal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hugo", |
|
"middle": [], |
|
"last": "Larochelle", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aaron", |
|
"middle": [], |
|
"last": "Courville", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "ICCV", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Li Yao, Atousa Torabi, Kyunghyun Cho, Nicolas Bal- las, Christopher Pal, Hugo Larochelle, and Aaron Courville. 2016. Describing Videos by Exploiting Temporal Structure. In ICCV.", |
|
"links": null |
|
}, |
|
"BIBREF77": { |
|
"ref_id": "b77", |
|
"title": "Image Captioning with Semantic Attention", |
|
"authors": [ |
|
{ |
|
"first": "Quanzeng", |
|
"middle": [], |
|
"last": "You", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hailin", |
|
"middle": [], |
|
"last": "Jin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhaowen", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chen", |
|
"middle": [], |
|
"last": "Fang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiebo", |
|
"middle": [], |
|
"last": "Luo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "CVPR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Quanzeng You, Hailin Jin, Zhaowen Wang, Chen Fang, and Jiebo Luo. 2016. Image Captioning with Se- mantic Attention. In CVPR.", |
|
"links": null |
|
}, |
|
"BIBREF78": { |
|
"ref_id": "b78", |
|
"title": "From Image Descriptions to Visual Denotations: New Similarity Metrics for Semantic Inference over Event Descriptions", |
|
"authors": [ |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Young", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alice", |
|
"middle": [], |
|
"last": "Lai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Micah", |
|
"middle": [], |
|
"last": "Hodosh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julia", |
|
"middle": [], |
|
"last": "Hockenmaier", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "TACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peter Young, Alice Lai, Micah Hodosh, and Julia Hockenmaier. 2014. From Image Descriptions to Visual Denotations: New Similarity Metrics for Se- mantic Inference over Event Descriptions. In TACL.", |
|
"links": null |
|
}, |
|
"BIBREF79": { |
|
"ref_id": "b79", |
|
"title": "End-to-End Concept Word Detection for Video Captioning, Retrieval, and Question Answering", |
|
"authors": [ |
|
{ |
|
"first": "Youngjae", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hyungjin", |
|
"middle": [], |
|
"last": "Ko", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jongwook", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gunhee", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "CVPR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Youngjae Yu, Hyungjin Ko, Jongwook Choi, and Gun- hee Kim. 2017. End-to-End Concept Word Detec- tion for Video Captioning, Retrieval, and Question Answering. In CVPR.", |
|
"links": null |
|
}, |
|
"BIBREF80": { |
|
"ref_id": "b80", |
|
"title": "The Sound of Pixels", |
|
"authors": [ |
|
{ |
|
"first": "Hang", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chuang", |
|
"middle": [], |
|
"last": "Gan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Rouditchenko", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Carl", |
|
"middle": [], |
|
"last": "Vondrick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Josh", |
|
"middle": [], |
|
"last": "Mcdermott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antonio", |
|
"middle": [], |
|
"last": "Torralba", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "ECCV", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hang Zhao, Chuang Gan, Andrew Rouditchenko, Carl Vondrick, Josh McDermott, and Antonio Torralba. 2018. The Sound of Pixels. In ECCV.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"text": "Audio Classification] rumble | vehicle | speech | car | outside [Video Captioning] A bus passing by with some people walking by in the afternoon.[Audio Captioning] A muffled rumble with man and woman talking in the background while a siren blares in the distance.", |
|
"type_str": "figure", |
|
"uris": null, |
|
"num": null |
|
}, |
|
"FIGREF1": { |
|
"text": "Comparison of audio captioning with audio classification and video captioning tasks.", |
|
"type_str": "figure", |
|
"uris": null, |
|
"num": null |
|
}, |
|
"FIGREF2": { |
|
"text": "The AMT interface for audio annotation.", |
|
"type_str": "figure", |
|
"uris": null, |
|
"num": null |
|
}, |
|
"FIGREF3": { |
|
"text": "MSR-VTTA black and white video of about actors", |
|
"type_str": "figure", |
|
"uris": null, |
|
"num": null |
|
}, |
|
"FIGREF4": { |
|
"text": "Comparison between two video captioning datasets and AudioCaps. The text from (a) LSMDC(Rohrbach et al., 2017) and (b) MSR-VTT(Xu et al., 2016) includes multiple visually grounded vocabularies (indicated in blue), whereas the text from (c) AudioCaps contains vocabularies relying on auditory cues (in red).", |
|
"type_str": "figure", |
|
"uris": null, |
|
"num": null |
|
}, |
|
"FIGREF5": { |
|
"text": "The audio captioning model with top-down multi-scale encoder and aligned semantic attention.", |
|
"type_str": "figure", |
|
"uris": null, |
|
"num": null |
|
}, |
|
"FIGREF6": { |
|
"text": "The frequencies of annotated instances per category (i.e. word labels) for AudioCaps.", |
|
"type_str": "figure", |
|
"uris": null, |
|
"num": null |
|
}, |
|
"TABREF1": { |
|
"content": "<table/>", |
|
"text": "", |
|
"type_str": "table", |
|
"html": null, |
|
"num": null |
|
}, |
|
"TABREF2": { |
|
"content": "<table><tr><td/><td colspan=\"2\">MSR-VTT</td><td colspan=\"2\">AudioCaps</td></tr><tr><td>Methods</td><td colspan=\"4\">METEOR CIDEr METEOR CIDEr</td></tr><tr><td>MFCC-LSTM</td><td>21.4</td><td>19.2</td><td>18.2</td><td>49.3</td></tr><tr><td>SoundNet-LSTM</td><td>20.0</td><td>14.7</td><td>17.0</td><td>43.4</td></tr><tr><td>VGGish-LSTM</td><td>22.8</td><td>26.1</td><td>19.3</td><td>50.9</td></tr><tr><td>C3D-LSTM</td><td>24.8</td><td>36.8</td><td>15.9</td><td>42.7</td></tr><tr><td>Gap (Audio -Video)</td><td>-2.0</td><td>-10.7</td><td>+3.4</td><td>+8.2</td></tr></table>", |
|
"text": "Upper-bound of aligned semantic attention by language similarity metrics.", |
|
"type_str": "table", |
|
"html": null, |
|
"num": null |
|
}, |
|
"TABREF3": { |
|
"content": "<table><tr><td>: Comparison of captioning results between</td></tr><tr><td>video-based and audio-based datasets. The first three</td></tr><tr><td>methods perform captioning using only audios while</td></tr><tr><td>the last method C3D-LSTM, only use videos. The</td></tr><tr><td>gaps empirically show how much AudioCaps is audio-</td></tr><tr><td>oriented in contrast to MSR-VTT.</td></tr></table>", |
|
"text": "", |
|
"type_str": "table", |
|
"html": null, |
|
"num": null |
|
} |
|
} |
|
} |
|
} |