|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T14:46:45.819328Z" |
|
}, |
|
"title": "A Benchmark for Structured Procedural Knowledge Extraction from Cooking Videos", |
|
"authors": [ |
|
{ |
|
"first": "Frank", |
|
"middle": [ |
|
"F" |
|
], |
|
"last": "Xu", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Lei", |
|
"middle": [], |
|
"last": "Ji", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Botian", |
|
"middle": [], |
|
"last": "Shi", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Junyi", |
|
"middle": [], |
|
"last": "Du", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Southern California", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Graham", |
|
"middle": [], |
|
"last": "Neubig", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Yonatan", |
|
"middle": [], |
|
"last": "Bisk", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Nan", |
|
"middle": [], |
|
"last": "Duan", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Watching instructional videos are often used to learn about procedures. Video captioning is one way of automatically collecting such knowledge. However, it provides only an indirect, overall evaluation of multimodal models with no finer-grained quantitative measure of what they have learned. We propose instead, a benchmark of structured procedural knowledge extracted from cooking videos. This work is complementary to existing tasks, but requires models to produce interpretable structured knowledge in the form of verb-argument tuples. Our manually annotated open-vocabulary resource includes 356 instructional cooking videos and 15,523 video clip/sentence-level annotations. Our analysis shows that the proposed task is challenging and standard modeling approaches like unsupervised segmentation, semantic role labeling, and visual action detection perform poorly when forced to predict every action of a procedure in structured form.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Watching instructional videos are often used to learn about procedures. Video captioning is one way of automatically collecting such knowledge. However, it provides only an indirect, overall evaluation of multimodal models with no finer-grained quantitative measure of what they have learned. We propose instead, a benchmark of structured procedural knowledge extracted from cooking videos. This work is complementary to existing tasks, but requires models to produce interpretable structured knowledge in the form of verb-argument tuples. Our manually annotated open-vocabulary resource includes 356 instructional cooking videos and 15,523 video clip/sentence-level annotations. Our analysis shows that the proposed task is challenging and standard modeling approaches like unsupervised segmentation, semantic role labeling, and visual action detection perform poorly when forced to predict every action of a procedure in structured form.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Instructional videos are a convenient way to learn a new skill. Although learning from video seems natural to humans, it requires identifying and understanding procedures and grounding them to the real world. In this paper, we propose a new task and dataset for extracting procedural knowledge into a fine-grained structured representation from multimodal information contained in a largescale archive of open-vocabulary narrative videos with noisy transcripts. While there is a significant amount of related work (summarized in \u00a73 & 7), to our knowledge there is no dataset similar in scope, with previous attempts focusing only on a single 4 i 'm heating it up with a medium, medium high flame, and i 'm going to put some bacon in there and fry it up. 5 and this is not a diet recipe. 6 sorry they making clam chowder. 7 an eye you can see the photo montage before this what i did cool. 8 somehow i fried some bacon.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
|
{ |
|
"text": "10 i set a stick of butter, and i 'm going to add a quarter cup of cornstarch. modality (e.g., text only (Kiddon et al., 2015) or video only Alayrac et al., 2016) ), using closed-domain taxonomies (Tang et al., 2019) , or lacking structure in the procedural representation (Zhou et al., 2018a) . In our task, given a narrative video, say a cooking video on YouTube about making clam chowder as shown in Figure 1 , our goal is to extract a series of tuples representing the procedure, e.g. (heat, cast iron skillet), (fry, bacon, with heated skillet), etc. We created a manually annotated, large test dataset for evaluation of the task, including over 350 instructional cooking videos along with over 15,000 English sentences in the transcripts spanning over 89 recipe types. This verb-argument structure using arbitrary textual phrases is motivated by open information extraction (Schmitz et al., 2012; Fader et al., 2011) , but focuses on procedures rather than entity-entity relations.", |
|
"cite_spans": [ |
|
{ |
|
"start": 105, |
|
"end": 126, |
|
"text": "(Kiddon et al., 2015)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 141, |
|
"end": 162, |
|
"text": "Alayrac et al., 2016)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 197, |
|
"end": 216, |
|
"text": "(Tang et al., 2019)", |
|
"ref_id": "BIBREF49" |
|
}, |
|
{ |
|
"start": 273, |
|
"end": 293, |
|
"text": "(Zhou et al., 2018a)", |
|
"ref_id": "BIBREF53" |
|
}, |
|
{ |
|
"start": 880, |
|
"end": 902, |
|
"text": "(Schmitz et al., 2012;", |
|
"ref_id": "BIBREF42" |
|
}, |
|
{ |
|
"start": 903, |
|
"end": 922, |
|
"text": "Fader et al., 2011)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 403, |
|
"end": 411, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "9", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "This task is challenging with respect to both video and language understanding. For video, it requires understanding of video contents, with a spe-cial focus on actions and procedures. For language, it requires understanding of oral narratives, including understanding of predicate-argument structure and coreference. In many cases it is necessary for both modalities to work together, such as when resolving null arguments necessitates the use of objects or actions detected from video contents in addition to transcripts. For example, the cooking video host may say \"just a pinch of salt in\", while adding some salt into a boiling pot of soup, in which case inferring the action \"add\" and its argument \"pot\" requires visual understanding.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "9", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Along with the novel task and dataset, we propose several baseline approaches that extract structure in a pipelined fashion. These methods first identify key clips/sentences using video and transcript information with unsupervised and supervised multimodal methods, then extract procedure tuples from the utterances and/or video of these key clips. On the utterances side, we utilize an existing state-of-the-art semantic role labeling model (Shi and Lin, 2019) , with the intuition that semantic role labeling captures the verb-argument structures of a sentence, which would be directly related to procedures and actions. On the video side, similarly, we utilize existing state-of-the-art video action/object recognition model trained in kitchen settings to further augment utterance-only extraction results. The results are far from perfect, demonstrating that the proposed task is challenging and that structuring procedures requires more than just state-of-the-art semantic parsing or video action recognition.", |
|
"cite_spans": [ |
|
{ |
|
"start": 442, |
|
"end": 461, |
|
"text": "(Shi and Lin, 2019)", |
|
"ref_id": "BIBREF45" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "9", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We show a concrete example of our procedural knowledge extraction task in Figure 1 . Our ultimate goal is to automatically map unstructured instructional video (clip and utterances) to structured procedures, defining what actions should be performed on which objects, with what arguments and in what order. We define the input to such an extraction system:", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 74, |
|
"end": 82, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Problem Definition", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "\u2022 Task R, e.g. \"Create Chicken Parmesan\" and instructional video V R describing the procedure to achieve task R, e.g. a video titled \"Chicken Parmesan -Let's Cook with ModernMom\". 1 \u2022 A sequence of n sentences", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Definition", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "T R = {t 0 , t 1 , ..., t n }", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Definition", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "representing video V R 's corresponding transcript. According to the time stamps of the transcript sentences, the video is also segmented into n clips V R = {v 0 , v 1 , ..., v n } accordingly to align with the sentences in the transcript T R .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Definition", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "\u2022 A sequence of m procedure tuples S R = {s 0 , s 1 , ..., s m } describing the key steps to achieve task R according to instructional video V R . \u2022 An identified list of key video clips and corresponding", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The output will be:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "sentences V R \u2286 V R , to which proce- dures in S R are grounded.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The output will be:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Each procedural tuple s j = (verb, arg 1 , ..., arg k ) \u2208 S R consists of a verb phrase and its arguments. Only the \"verb\" field is required, and thus the tuple size ranges from 1 to k + 1. All fields can be either a word or a phrase. Not every clip/sentence describes procedures, as most videos include an intro, an outro, nonprocedural narration, or off-topic chit-chat. Key clips V R are clips associated with one or more procedures in P R , with some clips/sentences associated with multiple procedure tuples. Conversely, each procedure tuple will be associated with only a single clip/sentence.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The output will be:", |
|
"sec_num": null |
|
}, |
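To make the output structure above concrete, here is a minimal sketch of a procedure tuple grounded to a key clip; the class and field names (and the example clip index) are illustrative assumptions, not the authors' actual data format.

```python
# Minimal sketch of the output structure defined above. Names and the example
# clip index are illustrative, not the authors' actual data format.
from dataclasses import dataclass, field
from typing import List


@dataclass
class ProcedureTuple:
    verb: str                                        # the only required field
    args: List[str] = field(default_factory=list)    # 0..k open-vocabulary argument phrases
    clip_index: int = -1                             # key clip/sentence the tuple is grounded to


# e.g. the Figure 1 step (fry, bacon, with heated skillet)
step = ProcedureTuple(verb="fry", args=["bacon", "with heated skillet"], clip_index=4)
print(step)
```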
|
{ |
|
"text": "While others have created related datasets, they fall short on key dimensions which we remedy in our work. Specifically, In Table 1 we compare to AllRecipes (Kiddon et al., 2015) (Tang et al., 2019) , How2 (Sanabria et al., 2018) , HAKE and TACOS (Regneri et al., 2013) . Additional details about datasets are included in the Appendix A. 2 In summary, none have both structured and open extraction annotations for the procedural knowledge extraction task, since most focus on either video summarization/captioning or action localization/classification. ", |
|
"cite_spans": [ |
|
{ |
|
"start": 157, |
|
"end": 178, |
|
"text": "(Kiddon et al., 2015)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 179, |
|
"end": 198, |
|
"text": "(Tang et al., 2019)", |
|
"ref_id": "BIBREF49" |
|
}, |
|
{ |
|
"start": 206, |
|
"end": 229, |
|
"text": "(Sanabria et al., 2018)", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 247, |
|
"end": 269, |
|
"text": "(Regneri et al., 2013)", |
|
"ref_id": "BIBREF38" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 124, |
|
"end": 131, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Dataset & Analysis", |
|
"sec_num": "3" |
|
}, |
|
|
{ |
|
"text": "To address the limitations of existing datasets, we created our own evaluation dataset by annotating structured procedure knowledge given the video and transcript. Native English-speakers annotated four videos per recipe type (e.g. clam chowder, pizza margherita, etc.) in the YouCook2 dataset into the structured form presented in \u00a72 (totaling 356 videos). Annotators selected key clips as important steps and extracted corresponding fields to fill in verbs and arguments. Filling in the fields with the original tokens was preferred but not required (e.g., in cases of coreference and ellipsis). The result is a series of video clips labeled with procedural structured knowledge as a sequence of steps s j and series of short sentences describing the procedure. Figure 2 shows the user interface of annotation tool. The process is divided into 3 questions per clip: Q1: Determine if the video clip is a key step if: (1) the clip or transcript contains at least one action; (2) the action is required for accomplishing the task (i.e. not a self introduction); and (3) for if a clip duplicates a previous key clip, choose the one with clearer visual and textual signals (e.g. without coreference, etc.). Q2: For each key video clip, annotate the key procedural tuples. We have annotators indicate which actions are both seen and mentioned by the instructor in the video. The actions should correspond to a verb and its arguments from the original transcript except in the case of ellipsis or coreference where they have to refer to earlier phrases based on the visual scene. Q3: Construct a short fluent sentence from the annotated tuples for the given video clip.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 764, |
|
"end": 772, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Dataset Creation", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "We have two expert annotators and a professional labeling supervisor for quality control and deciding the final annotations. To improve the data quality, the supervisor reviewed all labeling results, and applied several heuristic rules to find anomalous records for further correction. The heuristic is to check the annotated verb/arguments that are not found in corresponding transcript text. Among these anomalies, the supervisor checks the conflicts between the two annotators. 25% of all annotations were modified as a result. On average annotators completed task Q1 at 240 sentences (clips) per hour and task Q2 and Q3 combined at 40 sentences per hour. For Q1, we observe an inter-annotator agreement with Cohen's Kappa of 0.83. 3 Examples are shown in Table 3 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 759, |
|
"end": 766, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Dataset Creation", |
|
"sec_num": "3.1" |
|
}, |
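A minimal sketch of the anomaly-flagging heuristic described above (annotated verbs/arguments that do not literally appear in the transcript); the dictionary keys are assumptions for illustration.

```python
# Sketch of the quality-control heuristic: flag annotations whose verb or
# arguments are not found verbatim in the corresponding transcript sentence.
# The keys "transcript", "verb", and "args" are illustrative.
def find_anomalies(annotations):
    anomalies = []
    for ann in annotations:
        transcript = ann["transcript"].lower()
        fields = [ann["verb"]] + ann["args"]
        missing = [f for f in fields if f.lower() not in transcript]
        if missing:  # could be resolved coreference/ellipsis, or an annotation error
            anomalies.append((ann, missing))
    return anomalies


if __name__ == "__main__":
    data = [{"transcript": "just a pinch of salt in", "verb": "add", "args": ["salt", "pot"]}]
    for ann, missing in find_anomalies(data):
        print("needs review:", ann["transcript"], "-> not in transcript:", missing)
```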
|
{ |
|
"text": "Overall, the dataset contains 356 videos with 15,523 video clips/sentences, among which 3,569 clips are labeled as key steps. Sentences average 16.3 tokens, and the language style is oral English. For structured procedural annotations, there are 347 unique verbs and 1,237 unique objects in all. Statistics are shown in Table 2 . Figure 3 lists the most commonly appearing verbs and entities. The action add is most frequently performed, and the entities salt and onions are the most popular ingredients.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 320, |
|
"end": 327, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
}, |
|
{ |
|
"start": 330, |
|
"end": 338, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Dataset Analysis", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Procedure summary Verb Arguments so we've placed the dough directly into the caputo flour that we import from italy. place dough in caputo flour place dough caputo flour we just give (ellipsis) a squish with our palm and make it flat in the center.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Transcript sentence", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "squish dough with palm squish dough with palm flatten center of dough flatten center of dough so will have to rotate it every thirty to forty five seconds ... rotate pizza every 30-45 seconds rotate pizza every 30-45 seconds Table 3 : Annotations of structured procedures and summaries. Coreference and ellipsis are marked with italics and are resolved into referred phrases also linked back in the annotations. See Appendix (Table 6) In nearly 30% of annotations, some verbs and arguments cannot be directly found in the transcript. An example is \"(add) some salt into the pot\", and we refer to this variety of absence as ellipsis. Arguments not mentioned explicitly are mainly due to (1) pronoun references, e.g. \"put it (fish) in the pan\"; (2) ellipsis, where the arguments are absent from the oral language, e.g. \"put the mixture inside\" where the argument \"oven\" is omitted. The details can be found in Table 2 . The coreferences and ellipsis phenomena add difficulty to our task, and indicate the utility of using multimodal information from the video signal and contextual procedural knowledge for inference.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 225, |
|
"end": 232, |
|
"text": "Table 3", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 425, |
|
"end": 434, |
|
"text": "(Table 6)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 908, |
|
"end": 915, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Transcript sentence", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In this and the following section, we describe our two-step pipeline for procedural knowledge extraction (also in Figure 4 ). This section describes the first stage of determining which clips are \"key clips\" that contribute to the description of the procedure. We describe several key clip selection models, which consume the transcript and/or the video within the clip and decide whether it is a key clip.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 114, |
|
"end": 122, |
|
"text": "Figure 4", |
|
"ref_id": "FIGREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Extraction Stage 1: Key Clip Selection", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Given our unsupervised setting, we first examine two heuristic parsing-based methods that focus on the transcript only, one based on semantic role labeling (SRL) and the other based on an unsupervised segmentation model Kiddon et al. (2015) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 220, |
|
"end": 240, |
|
"text": "Kiddon et al. (2015)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Parsing-Based Heuristic Baselines", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Before introducing heuristic baselines, we note that having a lexicon of domain-specific actions will be useful, e.g., for filtering pretrained model outputs, or providing priors to the unsupervised model described later. In our cooking domain, these actions can be expected to consist mostly of verbs related to cooking actions and procedures. Observing recipe datasets such as AllRecipes (Kiddon et al., 2015) or WikiHow (Miech et al., 2019; , we find that they usually use imperative and concise sentences for procedures and the first word is usually the action verb like \"add\", e.g., add some salt into the pot. We thus construct a cooking lexicon by aggregating the frequently appearing verbs as the first word from All-Recipes, with frequency over a threshold of 5. We further filter out words that have no verb synsets in WordNet (Miller, 1995) . Finally we manually filter out noisy or too general verbs like \"go\". Note that when applying to other domains, the lexicon can be built following a similar process of first finding a domain-specific corpus with simple and formal instructions, and then obtaining the lexicon by aggregation and filtering. Semantic role labeling baselines. One intuitive trigger in the transcript for deciding whether the sentence is a key step should be the action words, i.e. the verbs. In order to identify these action words we use semantic role labeling (Gildea and Jurafsky, 2002) , which analyzes natural language sentences to extract information about \"who did what to whom, when, where and how?\" The output is in the form of predicates and their respective arguments that acts as semantic roles, where the verb acts as the root (head) of the parse. We run a strong semantic role labeling model (Shi and Lin, 2019) included in the AllenNLP toolkit (Gardner et al., 2018) on each sentence in the transcript. From the output we get a set of verbs for each of the sentences. 4 Because not all verbs in all sentences represent actual key actions for the procedure, we additionally filter the verbs with the heuristically created cooking lexicon above, counting a clip as a key clip only if at least one of the SRL-detected verbs is included in the lexicon. Unsupervised recipe segmentation baseline (Kiddon et al., 2015) . The second baseline is based on the outputs of the unsupervised recipe sentence segmentation model in Kiddon et al. (2015) . Briefly speaking, the model is a generative probabilistic model where verbs and arguments, together with their numbers, are modeled as latent variables. It uses a bigram model for string selection. It is trained on the whole transcript corpus of YouCook2 videos iteratively for 15 epochs using a hard EM approach before the performance starts to converge. The count of verbs in the lexicon created in \u00a74.1 is provided as a prior through initialization. We then do inference to parse the transcripts in our dataset using the trained model. Following the same heuristics as the SRL outputs, we treat sentences with non-empty parsed predicates after lexical filtering as key sentences, and those without as negatives.", |
|
"cite_spans": [ |
|
{ |
|
"start": 390, |
|
"end": 411, |
|
"text": "(Kiddon et al., 2015)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 423, |
|
"end": 443, |
|
"text": "(Miech et al., 2019;", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 837, |
|
"end": 851, |
|
"text": "(Miller, 1995)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 1394, |
|
"end": 1421, |
|
"text": "(Gildea and Jurafsky, 2002)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 1738, |
|
"end": 1757, |
|
"text": "(Shi and Lin, 2019)", |
|
"ref_id": "BIBREF45" |
|
}, |
|
{ |
|
"start": 1791, |
|
"end": 1813, |
|
"text": "(Gardner et al., 2018)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 1915, |
|
"end": 1916, |
|
"text": "4", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 2238, |
|
"end": 2259, |
|
"text": "(Kiddon et al., 2015)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 2364, |
|
"end": 2384, |
|
"text": "Kiddon et al. (2015)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Parsing-Based Heuristic Baselines", |
|
"sec_num": "4.1" |
|
}, |
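A minimal sketch of the lexicon construction and SRL-based key-clip rule described above; it assumes NLTK's WordNet interface is available (after nltk.download("wordnet")), and the stoplist beyond "go" is left to manual curation.

```python
# Sketch of the cooking-lexicon construction and the SRL-based key-clip rule
# described above. Requires NLTK with the WordNet data (nltk.download("wordnet")).
from collections import Counter
from nltk.corpus import wordnet as wn

MANUAL_STOPLIST = {"go"}  # manually removed noisy / overly general verbs


def build_cooking_lexicon(recipe_steps, min_freq=5):
    """Aggregate the first word of each imperative recipe step into an action lexicon."""
    counts = Counter(step.split()[0].lower() for step in recipe_steps if step.strip())
    lexicon = set()
    for word, freq in counts.items():
        if freq < min_freq or word in MANUAL_STOPLIST:
            continue
        if wn.synsets(word, pos=wn.VERB):  # keep only words with at least one verb synset
            lexicon.add(word)
    return lexicon


def is_key_clip(srl_verbs, lexicon):
    """A clip counts as a key clip if at least one SRL-detected verb is in the lexicon."""
    return any(v.lower() in lexicon for v in srl_verbs)
```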
|
{ |
|
"text": "Next, we implement a supervised neural network model that incorporates visual information, which we have posited before may be useful in the face of incomplete verbal utterances. We extract the features of the sentence and each video frame using pretrained feature extractors respectively. Then we perform attention (Bahdanau et al., 2014) over each frame feature, using the sentence as a query, in order to acquire the representation of the video clip. Finally, we combine the visual and textual features to predict whether the input is a key clip. The model is trained on a general domain instructional key clip selection dataset with no overlap with ours, and our annotated dataset is used for evaluation only. Additional details about the model and training dataset are included in Appendix B.", |
|
"cite_spans": [ |
|
{ |
|
"start": 316, |
|
"end": 339, |
|
"text": "(Bahdanau et al., 2014)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Neural Selection Baseline", |
|
"sec_num": "4.2" |
|
}, |
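A sketch of the supervised selector described above: sentence-conditioned attention over frame features, followed by a binary classifier on the fused representation. Written in PyTorch; the feature dimensions and the linear/ReLU classifier head are assumptions, not the authors' exact architecture.

```python
# Sketch of the supervised key-clip selector: attention over per-frame features
# with the sentence feature as query, then a binary classifier on the fusion.
import torch
import torch.nn as nn


class KeyClipSelector(nn.Module):
    def __init__(self, text_dim=768, frame_dim=2048, hidden=512):
        super().__init__()
        self.query = nn.Linear(text_dim, frame_dim)          # project sentence into frame space
        self.classifier = nn.Sequential(
            nn.Linear(text_dim + frame_dim, hidden), nn.ReLU(), nn.Linear(hidden, 1)
        )

    def forward(self, sent_feat, frame_feats):
        # sent_feat: (B, text_dim); frame_feats: (B, T, frame_dim)
        q = self.query(sent_feat).unsqueeze(1)               # (B, 1, frame_dim)
        scores = torch.bmm(q, frame_feats.transpose(1, 2))   # (B, 1, T) attention logits
        attn = torch.softmax(scores, dim=-1)
        clip_feat = torch.bmm(attn, frame_feats).squeeze(1)  # (B, frame_dim) attended clip feature
        fused = torch.cat([sent_feat, clip_feat], dim=-1)
        return self.classifier(fused).squeeze(-1)            # key-clip logit per example
```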
|
{ |
|
"text": "With the identified key clips and corresponding transcript sentences, we proceed to the second stage that performs clip/sentence-level procedural knowledge extraction from key clips. In this stage, the extraction is done from clips that are identified at first as \"key clips\".", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Extraction Stage 2: Structured Knowledge Extraction", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We first present two baselines to extract structured procedures using transcripts only, similarly to the key-clip identification methods described in \u00a74.1. Semantic role labeling. For the first baseline, we use the same pretrained SRL model introduced in \u00a74.1 to conduct inference on the sentences in key clips identified from stage 1. Because they consist of verb-argument structures, the outputs of the SRL model are well aligned with the task of extracting procedural tuples that identify actions and their arguments. However, not all outputs from the SRL model are the structured procedural knowledge we aim to extract. For example, in the sentence \"you 're ready to add a variety of bell peppers\" from the transcript, the outputs from SRL model contains two parses with two predicates, \"are\" and \"add\", where only the latter is actually part of the procedure. To deal with this issue we first perform filtering similar to that used in stage 1, removing parses with predicates (verbs) outside of the domain-specific action lexicon we created in \u00a74.1. Next, we filter out irrelevant arguments in the parse. For example, the parse from the SRL model for sentence \"I add a lot of pepper because I love it.\" after filtering out irrelevant verb \"love\" is \"[ARG0: I] [V: add] [ARG1: a lot of pepper] [ARGM-CAU: because I love it]\", some arguments such as ARG0 and ARGM-CAU are clearly not contributing to the procedure. We provide a complete list of the filtered argument types in Appendix C. Unsupervised recipe segmentation (Kiddon et al., 2015) . The second baseline is to use the same trained segmentation model as in \u00a74.1 to segment selected key transcript sentences into verbs and arguments. We treat segmented predicates in the key sentence as procedural verbs, and segmented predicate arguments plus preposition arguments as procedural arguments.", |
|
"cite_spans": [ |
|
{ |
|
"start": 1524, |
|
"end": 1545, |
|
"text": "(Kiddon et al., 2015)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Extraction From Utterances", |
|
"sec_num": "5.1" |
|
}, |
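A minimal sketch of the utterance-side extraction just described: keep SRL parses whose predicate is in the cooking lexicon, then drop non-procedural argument roles. The parse format and the exact set of dropped roles are simplified assumptions (the paper's full list is in Appendix C).

```python
# Sketch of the utterance-side extraction: lexicon filtering on predicates,
# then removal of argument roles that do not contribute to the procedure.
DROPPED_ROLES = {"ARG0", "ARGM-CAU", "ARGM-DIS", "ARGM-MOD"}  # illustrative subset


def extract_tuples(srl_parses, lexicon):
    tuples = []
    for parse in srl_parses:
        verb = parse["verb"].lower()
        if verb not in lexicon:                # e.g. drop "are", "love"
            continue
        args = [text for role, text in parse["arguments"].items()
                if role not in DROPPED_ROLES]
        tuples.append((verb, *args))
    return tuples


# Example: "I add a lot of pepper because I love it."
parses = [
    {"verb": "add", "arguments": {"ARG0": "I", "ARG1": "a lot of pepper",
                                  "ARGM-CAU": "because I love it"}},
    {"verb": "love", "arguments": {"ARG0": "I", "ARG1": "it"}},
]
print(extract_tuples(parses, {"add"}))  # [('add', 'a lot of pepper')]
```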
|
{ |
|
"text": "We also examine a baseline that utilizes two forms of visual information in videos: actions and objects. We predict both verbs and nouns of a given video clip via a state-of-the-art action detection model TSM , 5 trained on the EpicKitchen (Damen et al., 2018a) dataset. 6 For each video, we extract 5-sec video segments and feed into the action detection model. The outputs of the models are in a predefined set of labels of verbs (actions) and nouns (objects). 7 We directly combine the outputs from the model on each video segment, aggregate and temporally align them with key clips/sentences, forming the final output.", |
|
"cite_spans": [ |
|
{ |
|
"start": 240, |
|
"end": 261, |
|
"text": "(Damen et al., 2018a)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 271, |
|
"end": 272, |
|
"text": "6", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 463, |
|
"end": 464, |
|
"text": "7", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Extraction From Video", |
|
"sec_num": "5.2" |
|
}, |
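A sketch of aggregating the segment-level detections and temporally aligning them with key clips, as described above; the detection tuple format (start, end, verb, noun) and clip format (clip_id, start, end) are assumptions for illustration.

```python
# Sketch of aggregating per-segment action/object detections and aligning them
# to key clips by temporal overlap. Input formats are illustrative.
def align_detections_to_clips(detections, clips):
    """detections: list of (start, end, verb, noun); clips: list of (clip_id, start, end)."""
    per_clip = {clip_id: {"verbs": set(), "nouns": set()} for clip_id, _, _ in clips}
    for d_start, d_end, verb, noun in detections:
        for clip_id, c_start, c_end in clips:
            overlap = min(d_end, c_end) - max(d_start, c_start)
            if overlap > 0:                    # the 5-sec segment overlaps this clip
                per_clip[clip_id]["verbs"].add(verb)
                per_clip[clip_id]["nouns"].add(noun)
    return per_clip
```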
|
{ |
|
"text": "Finally, to take advantage of the fact that utterance and video provide complementary views, we perform multimodal fusion of the results of both of these model varieties. We adopt a simple method of fusion by taking the union of the verbs/actions and arguments/objects respectively from the best performing utterance-only model and the visual detection model.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Utterance and Video Fusion", |
|
"sec_num": "5.3" |
|
}, |
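A minimal sketch of the naive fusion step described above: a per-clip union of the utterance-side and video-side outputs (the dict layout is illustrative).

```python
# Sketch of the naive multimodal fusion: union of verbs and of arguments from
# the utterance-only model and the visual detection model, for one key clip.
def fuse(utterance_out, video_out):
    """Each input: dict with 'verbs' and 'args' sets for one key clip."""
    return {
        "verbs": utterance_out["verbs"] | video_out["verbs"],
        "args": utterance_out["args"] | video_out["args"],
    }
```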
|
{ |
|
"text": "We propose evaluation metrics and provide evaluation results on our annotated dataset for both of the two stages: key clip selection and structured procedural extraction. Detailed reproducibility information about the experiments are in Appendix F. Besides quantitative evaluation and qualitative evaluations, we also analyze the key challenges of this task.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "In this section, we evaluate results of the key clip selection described in \u00a74. We evaluate using the accuracy, precision, recall and F1 score for the binary classification problem of whether a given clip in the video is a key clip. The results are shown in Table 4 . We compare parsing-based heuristic models and supervised neural models, with ablations (model details in Appendix B). From the experimental results in Table 4 , we can see that:", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 258, |
|
"end": 266, |
|
"text": "Table 4", |
|
"ref_id": "TABREF6" |
|
}, |
|
{ |
|
"start": 420, |
|
"end": 427, |
|
"text": "Table 4", |
|
"ref_id": "TABREF6" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Extraction Stage 1: Key Clip Selection", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "1. Unsupervised heuristic methods perform worse than neural models with training data. This is despite the fact that the dataset used for training neural models has a different data distribution and domain from the test set. 2. Among heuristic methods, pretrained SRL is better than Kiddon et al. (2015) even though the second is trained on transcript text from YouCook2 videos. One possible reason is that the unsupervised segmentation method was specially designed for recipe texts, which are mostly simple, concise and imperative sentences found in recipe books, while the transcript is full of noise and tends to have longer, more complicated, and oral-style English. 3. Post-processing significantly improves the SRL model, showing that filtering unrelated arguments and incorporating the cooking lexicon helps, especially with reducing false positives. 4. Among neural method ablations, the model using only visual features performs worse than that using only text features. The best model for identifying key clips among proposed baselines uses both visual and text information in the neural model.", |
|
"cite_spans": [ |
|
{ |
|
"start": 283, |
|
"end": 303, |
|
"text": "Kiddon et al. (2015)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Extraction Stage 1: Key Clip Selection", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "Besides quantitative evaluation, we analyzed key clip identification results and found a number of observations. First, background introductions, advertisements for the YouTube channel, etc. can be relatively well classified due to major differences both visually and textually from procedural clips. Second, alignment and grounding between the visual and textual domains is crucial for key clip prediction, yet challenging. For example, the clip with the transcript sentence \"add more pepper according to your liking\" is identified as a key clip. However, it is in fact merely a suggestion made by the speaker about an imaginary scenario, rather than a real action performed and thus should not be regarded as a key procedure.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Extraction Stage 1: Key Clip Selection", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "In this stage, we perform key clip-level evaluation for structured procedural knowledge extraction by matching the ground truth and predicted structures with both exact match and two fuzzy scoring strategies. To better show how stage 1 performance affects the whole pipeline, we evaluate on both ground truth (oracle) and predicted key clips. Similarly to the evaluation of key clip selection, we compare the parsing-based methods ( \u00a75.1), as well as purposing the action detection results from video signals for our task. Besides, we compare utterance-only and video-only baselines with our naive multi-modal fusion method.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Extraction Stage 2: Structured Procedure Extraction", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "We evaluate with respect to precision, recall and the F1 measure. Similarly to the evaluation method used for SRL (Carreras and M\u00e0rquez, 2004) , precision (P) is the proportion of verbs or arguments predicted by a model which are correct, i.e. T P/#predicted where T P is the number of true positives. Recall (R) is the proportion of correct verbs or arguments which are predicted by a model, i.e. T P/#gold. The key here is how to calculate T P and we propose 3 methods: exact match, fuzzy matching, and partial fuzzy matching. The first is straight forward, we count true positives if and only if the predicted phrase is an exact string match in the gold phrases. However, because our task lies in the realm of open phrase extraction without predefined labels, it is unfairly strict to count only the exact string matches as T P . Also by design, the gold extraction results cannot always be found in the original transcript sentence (refer to \u00a73.2), so we are also unable to use token-based metrics as in sequence tagging (Sang and De Meulder, 2003) , or span-based metrics as in some question answering tasks (Rajpurkar et al., 2016) . Thus for the second metric we call \"fuzzy\", we leverage edit distance to enable fuzzy matching and assign a \"soft\" score for T P . In some cases, the two strings of quite different lengths will hurt the fuzzy score due to the nature of edit distance, even though one string is a substring of another. To get around this, we propose a third metric, \"partial fuzzy\" to get the score of the best matching substring with the length of the shorter string in comparison. Note that this third metric will bias towards shorter, correct phrases and thus we should have a holistic view of all 3 metrics during the evaluation. Details of two fuzzy metrics are described in Appendix D. Table 5 illustrates evaluation results:", |
|
"cite_spans": [ |
|
{ |
|
"start": 114, |
|
"end": 142, |
|
"text": "(Carreras and M\u00e0rquez, 2004)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 1025, |
|
"end": 1052, |
|
"text": "(Sang and De Meulder, 2003)", |
|
"ref_id": "BIBREF41" |
|
}, |
|
{ |
|
"start": 1113, |
|
"end": 1137, |
|
"text": "(Rajpurkar et al., 2016)", |
|
"ref_id": "BIBREF37" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1814, |
|
"end": 1821, |
|
"text": "Table 5", |
|
"ref_id": "TABREF8" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Extraction Stage 2: Structured Procedure Extraction", |
|
"sec_num": "6.2" |
|
}, |
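A sketch of the three matching strategies for counting (soft) true positives; the exact normalization of the edit-distance score is an assumption here, with the paper's precise definitions in Appendix D.

```python
# Sketch of exact / fuzzy / partial fuzzy matching between a predicted phrase
# and a gold phrase. Fuzzy scores are in [0, 1], with 1 meaning identical.
def edit_distance(a, b):
    # standard Levenshtein distance with a single rolling row
    dp = list(range(len(b) + 1))
    for i, ca in enumerate(a, 1):
        prev, dp[0] = dp[0], i
        for j, cb in enumerate(b, 1):
            prev, dp[j] = dp[j], min(dp[j] + 1, dp[j - 1] + 1, prev + (ca != cb))
    return dp[-1]


def exact_match(pred, gold):
    return float(pred == gold)


def fuzzy_match(pred, gold):
    # soft true-positive score: 1 minus the normalized edit distance
    return 1.0 - edit_distance(pred, gold) / max(len(pred), len(gold), 1)


def partial_fuzzy_match(pred, gold):
    # best fuzzy score of the shorter string against any equally long substring
    # of the longer one (biases towards shorter, correct phrases)
    short, long_ = sorted((pred, gold), key=len)
    return max(fuzzy_match(short, long_[i:i + len(short)])
               for i in range(len(long_) - len(short) + 1)) if short else 0.0
```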
|
{ |
|
"text": "1. Argument extraction is much more challenging compared to verb extraction, according the results: arguments contain more complex types of phrases (e.g. objects, location, time, etc.) and are longer in length. It is hard to identify complex arguments with our current heuristic or unsupervised baselines and thus the need for better supervised or semi-supervised models. 2. Heuristic SRL methods perform better than the unsupervised segmentation model even though the second is trained on our corpus. This demonstrates the generality of SRL models, but the heuristics applied at the output of SRL models still improve the performance by reducing false positives. 3. The visual-only method performs the worst, mainly because of the domain gap between visual detection model outputs and our annotated verbs and arguments. Other reasons include: the closed label set predefined in EpicKitchen; challenges in domain transferring from closed to open extraction; different video data distribution between EpicKitchen (for training) and our dataset (YouCook2, for testing); limited performance of video detection model itself. 4. Naive multimodal fusion leads to an overall performance drop to below the utterance-only model, partly due to the differences in video data distribution and domain, as well as the limitation of the predefined set of verbs and nouns in the EpicKitchen dataset, implying the need for better multimodal fusion method. Unsurprisingly, the recall for verb extraction raises after the fusion, suggesting that action detection in videos helps with the coverage. The drop in argument extraction suggests the complexity of arguments in our open extraction setting: it should be more than mere object detection.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Extraction Stage 2: Structured Procedure Extraction", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "Besides quantitative results, we also showcase qualitative analysis of example extraction outputs in Appendix E. From both, we suggest that there are two key challenges moving forward: Verb extraction: We find that verb ellipsis is common in transcripts. The transcript text contains sentences where key action \"verbs\" do not have verb part-of-speech in the sentence. For example, in the sentence \"give it a flip ...\" with the annotation (\"flip\", \"pancake\"), the model detects \"give\" as the verb rather than \"flip\". Currently all our baselines are highly reliant on a curated lexicon for verb selection and thus such cases will get filtered out. How to deal with such cases with general verbs like make, give, do remains challenging and requires extracting from the contexts. Argument extraction: Speech-to-text errors are intrinsic in automatically acquired transcripts and cause problems during parsing that cascade. Examples are that \"add flour\" being recognized as \"add flower\" and \"sriracha sauce\" being recognized as \"sarrah cha sauce\" causing wrong extraction outputs. Coreference and ellipsis are also challenging and hurting current benchmark performance, as our baselines do not tackle any of these explicitly. Visual co-reference and language grounding (Huang et al., 2018 (Huang et al., , 2017 ) provides a feasible method for us to tackle these cases in the future.", |
|
"cite_spans": [ |
|
{ |
|
"start": 1264, |
|
"end": 1283, |
|
"text": "(Huang et al., 2018", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 1284, |
|
"end": 1305, |
|
"text": "(Huang et al., , 2017", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Extraction Stage 2: Structured Procedure Extraction", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "Text-based procedural knowledge extraction. Procedural text understanding and knowledge extraction (Chu et al., 2017; Park and Motahari Nezhad, 2018; Kiddon et al., 2015; Jermsurawong and Habash, 2015; Liu et al., 2016; Long et al., 2016; Maeta et al., 2015; Malmaud et al., 2014; Artzi and Zettlemoyer, 2013; Kuehne et al., 2017) has been studied for years on step-wise textual data such as WikiHow. Chu et al. (2017) extracted open-domain knowledge from how-to communities. Recently also studied to adopt the well-written how-to data as weak supervision for instructional video understanding. Unlike existing work on action graph/dependency extraction (Kiddon et al., 2015; Jermsurawong and Habash, 2015) , our approach differs as we extract knowledge from the visual signals and transcripts directly, not from imperative recipe texts. Instructional video understanding. Beyond image semantics (Yatskar et al., 2016) , unlike existing tasks for learning from instructional video (Zhou et al., 2018c; Tang et al., 2019; Alayrac et al., 2016; Song et al., 2015; Sener et al., 2015; Huang et al., 2016; Sun et al., 2019b,a; Plummer et al., 2017; Palaskar et al., 2019) , combining video & text information in procedures (Yagcioglu et al., 2018; Fried et al., 2020) , visual-linguistic reference resolution (Huang et al., 2018 (Huang et al., , 2017 , visual planning (Chang et al., 2019) , joint learning of object and actions Richard et al., 2018; Gao et al., 2017; Damen et al., 2018b) , pretraining joint embedding of high level sentence with video clips (Sun et al., 2019b; Miech et al., 2019) , our task proposal requires explicit structured knowledge tuple extraction. In addition to closely related work ( \u00a73) there is a wide literature (Malmaud et al., 2015; Zhou et al., 2018b; Ushiku et al., 2017; Nishimura et al., 2019; Tang et al., 2019; Huang et al., 2016; Ushiku et al., 2017 ) that aims to predict/align dense procedural captions given the video, which are the most similar works to ours. Zhou et al. (2018c) extracted temporal procedures and then generated captioning for each procedure. Sanabria et al. (2018) proposes a multimodal abstractive summarization for how-to videos with either human labeled or speech-to-text transcript. Alayrac et al. (2016) also introduces an unsupervised step learning method from instructional videos. Inspired by cross-task sharing , which is a weakly supervised method to learn shared actions between tasks, fine grained action and entity are important for sharing similar knowledge between various tasks. We focus on structured knowledge of fine-grained actions and entities.Visual-linguistic coreference resolution (Huang et al., 2018 (Huang et al., , 2017 is among one of the open challenges for our proposed task.", |
|
"cite_spans": [ |
|
{ |
|
"start": 99, |
|
"end": 117, |
|
"text": "(Chu et al., 2017;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 118, |
|
"end": 149, |
|
"text": "Park and Motahari Nezhad, 2018;", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 150, |
|
"end": 170, |
|
"text": "Kiddon et al., 2015;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 171, |
|
"end": 201, |
|
"text": "Jermsurawong and Habash, 2015;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 202, |
|
"end": 219, |
|
"text": "Liu et al., 2016;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 220, |
|
"end": 238, |
|
"text": "Long et al., 2016;", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 239, |
|
"end": 258, |
|
"text": "Maeta et al., 2015;", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 259, |
|
"end": 280, |
|
"text": "Malmaud et al., 2014;", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 281, |
|
"end": 309, |
|
"text": "Artzi and Zettlemoyer, 2013;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 310, |
|
"end": 330, |
|
"text": "Kuehne et al., 2017)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 392, |
|
"end": 418, |
|
"text": "WikiHow. Chu et al. (2017)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 654, |
|
"end": 675, |
|
"text": "(Kiddon et al., 2015;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 676, |
|
"end": 706, |
|
"text": "Jermsurawong and Habash, 2015)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 896, |
|
"end": 918, |
|
"text": "(Yatskar et al., 2016)", |
|
"ref_id": "BIBREF52" |
|
}, |
|
{ |
|
"start": 981, |
|
"end": 1001, |
|
"text": "(Zhou et al., 2018c;", |
|
"ref_id": "BIBREF55" |
|
}, |
|
{ |
|
"start": 1002, |
|
"end": 1020, |
|
"text": "Tang et al., 2019;", |
|
"ref_id": "BIBREF49" |
|
}, |
|
{ |
|
"start": 1021, |
|
"end": 1042, |
|
"text": "Alayrac et al., 2016;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 1043, |
|
"end": 1061, |
|
"text": "Song et al., 2015;", |
|
"ref_id": "BIBREF46" |
|
}, |
|
{ |
|
"start": 1062, |
|
"end": 1081, |
|
"text": "Sener et al., 2015;", |
|
"ref_id": "BIBREF43" |
|
}, |
|
{ |
|
"start": 1082, |
|
"end": 1101, |
|
"text": "Huang et al., 2016;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 1102, |
|
"end": 1122, |
|
"text": "Sun et al., 2019b,a;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1123, |
|
"end": 1144, |
|
"text": "Plummer et al., 2017;", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 1145, |
|
"end": 1167, |
|
"text": "Palaskar et al., 2019)", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 1219, |
|
"end": 1243, |
|
"text": "(Yagcioglu et al., 2018;", |
|
"ref_id": "BIBREF51" |
|
}, |
|
{ |
|
"start": 1244, |
|
"end": 1263, |
|
"text": "Fried et al., 2020)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 1305, |
|
"end": 1324, |
|
"text": "(Huang et al., 2018", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 1325, |
|
"end": 1346, |
|
"text": "(Huang et al., , 2017", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 1365, |
|
"end": 1385, |
|
"text": "(Chang et al., 2019)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 1425, |
|
"end": 1446, |
|
"text": "Richard et al., 2018;", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 1447, |
|
"end": 1464, |
|
"text": "Gao et al., 2017;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 1465, |
|
"end": 1485, |
|
"text": "Damen et al., 2018b)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 1556, |
|
"end": 1575, |
|
"text": "(Sun et al., 2019b;", |
|
"ref_id": "BIBREF48" |
|
}, |
|
{ |
|
"start": 1576, |
|
"end": 1595, |
|
"text": "Miech et al., 2019)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 1742, |
|
"end": 1764, |
|
"text": "(Malmaud et al., 2015;", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 1765, |
|
"end": 1784, |
|
"text": "Zhou et al., 2018b;", |
|
"ref_id": "BIBREF54" |
|
}, |
|
{ |
|
"start": 1785, |
|
"end": 1805, |
|
"text": "Ushiku et al., 2017;", |
|
"ref_id": "BIBREF50" |
|
}, |
|
{ |
|
"start": 1806, |
|
"end": 1829, |
|
"text": "Nishimura et al., 2019;", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 1830, |
|
"end": 1848, |
|
"text": "Tang et al., 2019;", |
|
"ref_id": "BIBREF49" |
|
}, |
|
{ |
|
"start": 1849, |
|
"end": 1868, |
|
"text": "Huang et al., 2016;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 1869, |
|
"end": 1888, |
|
"text": "Ushiku et al., 2017", |
|
"ref_id": "BIBREF50" |
|
}, |
|
{ |
|
"start": 2003, |
|
"end": 2022, |
|
"text": "Zhou et al. (2018c)", |
|
"ref_id": "BIBREF55" |
|
}, |
|
{ |
|
"start": 2103, |
|
"end": 2125, |
|
"text": "Sanabria et al. (2018)", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 2667, |
|
"end": 2686, |
|
"text": "(Huang et al., 2018", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 2687, |
|
"end": 2708, |
|
"text": "(Huang et al., , 2017", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "We propose a multimodal open procedural knowledge extraction task, present a new evaluation dataset, produce benchmarks with various methods, and analyze the difficulties in the task. Meanwhile we investigate the limit of existing methods and many open challenges for procedural knowledge acquisition, including: to better deal with cases of coreference and ellipsis in visual-grounded languages; exploit cross-modalities of information with more robust, semi/un-supervised models; potential improvement from structured knowledge in downstream tasks (e.g., video captioning).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions & Open Challenges", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "A common dataset we do not include here is HowTo100M(Miech et al., 2019) as it does not contain any annotations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We use the Jaccard ratio between the annotated tokens of two annotators for Q2's agreement. Verb annotations have a higher agreement at 0.77 than that of arguments at 0.72.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The SRL model is used in this stage only as a verb identifier, with other output information used in stage 2.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://github.com/epic-kitchens/ action-models 6 https://epic-kitchens.github.io/2019", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Notably, this contrasts to our setting of attempting to recognize into an open label set, which upper-bounds the accuracy of any model with a limited label set.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Unsupervised learning from narrated instruction videos", |
|
"authors": [ |
|
{ |
|
"first": "Piotr", |
|
"middle": [], |
|
"last": "Jean-Baptiste Alayrac", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nishant", |
|
"middle": [], |
|
"last": "Bojanowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Josef", |
|
"middle": [], |
|
"last": "Agrawal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Sivic", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Simon", |
|
"middle": [], |
|
"last": "Laptev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Lacoste-Julien", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4575--4583", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jean-Baptiste Alayrac, Piotr Bojanowski, Nishant Agrawal, Josef Sivic, Ivan Laptev, and Simon Lacoste-Julien. 2016. Unsupervised learning from narrated instruction videos. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 4575-4583.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Weakly supervised learning of semantic parsers for mapping instructions to actions", |
|
"authors": [ |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Artzi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "49--62", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yoav Artzi and Luke Zettlemoyer. 2013. Weakly su- pervised learning of semantic parsers for mapping instructions to actions. Transactions of the Associa- tion for Computational Linguistics, 1:49-62.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Neural machine translation by jointly learning to align and translate", |
|
"authors": [ |
|
{ |
|
"first": "Dzmitry", |
|
"middle": [], |
|
"last": "Bahdanau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1409.0473" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Ben- gio. 2014. Neural machine translation by jointly learning to align and translate. arXiv preprint arXiv:1409.0473.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Introduction to the conll-2004 shared task: Semantic role labeling", |
|
"authors": [ |
|
{ |
|
"first": "Xavier", |
|
"middle": [], |
|
"last": "Carreras", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llu\u00eds", |
|
"middle": [], |
|
"last": "M\u00e0rquez", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of the Eighth Conference on Computational Natural Language Learning (CoNLL-2004) at HLT-NAACL 2004", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "89--97", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xavier Carreras and Llu\u00eds M\u00e0rquez. 2004. Introduc- tion to the conll-2004 shared task: Semantic role labeling. In Proceedings of the Eighth Confer- ence on Computational Natural Language Learning (CoNLL-2004) at HLT-NAACL 2004, pages 89-97.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Procedure planning in instructional videos", |
|
"authors": [ |
|
{ |
|
"first": "Chien-Yi", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "De-An", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danfei", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ehsan", |
|
"middle": [], |
|
"last": "Adeli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Fei-Fei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Juan", |
|
"middle": [ |
|
"Carlos" |
|
], |
|
"last": "Niebles", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chien-Yi Chang, De-An Huang, Danfei Xu, Ehsan Adeli, Li Fei-Fei, and Juan Carlos Niebles. 2019. Procedure planning in instructional videos. ArXiv, abs/1907.01172.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Distilling task knowledge from how-to communities", |
|
"authors": [ |
|
{ |
|
"first": "Niket", |
|
"middle": [], |
|
"last": "Cuong Xuan Chu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gerhard", |
|
"middle": [], |
|
"last": "Tandon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Weikum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 26th International Conference on World Wide Web", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "805--814", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Cuong Xuan Chu, Niket Tandon, and Gerhard Weikum. 2017. Distilling task knowledge from how-to com- munities. In Proceedings of the 26th International Conference on World Wide Web, pages 805-814. In- ternational World Wide Web Conferences Steering Committee.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Scaling egocentric vision: The epic-kitchens dataset", |
|
"authors": [ |
|
{ |
|
"first": "Dima", |
|
"middle": [], |
|
"last": "Damen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hazel", |
|
"middle": [], |
|
"last": "Doughty", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Giovanni", |
|
"middle": [ |
|
"Maria" |
|
], |
|
"last": "Farinella", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sanja", |
|
"middle": [], |
|
"last": "Fidler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antonino", |
|
"middle": [], |
|
"last": "Furnari", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Evangelos", |
|
"middle": [], |
|
"last": "Kazakos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Davide", |
|
"middle": [], |
|
"last": "Moltisanti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "Munro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Toby", |
|
"middle": [], |
|
"last": "Perrett", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Will", |
|
"middle": [], |
|
"last": "Price", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Wray", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "European Conference on Computer Vision (ECCV)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dima Damen, Hazel Doughty, Giovanni Maria Farinella, Sanja Fidler, Antonino Furnari, Evangelos Kazakos, Davide Moltisanti, Jonathan Munro, Toby Perrett, Will Price, and Michael Wray. 2018a. Scal- ing egocentric vision: The epic-kitchens dataset. In European Conference on Computer Vision (ECCV).", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Scaling egocentric vision: The epic-kitchens dataset", |
|
"authors": [ |
|
{ |
|
"first": "Dima", |
|
"middle": [], |
|
"last": "Damen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hazel", |
|
"middle": [], |
|
"last": "Doughty", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Giovanni", |
|
"middle": [ |
|
"Maria" |
|
], |
|
"last": "Farinella", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sanja", |
|
"middle": [], |
|
"last": "Fidler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antonino", |
|
"middle": [], |
|
"last": "Furnari", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Evangelos", |
|
"middle": [], |
|
"last": "Kazakos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Davide", |
|
"middle": [], |
|
"last": "Moltisanti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "Munro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Toby", |
|
"middle": [], |
|
"last": "Perrett", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Will", |
|
"middle": [], |
|
"last": "Price", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the European Conference on Computer Vision (ECCV)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "720--736", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dima Damen, Hazel Doughty, Giovanni Maria Farinella, Sanja Fidler, Antonino Furnari, Evangelos Kazakos, Davide Moltisanti, Jonathan Munro, Toby Perrett, Will Price, et al. 2018b. Scaling egocentric vision: The epic-kitchens dataset. In Proceedings of the European Conference on Computer Vision (ECCV), pages 720-736.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Imagenet: A large-scale hierarchical image database", |
|
"authors": [ |
|
{ |
|
"first": "Jia", |
|
"middle": [], |
|
"last": "Deng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Dong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Li-Jia", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Fei-Fei", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "2009 IEEE conference on computer vision and pattern recognition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "248--255", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. 2009. Imagenet: A large-scale hier- archical image database. In 2009 IEEE conference on computer vision and pattern recognition, pages 248-255. Ieee.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1810.04805" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. Bert: Pre-training of deep bidirectional transformers for language understand- ing. arXiv preprint arXiv:1810.04805.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Identifying relations for open information extraction", |
|
"authors": [ |
|
{ |
|
"first": "Anthony", |
|
"middle": [], |
|
"last": "Fader", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Soderland", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oren", |
|
"middle": [], |
|
"last": "Etzioni", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the conference on empirical methods in natural language processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1535--1545", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Anthony Fader, Stephen Soderland, and Oren Etzioni. 2011. Identifying relations for open information ex- traction. In Proceedings of the conference on empir- ical methods in natural language processing, pages 1535-1545. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Learning to segment actions from observation and narration", |
|
"authors": [ |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Fried", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jean-Baptiste", |
|
"middle": [], |
|
"last": "Alayrac", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Phil", |
|
"middle": [], |
|
"last": "Blunsom", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Dyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aida", |
|
"middle": [], |
|
"last": "Nematzadeh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniel Fried, Jean-Baptiste Alayrac, Phil Blunsom, Chris Dyer, Stephen Clark, and Aida Nematzadeh. 2020. Learning to segment actions from observation and narration.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Tall: Temporal activity localization via language query", |
|
"authors": [ |
|
{ |
|
"first": "Jiyang", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chen", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhenheng", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ram", |
|
"middle": [], |
|
"last": "Nevatia", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the IEEE International Conference on Computer Vision", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5267--5275", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiyang Gao, Chen Sun, Zhenheng Yang, and Ram Neva- tia. 2017. Tall: Temporal activity localization via language query. In Proceedings of the IEEE Interna- tional Conference on Computer Vision, pages 5267- 5275.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Allennlp: A deep semantic natural language processing platform", |
|
"authors": [ |
|
{ |
|
"first": "Matt", |
|
"middle": [], |
|
"last": "Gardner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joel", |
|
"middle": [], |
|
"last": "Grus", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Neumann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oyvind", |
|
"middle": [], |
|
"last": "Tafjord", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pradeep", |
|
"middle": [], |
|
"last": "Dasigi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nelson", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Peters", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Schmitz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1803.07640" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matt Gardner, Joel Grus, Mark Neumann, Oyvind Tafjord, Pradeep Dasigi, Nelson Liu, Matthew Pe- ters, Michael Schmitz, and Luke Zettlemoyer. 2018. Allennlp: A deep semantic natural language process- ing platform. arXiv preprint arXiv:1803.07640.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Automatic labeling of semantic roles", |
|
"authors": [ |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Gildea", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Jurafsky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Computational linguistics", |
|
"volume": "28", |
|
"issue": "3", |
|
"pages": "245--288", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniel Gildea and Daniel Jurafsky. 2002. Automatic la- beling of semantic roles. Computational linguistics, 28(3):245-288.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Deep residual learning for image recognition", |
|
"authors": [ |
|
{ |
|
"first": "Kaiming", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiangyu", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shaoqing", |
|
"middle": [], |
|
"last": "Ren", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jian", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the IEEE conference on computer vision and pattern recognition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "770--778", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. 2016. Deep residual learning for image recog- nition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770- 778.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Finding \"it\": Weakly-supervised, reference-aware visual grounding in instructional videos", |
|
"authors": [ |
|
{ |
|
"first": "De-An", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shyamal", |
|
"middle": [], |
|
"last": "Buch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lucio", |
|
"middle": [], |
|
"last": "Dery", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Animesh", |
|
"middle": [], |
|
"last": "Garg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Fei-Fei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Juan", |
|
"middle": [ |
|
"Carlos" |
|
], |
|
"last": "Niebles", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "De-An Huang, Shyamal Buch, Lucio Dery, Animesh Garg, Li Fei-Fei, and Juan Carlos Niebles. 2018. Finding \"it\": Weakly-supervised, reference-aware visual grounding in instructional videos. In IEEE Conference on Computer Vision and Pattern Recog- nition (CVPR).", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Connectionist temporal modeling for weakly supervised action labeling", |
|
"authors": [ |
|
{ |
|
"first": "De-An", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Fei-Fei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Juan", |
|
"middle": [ |
|
"Carlos" |
|
], |
|
"last": "Niebles", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "European Conference on Computer Vision", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "137--153", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "De-An Huang, Li Fei-Fei, and Juan Carlos Niebles. 2016. Connectionist temporal modeling for weakly supervised action labeling. In European Conference on Computer Vision, pages 137-153. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Unsupervised visual-linguistic reference resolution in instructional videos", |
|
"authors": [ |
|
{ |
|
"first": "De-An", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joseph", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Lim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Fei-Fei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Juan", |
|
"middle": [ |
|
"Carlos" |
|
], |
|
"last": "Niebles", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2183--2192", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "De-An Huang, Joseph J Lim, Li Fei-Fei, and Juan Car- los Niebles. 2017. Unsupervised visual-linguistic reference resolution in instructional videos. In Pro- ceedings of the IEEE Conference on Computer Vi- sion and Pattern Recognition, pages 2183-2192.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Predicting the structure of cooking recipes", |
|
"authors": [ |
|
{ |
|
"first": "Jermsak", |
|
"middle": [], |
|
"last": "Jermsurawong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nizar", |
|
"middle": [], |
|
"last": "Habash", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "781--786", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jermsak Jermsurawong and Nizar Habash. 2015. Pre- dicting the structure of cooking recipes. In Proceed- ings of the 2015 Conference on Empirical Methods in Natural Language Processing, pages 781-786.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Mise en place: Unsupervised interpretation of instructional recipes", |
|
"authors": [ |
|
{ |
|
"first": "Chlo\u00e9", |
|
"middle": [], |
|
"last": "Kiddon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ganesa", |
|
"middle": [ |
|
"Thandavam" |
|
], |
|
"last": "Ponnuraj", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yejin", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chlo\u00e9 Kiddon, Ganesa Thandavam Ponnuraj, Luke S. Zettlemoyer, and Yejin Choi. 2015. Mise en place: Unsupervised interpretation of instructional recipes. In EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Adam: A method for stochastic optimization", |
|
"authors": [ |
|
{ |
|
"first": "Diederik", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Kingma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Ba", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1412.6980" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diederik P Kingma and Jimmy Ba. 2014. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Weakly supervised learning of actions from transcripts. Computer Vision and Image Understanding", |
|
"authors": [ |
|
{ |
|
"first": "Hilde", |
|
"middle": [], |
|
"last": "Kuehne", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Richard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Juergen", |
|
"middle": [], |
|
"last": "Gall", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "163", |
|
"issue": "", |
|
"pages": "78--89", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hilde Kuehne, Alexander Richard, and Juergen Gall. 2017. Weakly supervised learning of actions from transcripts. Computer Vision and Image Under- standing, 163:78-89.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Hake: Human activity knowledge engine", |
|
"authors": [ |
|
{ |
|
"first": "Yong-Lu", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Liang", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xijie", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xinpeng", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ze", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mingyang", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shiyi", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hao-Shu", |
|
"middle": [], |
|
"last": "Fang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cewu", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1904.06539" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yong-Lu Li, Liang Xu, Xijie Huang, Xinpeng Liu, Ze Ma, Mingyang Chen, Shiyi Wang, Hao-Shu Fang, and Cewu Lu. 2019. Hake: Human activity knowl- edge engine. arXiv preprint arXiv:1904.06539.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Tsm: Temporal shift module for efficient video understanding", |
|
"authors": [ |
|
{ |
|
"first": "Ji", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chuang", |
|
"middle": [], |
|
"last": "Gan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Song", |
|
"middle": [], |
|
"last": "Han", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the IEEE International Conference on Computer Vision", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ji Lin, Chuang Gan, and Song Han. 2019. Tsm: Tem- poral shift module for efficient video understanding. In Proceedings of the IEEE International Confer- ence on Computer Vision.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Jointly learning grounded task structures from language instruction and visual demonstration", |
|
"authors": [ |
|
{ |
|
"first": "Changsong", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shaohua", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sari", |
|
"middle": [], |
|
"last": "Saba-Sadiya", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nishant", |
|
"middle": [], |
|
"last": "Shukla", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yunzhong", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Song-Chun", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joyce", |
|
"middle": [], |
|
"last": "Chai", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1482--1492", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Changsong Liu, Shaohua Yang, Sari Saba-Sadiya, Nishant Shukla, Yunzhong He, Song-Chun Zhu, and Joyce Chai. 2016. Jointly learning grounded task structures from language instruction and visual demonstration. In Proceedings of the 2016 Con- ference on Empirical Methods in Natural Language Processing, pages 1482-1492.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Simpler context-dependent logical forms via model projections", |
|
"authors": [ |
|
{ |
|
"first": "Reginald", |
|
"middle": [], |
|
"last": "Long", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Panupong", |
|
"middle": [], |
|
"last": "Pasupat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Percy", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1606.05378" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Reginald Long, Panupong Pasupat, and Percy Liang. 2016. Simpler context-dependent logical forms via model projections. arXiv preprint arXiv:1606.05378.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "A framework for procedural text understanding", |
|
"authors": [ |
|
{ |
|
"first": "Hirokuni", |
|
"middle": [], |
|
"last": "Maeta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tetsuro", |
|
"middle": [], |
|
"last": "Sasada", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shinsuke", |
|
"middle": [], |
|
"last": "Mori", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 14th International Conference on Parsing Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "50--60", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hirokuni Maeta, Tetsuro Sasada, and Shinsuke Mori. 2015. A framework for procedural text understand- ing. In Proceedings of the 14th International Con- ference on Parsing Technologies, pages 50-60.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "What's cookin'? interpreting cooking videos using text, speech and vision", |
|
"authors": [ |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "Malmaud", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vivek", |
|
"middle": [], |
|
"last": "Rathod", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nicholas", |
|
"middle": [], |
|
"last": "Johnston", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Rabinovich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Murphy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "143--152", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/v1/N15-1015" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jonathan Malmaud, Jonathan Huang, Vivek Rathod, Nicholas Johnston, Andrew Rabinovich, and Kevin Murphy. 2015. What's cookin'? interpreting cook- ing videos using text, speech and vision. In Pro- ceedings of the 2015 Conference of the North Amer- ican Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 143-152, Denver, Colorado. Association for Compu- tational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Cooking with semantics", |
|
"authors": [ |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "Malmaud", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Earl", |
|
"middle": [], |
|
"last": "Wagner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nancy", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Murphy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the ACL 2014 Workshop on Semantic Parsing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "33--38", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jonathan Malmaud, Earl Wagner, Nancy Chang, and Kevin Murphy. 2014. Cooking with semantics. In Proceedings of the ACL 2014 Workshop on Semantic Parsing, pages 33-38.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "HowTo100M: Learning a Text-Video Embedding by Watching Hundred Million Narrated Video Clips", |
|
"authors": [ |
|
{ |
|
"first": "Antoine", |
|
"middle": [], |
|
"last": "Miech", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dimitri", |
|
"middle": [], |
|
"last": "Zhukov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jean-Baptiste", |
|
"middle": [], |
|
"last": "Alayrac", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Makarand", |
|
"middle": [], |
|
"last": "Tapaswi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Laptev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Josef", |
|
"middle": [], |
|
"last": "Sivic", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1906.03327" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Antoine Miech, Dimitri Zhukov, Jean-Baptiste Alayrac, Makarand Tapaswi, Ivan Laptev, and Josef Sivic. 2019. HowTo100M: Learning a Text-Video Embed- ding by Watching Hundred Million Narrated Video Clips. arXiv:1906.03327.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Wordnet: a lexical database for english", |
|
"authors": [ |
|
{ |
|
"first": "George", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Miller", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1995, |
|
"venue": "Communications of the ACM", |
|
"volume": "38", |
|
"issue": "11", |
|
"pages": "39--41", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "George A Miller. 1995. Wordnet: a lexical database for english. Communications of the ACM, 38(11):39- 41.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Algorithms for the assignment and transportation problems", |
|
"authors": [ |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Munkres", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1957, |
|
"venue": "Journal of the society for industrial and applied mathematics", |
|
"volume": "5", |
|
"issue": "1", |
|
"pages": "32--38", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "James Munkres. 1957. Algorithms for the assignment and transportation problems. Journal of the society for industrial and applied mathematics, 5(1):32-38.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Frame selection for producing recipe with pictures from an execution video of a recipe", |
|
"authors": [ |
|
{ |
|
"first": "Taichi", |
|
"middle": [], |
|
"last": "Nishimura", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Atsushi", |
|
"middle": [], |
|
"last": "Hashimoto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoko", |
|
"middle": [], |
|
"last": "Yamakata", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shinsuke", |
|
"middle": [], |
|
"last": "Mori", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 11th Workshop on Multimedia for Cooking and Eating Activities", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "9--16", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Taichi Nishimura, Atsushi Hashimoto, Yoko Yamakata, and Shinsuke Mori. 2019. Frame selection for pro- ducing recipe with pictures from an execution video of a recipe. In Proceedings of the 11th Workshop on Multimedia for Cooking and Eating Activities, pages 9-16. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Multimodal abstractive summarization for how2 videos", |
|
"authors": [ |
|
{ |
|
"first": "Shruti", |
|
"middle": [], |
|
"last": "Palaskar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jindrich", |
|
"middle": [], |
|
"last": "Libovick\u1ef3", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Spandana", |
|
"middle": [], |
|
"last": "Gella", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Florian", |
|
"middle": [], |
|
"last": "Metze", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1906.07901" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shruti Palaskar, Jindrich Libovick\u1ef3, Spandana Gella, and Florian Metze. 2019. Multimodal abstractive summarization for how2 videos. arXiv preprint arXiv:1906.07901.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Learning procedures from text: Codifying how-to procedures in deep neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Hogun", |
|
"middle": [], |
|
"last": "Park", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hamid Reza Motahari", |
|
"middle": [], |
|
"last": "Nezhad", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "International World Wide Web Conferences Steering Committee", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "351--358", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hogun Park and Hamid Reza Motahari Nezhad. 2018. Learning procedures from text: Codifying how-to procedures in deep neural networks. In Compan- ion Proceedings of the The Web Conference 2018, pages 351-358. International World Wide Web Con- ferences Steering Committee.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Enhancing video summarization via vision-language embedding", |
|
"authors": [ |
|
{ |
|
"first": "Bryan", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Plummer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Brown", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Svetlana", |
|
"middle": [], |
|
"last": "Lazebnik", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5781--5789", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bryan A Plummer, Matthew Brown, and Svetlana Lazebnik. 2017. Enhancing video summarization via vision-language embedding. In Proceedings of the IEEE Conference on Computer Vision and Pat- tern Recognition, pages 5781-5789.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Squad: 100,000+ questions for machine comprehension of text", |
|
"authors": [ |
|
{ |
|
"first": "Pranav", |
|
"middle": [], |
|
"last": "Rajpurkar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jian", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Konstantin", |
|
"middle": [], |
|
"last": "Lopyrev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Percy", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1606.05250" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pranav Rajpurkar, Jian Zhang, Konstantin Lopyrev, and Percy Liang. 2016. Squad: 100,000+ questions for machine comprehension of text. arXiv preprint arXiv:1606.05250.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "Dominikus Wetzel, Stefan Thater, Bernt Schiele, and Manfred Pinkal", |
|
"authors": [ |
|
{ |
|
"first": "Michaela", |
|
"middle": [], |
|
"last": "Regneri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marcus", |
|
"middle": [], |
|
"last": "Rohrbach", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dominikus", |
|
"middle": [], |
|
"last": "Wetzel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stefan", |
|
"middle": [], |
|
"last": "Thater", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bernt", |
|
"middle": [], |
|
"last": "Schiele", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manfred", |
|
"middle": [], |
|
"last": "Pinkal", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "25--36", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michaela Regneri, Marcus Rohrbach, Dominikus Wet- zel, Stefan Thater, Bernt Schiele, and Manfred Pinkal. 2013. Grounding action descriptions in videos. Transactions of the Association for Compu- tational Linguistics, 1:25-36.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "Action sets: Weakly supervised action segmentation without ordering constraints", |
|
"authors": [ |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Richard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hilde", |
|
"middle": [], |
|
"last": "Kuehne", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Juergen", |
|
"middle": [], |
|
"last": "Gall", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5987--5996", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexander Richard, Hilde Kuehne, and Juergen Gall. 2018. Action sets: Weakly supervised action seg- mentation without ordering constraints. In Proceed- ings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 5987-5996.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "How2: a large-scale dataset for multimodal language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Ramon", |
|
"middle": [], |
|
"last": "Sanabria", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ozan", |
|
"middle": [], |
|
"last": "Caglayan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shruti", |
|
"middle": [], |
|
"last": "Palaskar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Desmond", |
|
"middle": [], |
|
"last": "Elliott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lo\u00efc", |
|
"middle": [], |
|
"last": "Barrault", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lucia", |
|
"middle": [], |
|
"last": "Specia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Florian", |
|
"middle": [], |
|
"last": "Metze", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1811.00347" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ramon Sanabria, Ozan Caglayan, Shruti Palaskar, Desmond Elliott, Lo\u00efc Barrault, Lucia Specia, and Florian Metze. 2018. How2: a large-scale dataset for multimodal language understanding. arXiv preprint arXiv:1811.00347.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "Introduction to the conll-2003 shared task: Languageindependent named entity recognition", |
|
"authors": [ |
|
{ |
|
"first": "Erik", |
|
"middle": [ |
|
"F" |
|
], |
|
"last": "Sang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fien", |
|
"middle": [], |
|
"last": "De Meulder", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Erik F Sang and Fien De Meulder. 2003. Intro- duction to the conll-2003 shared task: Language- independent named entity recognition. arXiv preprint cs/0306050.", |
|
"links": null |
|
}, |
|
"BIBREF42": { |
|
"ref_id": "b42", |
|
"title": "Open language learning for information extraction", |
|
"authors": [ |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Schmitz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robert", |
|
"middle": [], |
|
"last": "Bart", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Soderland", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oren", |
|
"middle": [], |
|
"last": "Etzioni", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the 2012 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "523--534", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michael Schmitz, Robert Bart, Stephen Soderland, Oren Etzioni, et al. 2012. Open language learning for information extraction. In Proceedings of the 2012 Joint Conference on Empirical Methods in Nat- ural Language Processing and Computational Natu- ral Language Learning, pages 523-534. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF43": { |
|
"ref_id": "b43", |
|
"title": "Unsupervised semantic parsing of video collections", |
|
"authors": [ |
|
{ |
|
"first": "Ozan", |
|
"middle": [], |
|
"last": "Sener", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amir", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Zamir", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Silvio", |
|
"middle": [], |
|
"last": "Savarese", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ashutosh", |
|
"middle": [], |
|
"last": "Saxena", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the IEEE International Conference on Computer Vision", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4480--4488", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ozan Sener, Amir R Zamir, Silvio Savarese, and Ashutosh Saxena. 2015. Unsupervised semantic parsing of video collections. In Proceedings of the IEEE International Conference on Computer Vision, pages 4480-4488.", |
|
"links": null |
|
}, |
|
"BIBREF44": { |
|
"ref_id": "b44", |
|
"title": "Dense procedure captioning in narrated instructional videos", |
|
"authors": [ |
|
{ |
|
"first": "Botian", |
|
"middle": [], |
|
"last": "Shi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lei", |
|
"middle": [], |
|
"last": "Ji", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yaobo", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nan", |
|
"middle": [], |
|
"last": "Duan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peng", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhendong", |
|
"middle": [], |
|
"last": "Niu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Conference of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "6382--6391", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Botian Shi, Lei Ji, Yaobo Liang, Nan Duan, Peng Chen, Zhendong Niu, and Ming Zhou. 2019. Dense pro- cedure captioning in narrated instructional videos. In Proceedings of the 57th Conference of the Asso- ciation for Computational Linguistics, pages 6382- 6391.", |
|
"links": null |
|
}, |
|
"BIBREF45": { |
|
"ref_id": "b45", |
|
"title": "Simple bert models for relation extraction and semantic role labeling", |
|
"authors": [ |
|
{ |
|
"first": "Peng", |
|
"middle": [], |
|
"last": "Shi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1904.05255" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peng Shi and Jimmy Lin. 2019. Simple bert models for relation extraction and semantic role labeling. arXiv preprint arXiv:1904.05255.", |
|
"links": null |
|
}, |
|
"BIBREF46": { |
|
"ref_id": "b46", |
|
"title": "Tvsum: Summarizing web videos using titles", |
|
"authors": [ |
|
{ |
|
"first": "Yale", |
|
"middle": [], |
|
"last": "Song", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jordi", |
|
"middle": [], |
|
"last": "Vallmitjana", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amanda", |
|
"middle": [], |
|
"last": "Stent", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alejandro", |
|
"middle": [], |
|
"last": "Jaimes", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the IEEE conference on computer vision and pattern recognition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5179--5187", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yale Song, Jordi Vallmitjana, Amanda Stent, and Ale- jandro Jaimes. 2015. Tvsum: Summarizing web videos using titles. In Proceedings of the IEEE con- ference on computer vision and pattern recognition, pages 5179-5187.", |
|
"links": null |
|
}, |
|
"BIBREF47": { |
|
"ref_id": "b47", |
|
"title": "Learning video representations using contrastive bidirectional transformer", |
|
"authors": [ |
|
{ |
|
"first": "Chen", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fabien", |
|
"middle": [], |
|
"last": "Baradel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Murphy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cordelia", |
|
"middle": [], |
|
"last": "Schmid", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chen Sun, Fabien Baradel, Kevin Murphy, and Cordelia Schmid. 2019a. Learning video represen- tations using contrastive bidirectional transformer.", |
|
"links": null |
|
}, |
|
"BIBREF48": { |
|
"ref_id": "b48", |
|
"title": "Videobert: A joint model for video and language representation learning", |
|
"authors": [ |
|
{ |
|
"first": "Chen", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Austin", |
|
"middle": [], |
|
"last": "Myers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Carl", |
|
"middle": [], |
|
"last": "Vondrick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Murphy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cordelia", |
|
"middle": [], |
|
"last": "Schmid", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1904.01766" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chen Sun, Austin Myers, Carl Vondrick, Kevin Mur- phy, and Cordelia Schmid. 2019b. Videobert: A joint model for video and language representation learning. arXiv preprint arXiv:1904.01766.", |
|
"links": null |
|
}, |
|
"BIBREF49": { |
|
"ref_id": "b49", |
|
"title": "Coin: A large-scale dataset for comprehensive instructional video analysis", |
|
"authors": [ |
|
{ |
|
"first": "Yansong", |
|
"middle": [], |
|
"last": "Tang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dajun", |
|
"middle": [], |
|
"last": "Ding", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yongming", |
|
"middle": [], |
|
"last": "Rao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yu", |
|
"middle": [], |
|
"last": "Zheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danyang", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lili", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiwen", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jie", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1207--1216", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yansong Tang, Dajun Ding, Yongming Rao, Yu Zheng, Danyang Zhang, Lili Zhao, Jiwen Lu, and Jie Zhou. 2019. Coin: A large-scale dataset for comprehen- sive instructional video analysis. In Proceedings of the IEEE Conference on Computer Vision and Pat- tern Recognition, pages 1207-1216.", |
|
"links": null |
|
}, |
|
"BIBREF50": { |
|
"ref_id": "b50", |
|
"title": "Procedural text generation from an execution video", |
|
"authors": [ |
|
{ |
|
"first": "Atsushi", |
|
"middle": [], |
|
"last": "Ushiku", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hayato", |
|
"middle": [], |
|
"last": "Hashimoto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Atsushi", |
|
"middle": [], |
|
"last": "Hashimoto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shinsuke", |
|
"middle": [], |
|
"last": "Mori", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the Eighth International Joint Conference on Natural Language Processing", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "326--335", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Atsushi Ushiku, Hayato Hashimoto, Atsushi Hashimoto, and Shinsuke Mori. 2017. Proce- dural text generation from an execution video. In Proceedings of the Eighth International Joint Con- ference on Natural Language Processing (Volume 1: Long Papers), pages 326-335, Taipei, Taiwan. Asian Federation of Natural Language Processing.", |
|
"links": null |
|
}, |
|
"BIBREF51": { |
|
"ref_id": "b51", |
|
"title": "RecipeQA: A challenge dataset for multimodal comprehension of cooking recipes", |
|
"authors": [ |
|
{ |
|
"first": "Semih", |
|
"middle": [], |
|
"last": "Yagcioglu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aykut", |
|
"middle": [], |
|
"last": "Erdem", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Erkut", |
|
"middle": [], |
|
"last": "Erdem", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nazli", |
|
"middle": [], |
|
"last": "Ikizler-Cinbis", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1358--1368", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D18-1166" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Semih Yagcioglu, Aykut Erdem, Erkut Erdem, and Na- zli Ikizler-Cinbis. 2018. RecipeQA: A challenge dataset for multimodal comprehension of cooking recipes. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 1358-1368, Brussels, Belgium. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF52": { |
|
"ref_id": "b52", |
|
"title": "Situation recognition: Visual semantic role labeling for image understanding", |
|
"authors": [ |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Yatskar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ali", |
|
"middle": [], |
|
"last": "Farhadi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Conference on Computer Vision and Pattern Recognition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mark Yatskar, Luke Zettlemoyer, and Ali Farhadi. 2016. Situation recognition: Visual semantic role labeling for image understanding. In Conference on Computer Vision and Pattern Recognition.", |
|
"links": null |
|
}, |
|
"BIBREF53": { |
|
"ref_id": "b53", |
|
"title": "Weakly-supervised video object grounding from text by loss weighting and object interaction", |
|
"authors": [ |
|
{ |
|
"first": "Luowei", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nathan", |
|
"middle": [], |
|
"last": "Louis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Corso", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1805.02834" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Luowei Zhou, Nathan Louis, and Jason J Corso. 2018a. Weakly-supervised video object grounding from text by loss weighting and object interaction. arXiv preprint arXiv:1805.02834.", |
|
"links": null |
|
}, |
|
"BIBREF54": { |
|
"ref_id": "b54", |
|
"title": "Towards automatic learning of procedures from web instructional videos", |
|
"authors": [ |
|
{ |
|
"first": "Luowei", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chenliang", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Corso", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Thirty-Second AAAI Conference on Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Luowei Zhou, Chenliang Xu, and Jason J Corso. 2018b. Towards automatic learning of procedures from web instructional videos. In Thirty-Second AAAI Confer- ence on Artificial Intelligence.", |
|
"links": null |
|
}, |
|
"BIBREF55": { |
|
"ref_id": "b55", |
|
"title": "End-to-end dense video captioning with masked transformer", |
|
"authors": [ |
|
{ |
|
"first": "Luowei", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yingbo", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Corso", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Caiming", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "8739--8748", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Luowei Zhou, Yingbo Zhou, Jason J Corso, Richard Socher, and Caiming Xiong. 2018c. End-to-end dense video captioning with masked transformer. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 8739-8748.", |
|
"links": null |
|
}, |
|
"BIBREF56": { |
|
"ref_id": "b56", |
|
"title": "Cross-task weakly supervised learning from instructional videos", |
|
"authors": [ |
|
{ |
|
"first": "Dimitri", |
|
"middle": [], |
|
"last": "Zhukov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jean-Baptiste", |
|
"middle": [], |
|
"last": "Alayrac", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ramazan", |
|
"middle": [ |
|
"Gokberk" |
|
], |
|
"last": "Cinbis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Fouhey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Laptev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Josef", |
|
"middle": [], |
|
"last": "Sivic", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Computer Vision and Pattern Recognition (CVPR)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dimitri Zhukov, Jean-Baptiste Alayrac, Ramazan Gok- berk Cinbis, David Fouhey, Ivan Laptev, and Josef Sivic. 2019. Cross-task weakly supervised learning from instructional videos. In Computer Vision and Pattern Recognition (CVPR).", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF1": { |
|
"uris": null, |
|
"num": null, |
|
"type_str": "figure", |
|
"text": "An example of extracting procedures for task \"Making Clam Chowder\"." |
|
}, |
|
"FIGREF2": { |
|
"uris": null, |
|
"num": null, |
|
"type_str": "figure", |
|
"text": "Annotation interface." |
|
}, |
|
"FIGREF3": { |
|
"uris": null, |
|
"num": null, |
|
"type_str": "figure", |
|
"text": "Most frequent verbs (upper) and arguments (lower)." |
|
}, |
|
"FIGREF4": { |
|
"uris": null, |
|
"num": null, |
|
"type_str": "figure", |
|
"text": "Extraction pipeline." |
|
}, |
|
"TABREF1": { |
|
"html": null, |
|
"text": "Comparison to current datasets.", |
|
"content": "<table/>", |
|
"num": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF3": { |
|
"html": null, |
|
"text": "Statistics of annotated verbs and arguments in procedures.", |
|
"content": "<table/>", |
|
"num": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF4": { |
|
"html": null, |
|
"text": "for more examples.", |
|
"content": "<table><tr><td>1</td><td>Input Hello everyone, today i am going to ...</td><td>Key Clip Prediction Is key clip? If yes: Stage 1</td></tr><tr><td>1</td><td/><td>Procedural</td></tr><tr><td/><td/><td>Knowledge</td></tr><tr><td>2</td><td>Put some bacon in there and fry it up \u2026</td><td>Extraction Extract tuples Stage 2 from key clips</td></tr><tr><td>2</td><td/><td>2 2 <put, bacon, \u2026></td></tr><tr><td/><td>You are good to go, thanks for watching! ...</td><td>2 2 <fry, bacon, \u2026> ...</td></tr><tr><td/><td/><td><remove, bacon, \u2026></td></tr><tr><td/><td/><td>Output</td></tr></table>", |
|
"num": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF6": { |
|
"html": null, |
|
"text": "Key clip selection results.", |
|
"content": "<table/>", |
|
"num": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF7": { |
|
"html": null, |
|
"text": "Kiddon et al. (2015) 12.0 10.9 11.4 18.8 17.2 18.0 20.2 18.4 19.3 0.4 0.9 0.5 10.4 19.3 13.5 16.4 30.2 21.3 SRL w/o heur. 19.4 54.7 28.6 25.3 70.1 37.2 26.6 73.8 39.1 1.3 5.4 2.0 14.1 53.6 22.3 22.0 81.8 34.6 SRL w/ heur. 38.7 51.6 44.3 45.2 60.3 51.7 46.9 62.6 53.6 1.6 3.3 2.2 21.2 39.8 27.7 32.3 59.5 41.9", |
|
"content": "<table><tr><td/><td/><td/><td/><td/><td>Verbs</td><td/><td/><td/><td/><td/><td/><td/><td colspan=\"2\">Arguments</td><td/><td/></tr><tr><td>Model</td><td colspan=\"3\">Exact Match</td><td/><td>Fuzzy</td><td/><td colspan=\"3\">Partial Fuzzy</td><td colspan=\"2\">Exact Match</td><td/><td>Fuzzy</td><td/><td colspan=\"3\">Partial Fuzzy</td></tr><tr><td/><td>P</td><td>R</td><td>F1</td><td>P</td><td>R</td><td>F1</td><td>P</td><td>R</td><td>F1</td><td>P</td><td>R F1</td><td>P</td><td>R</td><td>F1</td><td>P</td><td>R</td><td>F1</td></tr><tr><td/><td/><td/><td/><td/><td/><td colspan=\"4\">Using oracle key clips</td><td/><td/><td/><td/><td/><td/><td/></tr><tr><td>Visual</td><td>4.1</td><td>6.7</td><td colspan=\"15\">5.1 17.9 27.8 21.7 19.3 30.1 23.5 0.9 1.1 1.0 17.8 25.8 21.1 24.2 36.2 29.0</td></tr><tr><td>Fusion</td><td colspan=\"17\">19.9 55.2 29.3 28.6 73.3 41.2 31.2 78.6 44.7 1.1 3.8 1.6 16.9 50.0 25.2 24.4 72.5 36.5</td></tr><tr><td/><td/><td/><td/><td/><td/><td colspan=\"4\">Using predicted key clips</td><td/><td/><td/><td/><td/><td/><td/></tr><tr><td colspan=\"2\">Kiddon et al. (2015) 7.0</td><td>6.3</td><td colspan=\"12\">6.6 10.9 10.0 10.4 11.7 10.7 11.2 0.2 0.5 0.3 6.1 11.2 7.9</td><td colspan=\"3\">9.5 17.5 12.3</td></tr><tr><td>SRL w/o heur.</td><td colspan=\"17\">11.2 31.7 16.6 14.7 40.7 21.6 15.4 42.8 22.6 0.7 3.1 1.2 8.2 31.1 13.0 12.7 47.4 20.1</td></tr><tr><td>SRL w/ heur.</td><td colspan=\"17\">22.5 29.9 25.7 26.2 35.0 30.0 27.2 36.3 31.1 0.9 1.9 1.3 12.3 23.1 16.1 18.8 34.5 24.3</td></tr><tr><td>Visual</td><td>2.4</td><td>3.9</td><td colspan=\"15\">3.0 10.4 16.1 12.6 11.2 17.5 13.7 0.5 0.6 0.6 10.3 15.0 12.2 14.1 21.0 16.8</td></tr><tr><td>Fusion</td><td colspan=\"17\">11.5 32.0 17.0 16.6 42.5 23.9 18.1 45.6 25.9 0.6 2.2 1.0 9.8 29.0 14.6 14.1 42.1 21.2</td></tr></table>", |
|
"num": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF8": { |
|
"html": null, |
|
"text": "Clip/sentence-level structured procedure extraction results for verbs and arguments.", |
|
"content": "<table/>", |
|
"num": null, |
|
"type_str": "table" |
|
} |
|
} |
|
} |
|
} |