|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T14:58:07.163223Z" |
|
}, |
|
"title": "Decoupling Pragmatics: Discriminative Decoding for Referring Expression Generation", |
|
"authors": [ |
|
{ |
|
"first": "Simeon", |
|
"middle": [], |
|
"last": "Sch\u00fcz", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Bielefeld University", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Sina", |
|
"middle": [], |
|
"last": "Zarrie\u00df", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Bielefeld University", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "The shift to neural models in Referring Expression Generation (REG) has enabled more natural setups , but at the cost of interpretability. We argue that integrating pragmatic reasoning into the inference of context-agnostic generation models could reconcile traits of traditional and neural REG, as this offers a separation between context-independent, literal information and pragmatic adaptation to context. With this in mind, we apply existing decoding strategies from discriminative image captioning to REG and evaluate them in terms of pragmatic informativity, likelihood to groundtruth annotations and linguistic diversity. Our results show general effectiveness, but a relatively small gain in informativity, raising important questions for REG in general.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "The shift to neural models in Referring Expression Generation (REG) has enabled more natural setups , but at the cost of interpretability. We argue that integrating pragmatic reasoning into the inference of context-agnostic generation models could reconcile traits of traditional and neural REG, as this offers a separation between context-independent, literal information and pragmatic adaptation to context. With this in mind, we apply existing decoding strategies from discriminative image captioning to REG and evaluate them in terms of pragmatic informativity, likelihood to groundtruth annotations and linguistic diversity. Our results show general effectiveness, but a relatively small gain in informativity, raising important questions for REG in general.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "In recent years, neural models have become the workhorses for Referring Expression Generation (REG, e.g. Mao et al., 2016; Yu et al., 2016; Zarrie\u00df and Schlangen, 2018) , as in other tasks in the Vision and Language (V&L) domain (Mogadala et al., 2019) . In REG, this was accompanied by a major shift in how the task was conceptualized. Classical approaches (e.g. Dale, 1989; Dale and Reiter, 1995) mostly investigated rule-based procedures to determine combinations of properties that distinguish target objects from distractors, based on knowledge bases of objects and associated attributes (Krahmer and van Deemter, 2019) . Recent work in REG has shifted to more natural settings (e.g. objects in photographs, cf. Figure 1 ), but at the expense of interpretability: Since continuous representations have replaced knowledge bases as the input, pragmatic processes in neural REG no longer operate on symbolic properties, but are deeply woven into model architectures and training schemes. Decoding and reasoning methods for discriminative image captioning (Vedantam et al., 2017; Cohn-Gordon et al., 2018) could represent a middle ground in this regard: During inference, predictions from a captioning model are reranked according to pragmatic principles, boosting contextually informative and inhibiting ambiguous utterances. This offers interesting similarities to traditional REG, as it is carried out through explicit algorithms and targets symbolic representations (e.g. word tokens). Discriminative decoding has been shown to be effective for image captioning (Vedantam et al., 2017; Cohn-Gordon et al., 2018; Sch\u00fcz et al., 2021) . In this work, we investigate discriminative decoding for REG, adapting the methods from Vedantam et al. (2017) and Cohn-Gordon et al. (2018) . We compare them to standard greedy and beam search decoding, in terms of informativity, likelihood to ground-truth annotations, and linguistic diversity. We show that discriminative decoding increases informativity and diversity, although the results are less clear than expected. We attribute this, in part, to the way human annotations are collected, highlighting implications for REG research in general.", |
|
"cite_spans": [ |
|
{ |
|
"start": 105, |
|
"end": 122, |
|
"text": "Mao et al., 2016;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 123, |
|
"end": 139, |
|
"text": "Yu et al., 2016;", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 140, |
|
"end": 168, |
|
"text": "Zarrie\u00df and Schlangen, 2018)", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 229, |
|
"end": 252, |
|
"text": "(Mogadala et al., 2019)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 364, |
|
"end": 375, |
|
"text": "Dale, 1989;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 376, |
|
"end": 398, |
|
"text": "Dale and Reiter, 1995)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 593, |
|
"end": 624, |
|
"text": "(Krahmer and van Deemter, 2019)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 1057, |
|
"end": 1080, |
|
"text": "(Vedantam et al., 2017;", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 1081, |
|
"end": 1106, |
|
"text": "Cohn-Gordon et al., 2018)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 1567, |
|
"end": 1590, |
|
"text": "(Vedantam et al., 2017;", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 1591, |
|
"end": 1616, |
|
"text": "Cohn-Gordon et al., 2018;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 1617, |
|
"end": 1636, |
|
"text": "Sch\u00fcz et al., 2021)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 1727, |
|
"end": 1749, |
|
"text": "Vedantam et al. (2017)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 1754, |
|
"end": 1779, |
|
"text": "Cohn-Gordon et al. (2018)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 717, |
|
"end": 725, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Traditional and neural REG In REG, the goal is to generate descriptions for entities, which al-low their identification in a given context (Reiter and Dale, 2000) ; i.e. generating expressions with sufficient, but not too much information, following a Gricean notion of pragmatics (Grice, 1975; Krahmer and van Deemter, 2019) . In classic work, target and distractor objects were defined in terms of symbolic attributes and associated values (e.g. color -red). The full REG task was conceived as involving different levels of processing, i.e. lexicalization, content selection and surface realization (Reiter and Dale, 2000; Krahmer and van Deemter, 2019) . However, foundational work in REG has mostly focused on algorithms for finding distinguishing sets of attribute-value pairs, which apply to the target, but rule out distractor objects, such as the Incremental Algorithm (IA, Dale and Reiter, 1995) . This algorithm iterates over the attribute set in a pre-defined order, selects an attribute if it rules out objects from the set of distractors and terminates when the set is empty. It has been refined, extended and tested in subsequent work (Krahmer et al., 2003; Mitchell et al., 2010; van Deemter et al., 2012; Clarke et al., 2013) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 139, |
|
"end": 162, |
|
"text": "(Reiter and Dale, 2000)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 281, |
|
"end": 294, |
|
"text": "(Grice, 1975;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 295, |
|
"end": 325, |
|
"text": "Krahmer and van Deemter, 2019)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 601, |
|
"end": 624, |
|
"text": "(Reiter and Dale, 2000;", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 625, |
|
"end": 655, |
|
"text": "Krahmer and van Deemter, 2019)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 882, |
|
"end": 904, |
|
"text": "Dale and Reiter, 1995)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 1149, |
|
"end": 1171, |
|
"text": "(Krahmer et al., 2003;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 1172, |
|
"end": 1194, |
|
"text": "Mitchell et al., 2010;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 1195, |
|
"end": 1220, |
|
"text": "van Deemter et al., 2012;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 1221, |
|
"end": 1241, |
|
"text": "Clarke et al., 2013)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "In recent years, neural models have enabled REG set-ups based on real-world images (Kazemzadeh et al., 2014; Gkatzia et al., 2015; Mao et al., 2015; Schlangen, 2016, 2018; Tanaka et al., 2019; Liu et al., 2020; Kim et al., 2020; Panagiaris et al., 2020 Panagiaris et al., , 2021 , representing scenes with many different types of real-world objects. Most commonly, neural REG models follow the encoderdecoder scheme and are trained end-to-end. Based on low-level visual representations as the input, various aspects of the task are modeled jointly, e.g. lexicalization and content selection.", |
|
"cite_spans": [ |
|
{ |
|
"start": 83, |
|
"end": 108, |
|
"text": "(Kazemzadeh et al., 2014;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 109, |
|
"end": 130, |
|
"text": "Gkatzia et al., 2015;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 131, |
|
"end": 148, |
|
"text": "Mao et al., 2015;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 149, |
|
"end": 171, |
|
"text": "Schlangen, 2016, 2018;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 172, |
|
"end": 192, |
|
"text": "Tanaka et al., 2019;", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 193, |
|
"end": 210, |
|
"text": "Liu et al., 2020;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 211, |
|
"end": 228, |
|
"text": "Kim et al., 2020;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 229, |
|
"end": 252, |
|
"text": "Panagiaris et al., 2020", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 253, |
|
"end": 278, |
|
"text": "Panagiaris et al., , 2021", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Neural Generation and Pragmatics Various approaches were proposed to generate more discriminative expressions in neural REG, such as specialized training objectives (Mao et al., 2016) , enhanced input representations and joint generation for objects in the same scene (Yu et al., 2016) , listener / comprehension components or reinforcement modules (Luo and Shakhnarovich, 2017; Yu et al., 2017) , and classifiers which predict attributes for depicted objects (Liu et al., 2017 (Liu et al., , 2020 . Here, the REG models are trained to jointly determine truthful descriptions for depicted objects and formulate expressions that are unambiguous in a given context. Hence, semantic and pragmatic processing are tightly intertwined, preventing a clear separation between context-independent information and pragmatic adaption as in traditional REG.", |
|
"cite_spans": [ |
|
{ |
|
"start": 165, |
|
"end": 183, |
|
"text": "(Mao et al., 2016)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 268, |
|
"end": 285, |
|
"text": "(Yu et al., 2016)", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 349, |
|
"end": 378, |
|
"text": "(Luo and Shakhnarovich, 2017;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 379, |
|
"end": 395, |
|
"text": "Yu et al., 2017)", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 460, |
|
"end": 477, |
|
"text": "(Liu et al., 2017", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 478, |
|
"end": 497, |
|
"text": "(Liu et al., , 2020", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "In image captioning, e.g. Andreas and Klein (2016); Vedantam et al. 2017; Cohn-Gordon et al. (2018) tried to generate pragmatically informative captions, by decoding general captioning models, at testing time, to produce captions that discriminate target images from a given set of distractor images. This corresponds more closely to approaches such as the IA, as it takes place over a finite set of symbolic (word) tokens and leaves the literal generation process untouched. In this work we use the methods proposed by Vedantam et al. (2017) and Cohn-Gordon et al. (2018) and adapt them to neural REG. For evaluation, we roughly follow the experimental set-up from Sch\u00fcz et al. (2021) and consider likelihood to human annotations, informativity and diversity, the latter as a proxy for the degree of linguistic adaptation to context.", |
|
"cite_spans": [ |
|
{ |
|
"start": 520, |
|
"end": 542, |
|
"text": "Vedantam et al. (2017)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 547, |
|
"end": 572, |
|
"text": "Cohn-Gordon et al. (2018)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 666, |
|
"end": 685, |
|
"text": "Sch\u00fcz et al. (2021)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "We compare contrasting decoding methods, which focus either on likelihood or informativity.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Decoding Methods", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "For the former, Greedy Search selects the token with the highest probability at each time step. Beam Search simultaneously extends a fixed number of k hypotheses at each step (here: k = 5).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Decoding Methods", |
|
"sec_num": "3.1" |
|
}, |
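
{

"text": "As a brief formal sketch (standard definitions, not specific to this paper): with expression prefix w_1, ..., w_t-1 and target representation I, greedy search picks the token w_t that maximizes p(w_t | w_1, ..., w_t-1, I) at each step, while beam search keeps the k partial expressions with the highest cumulative log-probability \u03a3_t log p(w_t | w_1, ..., w_t-1, I) and returns the best-scoring completed hypothesis.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Decoding Methods",

"sec_num": "3.1"

},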
|
{ |
|
"text": "Based on the Rational Speech Acts model (Frank and Goodman, 2012), RSA Decoding (Cohn-Gordon et al., 2018, henceforth RSA) aims for higher informativity by integrating pragmatic reasoning into the iterative unrolling of recurrent captioning models. Given a target and a set of distractors, the literal speaker S 0 generates initial distributions over possible next tokens. The literal listener L 0 determines which tokens effectively distinguish the target from the distractors. Finally, the pragmatic speaker S 1 selects tokens rated informative by L 0 . A rationality parameter \u03b1 specifies the relative influence of L 0 in S 1 , cf. Cohn-Gordon et al. (2018) for more details. In our REG setting, targets and distractors are objects in the same image.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Decoding Methods", |
|
"sec_num": "3.1" |
|
}, |
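
{

"text": "As an illustration (our schematic rendering, simplified from the incremental formulation in Cohn-Gordon et al. (2018)): given a target t, distractors d_1, ..., d_n and the expression prefix c, the literal listener assigns L_0(i | c \u00b7 w) \u221d S_0(c \u00b7 w | i) for i ranging over the target and its distractors, and the pragmatic speaker rescores candidate tokens as S_1(w | t, c) \u221d L_0(t | c \u00b7 w)^\u03b1 \u00b7 S_0(w | t, c), so that decoding proceeds from S_1 instead of S_0. The exact update in Cohn-Gordon et al. (2018) differs in detail; this sketch is only meant to make the roles of S_0, L_0 and S_1 explicit.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Decoding Methods",

"sec_num": "3.1"

},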
|
{ |
|
"text": "In the conceptually similar Emitter-Suppressor approach (Vedantam et al., 2017, henceforth ES) , a speaker (emitter) models a caption for a target image I t in conjuction with a listener function (suppressor) that rates the discriminativeness of the utterance with regard to a distractor image, cf. Vedantam et al. (2017) . \u03bb is a rationality parameter; the smaller the value of \u03bb, the more the suppressor is weighted. We adapt the extended implementation from Sch\u00fcz et al. (2021) for multiple distractors. We use both RSA and ES in a beam search decoding scheme. Whereas in the original approaches the number of distractors is fixed, it varies between images in our REG setting: For an image with n objects, the number of distractors is n \u2212 1.", |
|
"cite_spans": [ |
|
{ |
|
"start": 56, |
|
"end": 94, |
|
"text": "(Vedantam et al., 2017, henceforth ES)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 315, |
|
"end": 321, |
|
"text": "(2017)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 461, |
|
"end": 480, |
|
"text": "Sch\u00fcz et al. (2021)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Decoding Methods", |
|
"sec_num": "3.1" |
|
}, |
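
{

"text": "For illustration, a schematic single-distractor version of the per-step ES score (our sketch, not the verbatim objective from Vedantam et al. (2017)): score(w) = \u03bb \u00b7 log p(w | c, I_t) + (1 \u2212 \u03bb) \u00b7 log [ p(w | c, I_t) / p(w | c, I_d) ], where c is the expression prefix and I_d a distractor; \u03bb = 1 recovers standard likelihood-based decoding, and smaller \u03bb increasingly suppresses tokens that are also likely for the distractor. How the suppressor term is aggregated over multiple distractors follows the extension of Sch\u00fcz et al. (2021); the exact parameterization may differ from this sketch.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Decoding Methods",

"sec_num": "3.1"

},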
|
{ |
|
"text": "We use the data and pre-defined splits from Re-fCOCO, RefCOCO+ (Kazemzadeh et al., 2014) and RefCOCOg (Mao et al., 2016) for training and evaluation. All of these datasets contain English referring expressions to objects in images from MSCOCO (Lin et al., 2014) , collected in interactive (RefCOCO, RefCOCO+) or non-interactive (Ref-COCOg) settings. In RefCOCO and RefCOCO+, testA contains references to humans and testB references to other object types. Since both targets and distractors are required for RSA and ES, we remove images from our test splits which contain only a single object. After this, our test splits comprise approximately 1950 (testA / testA+), 1750 (testB / testB+) and 4000 (testg) objects.", |
|
"cite_spans": [ |
|
{ |
|
"start": 63, |
|
"end": 88, |
|
"text": "(Kazemzadeh et al., 2014)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 102, |
|
"end": 120, |
|
"text": "(Mao et al., 2016)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 243, |
|
"end": 261, |
|
"text": "(Lin et al., 2014)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data and Model", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "We adopted the image captioning model from Lu et al. 20171 as the basis for our REG model. Similar to e.g. Mao et al. (2016) , we complemented the original model by supplying 7 location features along with the input image.", |
|
"cite_spans": [ |
|
{ |
|
"start": 107, |
|
"end": 124, |
|
"text": "Mao et al. (2016)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data and Model", |
|
"sec_num": "3.2" |
|
}, |
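
{

"text": "The 7 location features are not enumerated in this paragraph; as an illustrative assumption (not a specification taken from the paper), a common choice in neural REG is to encode the normalized top-left and bottom-right corners of the target bounding box together with its relative width, height and area, i.e. [x_tl / W, y_tl / H, x_br / W, y_br / H, w_box / W, h_box / H, A_box / A_img], which yields seven values.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Data and Model",

"sec_num": "3.2"

},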
|
{ |
|
"text": "Likelihood is measured through BLEU 1 (Papineni et al., 2002) and CIDEr (Vedantam et al., 2015) scores, calculated using the RefCOCO API 2 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 38, |
|
"end": 61, |
|
"text": "(Papineni et al., 2002)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 72, |
|
"end": 95, |
|
"text": "(Vedantam et al., 2015)", |
|
"ref_id": "BIBREF32" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "For Diversity, we calculate the type-token ratio (TTR) for unigrams and bigrams, and the proportion of the model vocabulary used (coverage). Importantly, we look at global diversity, i.e. the 1 https://github.com/yufengm/Adaptive 2 https://github.com/lichengunc/refer corpus-level variation in the usage of words and phrases (van Miltenburg et al., 2018) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 325, |
|
"end": 354, |
|
"text": "(van Miltenburg et al., 2018)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "3.3" |
|
}, |
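
{

"text": "For reference, the diversity measures follow their standard definitions (not specific to this paper): TTR for unigrams is the number of distinct unigrams divided by the total number of unigram tokens over all generated expressions of a split, TTR for bigrams is the analogue for bigrams, and coverage is the fraction of the model vocabulary that occurs at least once in the generated expressions.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Evaluation",

"sec_num": "3.3"

},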
|
{ |
|
"text": "Informativity is assessed through the precision of a separate, pre-trained Referring Expression Comprehension model (Luo et al., 2020) . Given a generated expression and corresponding image, the model predicts a bounding box which locates the described object in the image. As in the original paper, predictions are deemed correct if the intersection over union between predicted and groundtruth bounding boxes is greater than 0.5. Previous work in neural REG mostly assessed informativity through human evaluation (e.g. Yu et al., 2016 Yu et al., , 2017 Liu et al., 2017 Liu et al., , 2020 . We decided for automatic evaluation for the sake of better comparability and exhaustive coverage of our expressions.", |
|
"cite_spans": [ |
|
{ |
|
"start": 116, |
|
"end": 134, |
|
"text": "(Luo et al., 2020)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 521, |
|
"end": 536, |
|
"text": "Yu et al., 2016", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 537, |
|
"end": 554, |
|
"text": "Yu et al., , 2017", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 555, |
|
"end": 571, |
|
"text": "Liu et al., 2017", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 572, |
|
"end": 590, |
|
"text": "Liu et al., , 2020", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "3.3" |
|
}, |
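
{

"text": "As a reminder of the criterion (standard definition, not specific to this paper): for a predicted box B_p and a ground-truth box B_g, IoU(B_p, B_g) is the area of their intersection divided by the area of their union, and a generated expression counts as correctly resolved if the box predicted from it satisfies IoU(B_p, B_g) > 0.5.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Evaluation",

"sec_num": "3.3"

},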
|
{ |
|
"text": "The results in Table 1 show that discriminative decoding leads to a decrease in both BLEU (BL 1 ) and CIDEr (CDr). Whereas ES with \u03bb = 0.7 achieves comparable results to greedy and beam search, both metrics drop if rationality is increased. In most cases, this also applies to RSA. This corresponds to the general findings in Sch\u00fcz et al. (2021) : With higher rationality, ES and RSA generate expressions that deviate further from the model predictions, resulting in lower n-gram overlap.", |
|
"cite_spans": [ |
|
{ |
|
"start": 326, |
|
"end": 345, |
|
"text": "Sch\u00fcz et al. (2021)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 15, |
|
"end": 22, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Likelihood and Diversity", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Similarly, the diversity results in Table 2 confirm the findings in Sch\u00fcz et al. (2021) . Discriminative decoding increases TTR (T 1 , T 2 ) and coverage (cov.), indicating that pragmatic reasoning leads to more variation and the usage of a larger vocabulary.", |
|
"cite_spans": [ |
|
{ |
|
"start": 68, |
|
"end": 87, |
|
"text": "Sch\u00fcz et al. (2021)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 36, |
|
"end": 43, |
|
"text": "Table 2", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Likelihood and Diversity", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "For informativity, ES and RSA outperform greedy and beam search (cf. even greedy decoding can perform well in certain cases (e.g. testA). Overall, the gain is rather modest: Here, the maximum relative increase is 10% (testB+), whereas Sch\u00fcz et al. (2021) report more than 30% increase in retrieval 3 . This could be due to upper bounds for possible detection results: Depending on the data set, the comprehension task itself poses a considerable challenge, as can be seen, for example, in the detection results for human annotations in testB+. An alternative explanation can be seen in the data used to train the model: Unlike in image captioning, the utterances in our datasets were explicitly produced for distinguishing targets and distractors. Thus, by re-using linguistic patterns from the training data, our model might be able to generate relatively informative expressions without even considering the situational context. This way of implicitly learning to fulfill pragmatic requirements might render additional layers of pragmatic reasoning less effective. This hypothesis is supported by the decent results for greedy search e.g. in testA. Also, beam search occasionally improves the greedy results, indicating that optimizing model predictions increases pragmatic informativity. In similar set-ups for image captioning, beam search was reported to decrease informativity (Sch\u00fcz et al., 2021) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 235, |
|
"end": 254, |
|
"text": "Sch\u00fcz et al. (2021)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 1383, |
|
"end": 1403, |
|
"text": "(Sch\u00fcz et al., 2021)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Informativity", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Somewhat surprisingly, ES with \u03bb = 0.5 mostly obtains better results than \u03bb = 0.3, i.e. higher rationality does not always lead to higher informativity. Figure 2 shows this for a wider range of \u03bb: For every data split, the detection results drop drastically if \u03bb approaches 0. We attribute this to ES struggling to generate well-formed descriptions for high rationalities (as reflected in BLEU 1 and CIDEr for \u03bb = 0.3), and increasingly diverging from the data used for training the comprehension model.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 153, |
|
"end": 161, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Informativity", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Discriminative decoding is appealing for REG, as it combines traits from traditional REG with neural generation models. Our results confirm findings previously reported for image captioning: Discriminative decoding decreases likelihood to groundtruth annotations, but increases informativity and diversity. For informativity, the margin of gain was surprisingly low in our experiments. We attributed this, in part, to the high informativity of underlying model expressions. While this is of importance especially in our setup (both ES and RSA assume a basis of pragmatically neutral descriptions), the question of whether pragmatic informativity has to be explicitly modelled or is implicitly learned from the data is relevant for REG research in general, and should be investigated in subsequent work. To this end, controlling the impact of pragmatic processing as in discriminative decoding could be a valuable instrument.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion and Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Beyond this, future work should investigate discriminative decoding in REG in more detail, e.g. to see whether pragmatic reasoning leads to the generation of different or more specific attributes.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion and Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Due to differences in tasks, models, data and evaluation, this comparison should be taken with caution.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Reasoning about pragmatics with neural listeners and speakers", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Andreas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Klein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1173--1182", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D16-1125" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Andreas and Dan Klein. 2016. Reasoning about pragmatics with neural listeners and speakers. In Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing, pages 1173-1182, Austin, Texas. Association for Compu- tational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Where's wally: the influence of visual salience on referring expression generation. Frontiers in psychology", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [ |
|
"F" |
|
], |
|
"last": "Alasdair", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Micha", |
|
"middle": [], |
|
"last": "Clarke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hannah", |
|
"middle": [], |
|
"last": "Elsner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Rohde", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alasdair DF Clarke, Micha Elsner, and Hannah Ro- hde. 2013. Where's wally: the influence of visual salience on referring expression generation. Fron- tiers in psychology, 4.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Pragmatically informative image captioning with character-level inference", |
|
"authors": [ |
|
{ |
|
"first": "Reuben", |
|
"middle": [], |
|
"last": "Cohn-Gordon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [], |
|
"last": "Goodman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Potts", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "439--443", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N18-2070" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Reuben Cohn-Gordon, Noah Goodman, and Christo- pher Potts. 2018. Pragmatically informative image captioning with character-level inference. In Pro- ceedings of the 2018 Conference of the North Amer- ican Chapter of the Association for Computational Linguistics: Human Language Technologies, Vol- ume 2 (Short Papers), pages 439-443, New Orleans, Louisiana. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Cooking up referring expressions", |
|
"authors": [ |
|
{ |
|
"first": "Robert", |
|
"middle": [], |
|
"last": "Dale", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1989, |
|
"venue": "27th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "68--75", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/981623.981632" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Robert Dale. 1989. Cooking up referring expres- sions. In 27th Annual Meeting of the Association for Computational Linguistics, pages 68-75, Vancouver, British Columbia, Canada. Association for Compu- tational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Computational interpretations of the gricean maxims in the generation of referring expressions", |
|
"authors": [ |
|
{ |
|
"first": "Robert", |
|
"middle": [], |
|
"last": "Dale", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ehud", |
|
"middle": [], |
|
"last": "Reiter", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1995, |
|
"venue": "Cognitive Science", |
|
"volume": "19", |
|
"issue": "2", |
|
"pages": "233--263", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Robert Dale and Ehud Reiter. 1995. Computational interpretations of the gricean maxims in the gener- ation of referring expressions. Cognitive Science, 19(2):233-263.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Generation of referring expressions: Assessing the incremental algorithm", |
|
"authors": [ |
|
{ |
|
"first": "Albert", |
|
"middle": [], |
|
"last": "Kees Van Deemter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Gatt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Ielka Van Der Sluis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Power", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Cognitive Science", |
|
"volume": "36", |
|
"issue": "5", |
|
"pages": "799--836", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1111/j.1551-6709.2011.01205.x" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kees van Deemter, Albert Gatt, Ielka van der Sluis, and Richard Power. 2012. Generation of referring expressions: Assessing the incremental algorithm. Cognitive Science, 36(5):799-836.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Predicting pragmatic reasoning in language games", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Michael", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Frank", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Noah D Goodman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Science", |
|
"volume": "336", |
|
"issue": "6084", |
|
"pages": "998--998", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michael C Frank and Noah D Goodman. 2012. Pre- dicting pragmatic reasoning in language games. Sci- ence, 336(6084):998-998.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "From the virtual to the real world: Referring to objects in real-world spatial scenes", |
|
"authors": [ |
|
{ |
|
"first": "Dimitra", |
|
"middle": [], |
|
"last": "Gkatzia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Verena", |
|
"middle": [], |
|
"last": "Rieser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Phil", |
|
"middle": [], |
|
"last": "Bartie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "William", |
|
"middle": [], |
|
"last": "Mackaness", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of EMNLP 2015. Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dimitra Gkatzia, Verena Rieser, Phil Bartie, and William Mackaness. 2015. From the virtual to the real world: Referring to objects in real-world spatial scenes. In Proceedings of EMNLP 2015. Associa- tion for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Logic and conversation", |
|
"authors": [ |
|
{ |
|
"first": "H", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Grice", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1975, |
|
"venue": "Syntax and Semantics", |
|
"volume": "3", |
|
"issue": "", |
|
"pages": "41--58", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "H. P. Grice. 1975. Logic and conversation. In Pe- ter Cole and Jerry L. Morgan, editors, Syntax and Semantics: Vol. 3: Speech Acts, pages 41-58. Aca- demic Press, New York.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "ReferItGame: Referring to Objects in Photographs of Natural Scenes", |
|
"authors": [ |
|
{ |
|
"first": "Sahar", |
|
"middle": [], |
|
"last": "Kazemzadeh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vicente", |
|
"middle": [], |
|
"last": "Ordonez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Matten", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tamara", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Berg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "787--798", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sahar Kazemzadeh, Vicente Ordonez, Mark Matten, and Tamara L Berg. 2014. ReferItGame: Referring to Objects in Photographs of Natural Scenes. In Pro- ceedings of the Conference on Empirical Methods in Natural Language Processing (EMNLP 2014), pages 787-798, Doha, Qatar.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Co-NAN: A complementary neighboring-based attention network for referring expression generation", |
|
"authors": [ |
|
{ |
|
"first": "Jungjun", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hanbin", |
|
"middle": [], |
|
"last": "Ko", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jialin", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 28th International Conference on Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1952--1962", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jungjun Kim, Hanbin Ko, and Jialin Wu. 2020. Co- NAN: A complementary neighboring-based atten- tion network for referring expression generation. In Proceedings of the 28th International Conference on Computational Linguistics, pages 1952-1962, Barcelona, Spain (Online). International Committee on Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Computational Generation of Referring Expressions: An Updated Survey", |
|
"authors": [ |
|
{ |
|
"first": "Emiel", |
|
"middle": [], |
|
"last": "Krahmer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Kees Van Deemter", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1093/oxfordhb/9780199687305.013.19" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Emiel Krahmer and Kees van Deemter. 2019. Com- putational Generation of Referring Expressions: An Updated Survey.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Graph-based generation of referring expressions", |
|
"authors": [ |
|
{ |
|
"first": "Emiel", |
|
"middle": [], |
|
"last": "Krahmer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andr\u00e9", |
|
"middle": [], |
|
"last": "Sebastiaan Van Erk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Verleg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Computational Linguistics", |
|
"volume": "29", |
|
"issue": "1", |
|
"pages": "53--72", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Emiel Krahmer, Sebastiaan van Erk, and Andr\u00e9 Verleg. 2003. Graph-based generation of referring expres- sions. Computational Linguistics, 29(1):53-72.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Microsoft coco: Common objects in context", |
|
"authors": [ |
|
{ |
|
"first": "Tsung-Yi", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Maire", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Serge", |
|
"middle": [], |
|
"last": "Belongie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Hays", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pietro", |
|
"middle": [], |
|
"last": "Perona", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Deva", |
|
"middle": [], |
|
"last": "Ramanan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Piotr", |
|
"middle": [], |
|
"last": "Doll\u00e1r", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C Lawrence", |
|
"middle": [], |
|
"last": "Zitnick", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "European conference on computer vision", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "740--755", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Doll\u00e1r, and C Lawrence Zitnick. 2014. Microsoft coco: Common objects in context. In European confer- ence on computer vision, pages 740-755. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Referring expression generation and comprehension via attributes", |
|
"authors": [ |
|
{ |
|
"first": "Jingyu", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Liang", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Hsuan", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "2017 IEEE International Conference on Computer Vision (ICCV)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/iccv.2017.520" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jingyu Liu, Liang Wang, and Ming-Hsuan Yang. 2017. Referring expression generation and comprehension via attributes. In 2017 IEEE International Confer- ence on Computer Vision (ICCV). IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Attribute-guided attention for referring expression generation and comprehension", |
|
"authors": [ |
|
{ |
|
"first": "Jingyu", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Liang", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Hsuan", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "IEEE Transactions on Image Processing", |
|
"volume": "29", |
|
"issue": "", |
|
"pages": "5244--5258", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/tip.2020.2979010" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jingyu Liu, Wei Wang, Liang Wang, and Ming-Hsuan Yang. 2020. Attribute-guided attention for referring expression generation and comprehension. IEEE Transactions on Image Processing, 29:5244-5258.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Knowing when to look: Adaptive attention via a visual sentinel for image captioning", |
|
"authors": [ |
|
{ |
|
"first": "Jiasen", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Caiming", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Devi", |
|
"middle": [], |
|
"last": "Parikh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the IEEE conference on computer vision and pattern recognition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "375--383", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiasen Lu, Caiming Xiong, Devi Parikh, and Richard Socher. 2017. Knowing when to look: Adaptive at- tention via a visual sentinel for image captioning. In Proceedings of the IEEE conference on computer vi- sion and pattern recognition, pages 375-383.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Multi-task collaborative network for joint referring expression comprehension and segmentation", |
|
"authors": [ |
|
{ |
|
"first": "Gen", |
|
"middle": [], |
|
"last": "Luo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yiyi", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaoshuai", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Liujuan", |
|
"middle": [], |
|
"last": "Cao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chenglin", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cheng", |
|
"middle": [], |
|
"last": "Deng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rongrong", |
|
"middle": [], |
|
"last": "Ji", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gen Luo, Yiyi Zhou, Xiaoshuai Sun, Liujuan Cao, Chenglin Wu, Cheng Deng, and Rongrong Ji. 2020. Multi-task collaborative network for joint referring expression comprehension and segmentation. In Proceedings of the IEEE/CVF Conference on Com- puter Vision and Pattern Recognition (CVPR).", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Comprehension-guided referring expressions", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Luo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gregory", |
|
"middle": [], |
|
"last": "Shakhnarovich", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "R. Luo and Gregory Shakhnarovich. 2017. Comprehension-guided referring expressions.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3125--3134", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 3125-3134.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Generation and comprehension of unambiguous object descriptions", |
|
"authors": [ |
|
{ |
|
"first": "Junhua", |
|
"middle": [], |
|
"last": "Mao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Toshev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oana", |
|
"middle": [], |
|
"last": "Camburu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alan", |
|
"middle": [], |
|
"last": "Yuille", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Murphy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "11--20", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/CVPR.2016.9" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Junhua Mao, Jonathan Huang, Alexander Toshev, Oana Camburu, Alan Yuille, and Kevin Murphy. 2016. Generation and comprehension of unambiguous ob- ject descriptions. In 2016 IEEE Conference on Com- puter Vision and Pattern Recognition (CVPR), pages 11-20.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Generation and comprehension of unambiguous object descriptions", |
|
"authors": [ |
|
{ |
|
"first": "Junhua", |
|
"middle": [], |
|
"last": "Mao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Toshev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oana", |
|
"middle": [], |
|
"last": "Camburu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alan", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Yuille", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Murphy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Junhua Mao, Jonathan Huang, Alexander Toshev, Oana Camburu, Alan L. Yuille, and Kevin Murphy. 2015. Generation and comprehension of unambiguous ob- ject descriptions. ArXiv / CoRR, abs/1511.02283.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Measuring the diversity of automatic image descriptions", |
|
"authors": [ |
|
{ |
|
"first": "Desmond", |
|
"middle": [], |
|
"last": "Emiel Van Miltenburg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Piek", |
|
"middle": [], |
|
"last": "Elliott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Vossen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 27th International Conference on Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1730--1741", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Emiel van Miltenburg, Desmond Elliott, and Piek Vossen. 2018. Measuring the diversity of automatic image descriptions. In Proceedings of the 27th Inter- national Conference on Computational Linguistics, pages 1730-1741, Santa Fe, New Mexico, USA. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Natural reference to objects in a visual domain", |
|
"authors": [ |
|
{ |
|
"first": "Margaret", |
|
"middle": [], |
|
"last": "Mitchell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ehud", |
|
"middle": [], |
|
"last": "Kees Van Deemter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Reiter", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the 6th international natural language generation conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "95--104", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Margaret Mitchell, Kees van Deemter, and Ehud Re- iter. 2010. Natural reference to objects in a visual domain. In Proceedings of the 6th international nat- ural language generation conference, pages 95-104. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Trends in integration of vision and language research: A survey of tasks, datasets, and methods", |
|
"authors": [ |
|
{ |
|
"first": "Aditya", |
|
"middle": [], |
|
"last": "Mogadala", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marimuthu", |
|
"middle": [], |
|
"last": "Kalimuthu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dietrich", |
|
"middle": [], |
|
"last": "Klakow", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aditya Mogadala, Marimuthu Kalimuthu, and Dietrich Klakow. 2019. Trends in integration of vision and language research: A survey of tasks, datasets, and methods. CoRR, abs/1907.09358.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Improving the naturalness and diversity of referring expression generation models using minimum risk training", |
|
"authors": [ |
|
{ |
|
"first": "Nikolaos", |
|
"middle": [], |
|
"last": "Panagiaris", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emma", |
|
"middle": [], |
|
"last": "Hart", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dimitra", |
|
"middle": [], |
|
"last": "Gkatzia", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 13th International Conference on Natural Language Generation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "41--51", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nikolaos Panagiaris, Emma Hart, and Dimitra Gkatzia. 2020. Improving the naturalness and diversity of referring expression generation models using mini- mum risk training. In Proceedings of the 13th Inter- national Conference on Natural Language Genera- tion, pages 41-51, Dublin, Ireland. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Generating unambiguous and diverse referring expressions", |
|
"authors": [ |
|
{ |
|
"first": "Nikolaos", |
|
"middle": [], |
|
"last": "Panagiaris", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emma", |
|
"middle": [], |
|
"last": "Hart", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dimitra", |
|
"middle": [], |
|
"last": "Gkatzia", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Computer Speech & Language", |
|
"volume": "68", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/j.csl.2020.101184" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nikolaos Panagiaris, Emma Hart, and Dimitra Gkatzia. 2021. Generating unambiguous and diverse refer- ring expressions. Computer Speech & Language, 68:101184.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Bleu: a method for automatic evaluation of machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Kishore", |
|
"middle": [], |
|
"last": "Papineni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Salim", |
|
"middle": [], |
|
"last": "Roukos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Todd", |
|
"middle": [], |
|
"last": "Ward", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei-Jing", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "311--318", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/1073083.1073135" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2002. Bleu: a method for automatic eval- uation of machine translation. In Proceedings of the 40th Annual Meeting of the Association for Com- putational Linguistics, pages 311-318, Philadelphia, Pennsylvania, USA. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Building natural language generation systems", |
|
"authors": [ |
|
{ |
|
"first": "Ehud", |
|
"middle": [], |
|
"last": "Reiter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robert", |
|
"middle": [], |
|
"last": "Dale", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ehud Reiter and Robert Dale. 2000. Building natural language generation systems. Cambridge university press.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Diversity as a by-product: Goal-oriented language generation leads to linguistic variation", |
|
"authors": [ |
|
{ |
|
"first": "Simeon", |
|
"middle": [], |
|
"last": "Sch\u00fcz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ting", |
|
"middle": [], |
|
"last": "Han", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sina", |
|
"middle": [], |
|
"last": "Zarrie\u00df", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 22th Annual Meeting of the Special Interest Group on Discourse and Dialogue", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "175--185", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Simeon Sch\u00fcz, Ting Han, and Sina Zarrie\u00df. 2021. Di- versity as a by-product: Goal-oriented language gen- eration leads to linguistic variation. In Proceedings of the 22th Annual Meeting of the Special Interest Group on Discourse and Dialogue, pages 175-185. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Generating easyto-understand referring expressions for target identifications", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Tanaka", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Takayuki", |
|
"middle": [], |
|
"last": "Itamochi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Narioka", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Sato", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Ushiku", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Harada", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "IEEE/CVF International Conference on Computer Vision (ICCV)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5793--5802", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "M. Tanaka, Takayuki Itamochi, K. Narioka, Ikuro Sato, Y. Ushiku, and T. Harada. 2019. Generating easy- to-understand referring expressions for target identi- fications. 2019 IEEE/CVF International Conference on Computer Vision (ICCV), pages 5793-5802.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Context-aware captions from context-agnostic supervision", |
|
"authors": [ |
|
{ |
|
"first": "Ramakrishna", |
|
"middle": [], |
|
"last": "Vedantam", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samy", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Murphy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Devi", |
|
"middle": [], |
|
"last": "Parikh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gal", |
|
"middle": [], |
|
"last": "Chechik", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "251--260", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ramakrishna Vedantam, Samy Bengio, Kevin Murphy, Devi Parikh, and Gal Chechik. 2017. Context-aware captions from context-agnostic supervision. In Pro- ceedings of the IEEE Conference on Computer Vi- sion and Pattern Recognition, pages 251-260.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Cider: Consensus-based image description evaluation", |
|
"authors": [ |
|
{ |
|
"first": "Ramakrishna", |
|
"middle": [], |
|
"last": "Vedantam", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lawrence", |
|
"middle": [], |
|
"last": "Zitnick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Devi", |
|
"middle": [], |
|
"last": "Parikh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the IEEE conference on computer vision and pattern recognition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4566--4575", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ramakrishna Vedantam, C Lawrence Zitnick, and Devi Parikh. 2015. Cider: Consensus-based image de- scription evaluation. In Proceedings of the IEEE conference on computer vision and pattern recogni- tion, pages 4566-4575.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Modeling context in referring expressions", |
|
"authors": [ |
|
{ |
|
"first": "Licheng", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Patrick", |
|
"middle": [], |
|
"last": "Poirson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shan", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Berg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tamara", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Berg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Computer Vision -ECCV 2016", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "69--85", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Licheng Yu, Patrick Poirson, Shan Yang, Alexander C. Berg, and Tamara L. Berg. 2016. Modeling con- text in referring expressions. In Computer Vision - ECCV 2016, pages 69-85, Cham. Springer Interna- tional Publishing.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "A joint speaker-listener-reinforcer model for referring expressions", |
|
"authors": [ |
|
{ |
|
"first": "Licheng", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hao", |
|
"middle": [], |
|
"last": "Tan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohit", |
|
"middle": [], |
|
"last": "Bansal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tamara", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Berg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Computer Vision and Pattern Recognition (CVPR)", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Licheng Yu, Hao Tan, Mohit Bansal, and Tamara L Berg. 2017. A joint speaker-listener-reinforcer model for referring expressions. In Computer Vision and Pattern Recognition (CVPR), volume 2.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Easy things first: Installments improve referring expression generation for objects in photographs", |
|
"authors": [ |
|
{ |
|
"first": "Sina", |
|
"middle": [], |
|
"last": "Zarrie\u00df", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Schlangen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "610--620", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sina Zarrie\u00df and David Schlangen. 2016. Easy things first: Installments improve referring expression gen- eration for objects in photographs. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 610-620, Berlin, Germany. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Decoding strategies for neural referring expression generation", |
|
"authors": [ |
|
{ |
|
"first": "Sina", |
|
"middle": [], |
|
"last": "Zarrie\u00df", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Schlangen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 11th International Conference on Natural Language Generation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "503--512", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W18-6563" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sina Zarrie\u00df and David Schlangen. 2018. Decoding strategies for neural referring expression generation. In Proceedings of the 11th International Conference on Natural Language Generation, pages 503-512, Tilburg University, The Netherlands. Association for Computational Linguistics.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"type_str": "figure", |
|
"text": "Example from RefCOCO. ES and RSA describe the target (marked green) less ambiguously.", |
|
"uris": null, |
|
"num": null |
|
}, |
|
"FIGREF1": { |
|
"type_str": "figure", |
|
"text": "Detection results for different \u03bb settings in ES decoding. Crosses mark the highest detection results.", |
|
"uris": null, |
|
"num": null |
|
}, |
|
"TABREF0": { |
|
"text": "0.79 75.1 56.2 1.27 66.4 45.6 0.68 63.8 33.9 0.64 43.0 45.0 0.71 54.4 beam 52.8 0.81 75.0 55.4 1.31 66.0 40.4 0.66 62.4 32.5 0.75 43.6 45.0 0.79 54.8 ES \u03bb0.7 52.9 0.80 77.6 55.7 1.24 70.5 40.3 0.67 67.1 30.9 0.71 46.9 44.1 0.74 57.1 ES \u03bb0.5 49.8 0.73 80.6 53.3 1.11 71.8 37.1 0.60 68.7 27.4 0.61 47.3 40.4 0.63 57.0 ES \u03bb0.3 35.8 0.53 79.1 44.5 0.81 71.3 23.1 0.37 66.5 21.0 0.40 48.1 29.5 0.37 54.6", |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table><tr><td/><td/><td>testA</td><td/><td/><td>testB</td><td/><td/><td>testA+</td><td/><td/><td>testB+</td><td>testg</td></tr><tr><td/><td colspan=\"2\">BL1 CDr</td><td colspan=\"3\">det. BL1 CDr</td><td colspan=\"3\">det. BL1 CDr</td><td colspan=\"3\">det. BL1 CDr</td><td>det. BL1 CDr</td><td>det.</td></tr><tr><td colspan=\"12\">greedy 53.2 RSA\u03b10.5 50.2 0.77 76.0 55.1 1.26 69.4 32.7 0.58 62.8 28.2 0.67 44.4 43.5 0.70 55.7</td></tr><tr><td colspan=\"12\">RSA\u03b11.0 50.4 0.77 76.4 54.8 1.22 69.1 33.3 0.59 63.2 27.5 0.65 44.6 43.0 0.69 56.1</td></tr><tr><td colspan=\"12\">RSA\u03b15.0 50.9 0.75 79.3 52.7 1.05 70.9 35.4 0.57 66.3 25.7 0.58 46.8 40.2 0.61 57.4</td></tr><tr><td>human</td><td>-</td><td colspan=\"2\">-84.4</td><td>-</td><td colspan=\"2\">-74.2</td><td>-</td><td colspan=\"2\">-72.6</td><td>-</td><td>-57.9</td><td>-</td><td>-63.7</td></tr><tr><td colspan=\"12\">Table 1: Likelihood (BLEU 1 , CIDEr) and informativity (detection) for decoding strategies and data splits.</td></tr></table>", |
|
"num": null |
|
}, |
|
"TABREF1": { |
|
"text": "det.), although", |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table><tr><td/><td/><td>testA</td><td/><td/><td>testB</td><td/><td/><td>testA+</td><td/><td/><td>testB+</td><td/><td/><td>testg</td></tr><tr><td/><td>T1</td><td>T2</td><td>cov.</td><td>T1</td><td>T2</td><td>cov.</td><td>T1</td><td>T2</td><td>cov.</td><td>T1</td><td>T2</td><td>cov.</td><td>T1</td><td>T2</td><td>cov.</td></tr><tr><td>greedy</td><td colspan=\"2\">7.0 29.2</td><td colspan=\"3\">4.1 10.6 46.5</td><td>5.6</td><td colspan=\"2\">8.7 28.2</td><td colspan=\"3\">4.2 16.5 48.1</td><td colspan=\"4\">7.7 16.7 40.0 10.7</td></tr><tr><td>beam</td><td colspan=\"2\">6.6 27.8</td><td colspan=\"3\">3.6 10.3 48.6</td><td>5.2</td><td colspan=\"2\">9.4 32.7</td><td colspan=\"3\">4.1 16.4 56.4</td><td colspan=\"4\">6.9 17.5 42.4 10.4</td></tr><tr><td>ES \u03bb0.7</td><td colspan=\"2\">8.3 35.0</td><td colspan=\"3\">4.8 11.6 49.7</td><td colspan=\"3\">6.1 12.5 42.6</td><td colspan=\"3\">5.4 19.3 64.0</td><td colspan=\"4\">7.9 19.8 48.0 13.0</td></tr><tr><td>ES \u03bb0.5</td><td colspan=\"2\">11.6 46.4</td><td colspan=\"3\">6.9 13.4 54.4</td><td colspan=\"3\">7.6 16.1 54.4</td><td colspan=\"3\">7.9 22.2 70.3</td><td colspan=\"4\">9.5 22.7 56.4 16.5</td></tr><tr><td>ES \u03bb0.3</td><td colspan=\"15\">17.5 60.7 12.6 18.4 62.6 13.0 22.7 65.8 13.2 29.2 80.3 14.8 28.7 71.0 23.6</td></tr><tr><td>RSA\u03b10.5</td><td colspan=\"2\">7.8 33.7</td><td colspan=\"3\">4.3 11.6 50.2</td><td colspan=\"3\">5.9 12.1 43.2</td><td colspan=\"3\">5.0 18.5 61.0</td><td colspan=\"4\">7.4 19.8 48.1 12.6</td></tr><tr><td>RSA\u03b11.0</td><td colspan=\"2\">7.8 34.6</td><td colspan=\"3\">4.4 11.8 51.6</td><td colspan=\"3\">5.9 13.0 47.0</td><td colspan=\"3\">5.7 19.7 64.7</td><td colspan=\"4\">7.9 20.4 49.8 13.6</td></tr><tr><td>RSA\u03b15.0</td><td colspan=\"2\">9.4 39.0</td><td colspan=\"3\">5.7 13.5 55.8</td><td colspan=\"3\">7.2 16.1 54.1</td><td colspan=\"7\">7.6 23.9 74.4 10.6 22.5 57.7 16.3</td></tr><tr><td>human</td><td colspan=\"15\">24.9 71.7 22.4 27.6 79.6 23.1 31.2 80.6 20.9 39.6 91.2 26.2 34.0 77.8 44.4</td></tr></table>", |
|
"num": null |
|
}, |
|
"TABREF2": { |
|
"text": "Diversity results (TTR 1 , TTR 2 , coverage) for decoding strategies and data splits", |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table/>", |
|
"num": null |
|
} |
|
} |
|
} |
|
} |