|
{ |
|
"paper_id": "2022", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T16:25:05.704013Z" |
|
}, |
|
"title": "Scene Graph Parsing via Abstract Meaning Representation in Pre-trained Language Models", |
|
"authors": [ |
|
{ |
|
"first": "Suk", |
|
"middle": [], |
|
"last": "Woo", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Seoul National University", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Yu-Jung", |
|
"middle": [], |
|
"last": "Heo", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Seoul National University", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Dharani", |
|
"middle": [], |
|
"last": "Punitan", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Seoul National University", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Byoung-Tak", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Seoul National University", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "In this work, we propose the application of abstract meaning representation (AMR) based semantic parsing models to parse textual descriptions of a visual scene into scene graphs, which is the first work to the best of our knowledge. Previous works examined scene graph parsing from textual descriptions using dependency parsing and left the AMR parsing approach as future work since sophisticated methods are required to apply AMR. Hence, we use pre-trained AMR parsing models to parse the region descriptions of visual scenes (i.e. images) into AMR graphs and pre-trained language models (PLM), BART and T5, to parse AMR graphs into scene graphs. The experimental results show that our approach explicitly captures high-level semantics from textual descriptions of visual scenes, such as objects, attributes of objects, and relationships between objects. Our textual scene graph parsing approach outperforms the previous state-of-the-art results by 9.3% in the SPICE metric score.", |
|
"pdf_parse": { |
|
"paper_id": "2022", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "In this work, we propose the application of abstract meaning representation (AMR) based semantic parsing models to parse textual descriptions of a visual scene into scene graphs, which is the first work to the best of our knowledge. Previous works examined scene graph parsing from textual descriptions using dependency parsing and left the AMR parsing approach as future work since sophisticated methods are required to apply AMR. Hence, we use pre-trained AMR parsing models to parse the region descriptions of visual scenes (i.e. images) into AMR graphs and pre-trained language models (PLM), BART and T5, to parse AMR graphs into scene graphs. The experimental results show that our approach explicitly captures high-level semantics from textual descriptions of visual scenes, such as objects, attributes of objects, and relationships between objects. Our textual scene graph parsing approach outperforms the previous state-of-the-art results by 9.3% in the SPICE metric score.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Understanding and representing a scene is straightforward for humans, but an AI system requires various techniques to implement it. One such technique is scene graph proposed by (Johnson et al., 2015) . Scene graph is a graph-structured representation that captures high-level semantics of visual scenes (i.e. images) by explicitly modeling objects along with their attributes and relationships with other objects. Scene graph is demonstrated effective in various tasks including semantic image retrieval Schroeder and Tripathi, 2020) , image captioning (Yang et al., 2019; Zhong et al., 2020) , and visual question answering (Hildebrandt et al., 2020; Damodaran et al., 2021) . Approaches for scene graph generation are classified into two categories: 1) scene graph generation based on image as input and 2) scene graph generation based on text (i.e. image caption) as input. Various approaches (Xu et al., 2017; Zellers et al., 2018; Gu et al., 2019; Zhong et al., 2021) are proposed for the former category. On the other hand, only a fewer approaches (Schuster et al., 2015; Anderson et al., 2016; Wang et al., 2018; Andrews et al., 2019) are proposed for the latter. In this paper, we focus on the latter category, which is also called textual scene graph parsing. Textual scene graph parsing has the advantage of being able to capture the high-level meaning of the image scene from the text.", |
|
"cite_spans": [ |
|
{ |
|
"start": 178, |
|
"end": 200, |
|
"text": "(Johnson et al., 2015)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 505, |
|
"end": 534, |
|
"text": "Schroeder and Tripathi, 2020)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 554, |
|
"end": 573, |
|
"text": "(Yang et al., 2019;", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 574, |
|
"end": 593, |
|
"text": "Zhong et al., 2020)", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 626, |
|
"end": 652, |
|
"text": "(Hildebrandt et al., 2020;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 653, |
|
"end": 676, |
|
"text": "Damodaran et al., 2021)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 897, |
|
"end": 914, |
|
"text": "(Xu et al., 2017;", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 915, |
|
"end": 936, |
|
"text": "Zellers et al., 2018;", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 937, |
|
"end": 953, |
|
"text": "Gu et al., 2019;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 954, |
|
"end": 973, |
|
"text": "Zhong et al., 2021)", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 1055, |
|
"end": 1078, |
|
"text": "(Schuster et al., 2015;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 1079, |
|
"end": 1101, |
|
"text": "Anderson et al., 2016;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 1102, |
|
"end": 1120, |
|
"text": "Wang et al., 2018;", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 1121, |
|
"end": 1142, |
|
"text": "Andrews et al., 2019)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Most of previous works (Schuster et al., 2015; Anderson et al., 2016; Wang et al., 2018) for scene graph parsing generated scene graphs using dependency parsing to acquire the dependency relationships for all words in a text, as shown in Figure 1 (a). Apart from dependency parsing, there is also another approach for parsing semantic graphs from textual descriptions, which is called abstract mean-ing representation (AMR) proposed by (Banarescu et al., 2013) . AMR abstracts semantic concepts from words, and we therefore consider AMR is more suitable for scene graph parsing. However, the use of dependency parsing appeared to be a common theme in the literature rather than AMR, hence scene graph parsing with AMR has been left as future work in (Anderson et al., 2016; Wang et al., 2018) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 23, |
|
"end": 46, |
|
"text": "(Schuster et al., 2015;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 47, |
|
"end": 69, |
|
"text": "Anderson et al., 2016;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 70, |
|
"end": 88, |
|
"text": "Wang et al., 2018)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 436, |
|
"end": 460, |
|
"text": "(Banarescu et al., 2013)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 750, |
|
"end": 773, |
|
"text": "(Anderson et al., 2016;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 774, |
|
"end": 792, |
|
"text": "Wang et al., 2018)", |
|
"ref_id": "BIBREF28" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 238, |
|
"end": 246, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "To this end, we investigate the use of AMR with pre-trained language models (PLM), such as BART (Lewis et al., 2020) and T5 (Raffel et al., 2020) , for parsing scene graphs from textual descriptions of visual scenes. We first parse sentences to AMR graphs using a pre-trained AMR parsing model, and then we generate scene graphs from AMR graphs using the PLM.", |
|
"cite_spans": [ |
|
{ |
|
"start": 96, |
|
"end": 116, |
|
"text": "(Lewis et al., 2020)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 124, |
|
"end": 145, |
|
"text": "(Raffel et al., 2020)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Our contributions are the following: i) To the best of our knowledge, ours is the first work for parsing scene graphs from texts using abstract meaning representation (AMR) contrary to the previous works (Schuster et al., 2015; Anderson et al., 2016; Wang et al., 2018) . ii) We extend pre-trained language models such as BART and T5 to generate scene graphs from texts and AMR graphs. iii) Our approach outperforms the previous state-of-the-art result by 9.3% on SPICE metric for scene graph parsing task on intersection of Visual Genome and MS COCO datasets.", |
|
"cite_spans": [ |
|
{ |
|
"start": 204, |
|
"end": 227, |
|
"text": "(Schuster et al., 2015;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 228, |
|
"end": 250, |
|
"text": "Anderson et al., 2016;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 251, |
|
"end": 269, |
|
"text": "Wang et al., 2018)", |
|
"ref_id": "BIBREF28" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Abstract meaning representation (AMR) (Banarescu et al., 2013) is a graph-based semantic representation which captures semantics \"who is doing what to whom\" in a sentence. Each sentence is represented as a rooted, directed, acyclic graph with labels on nodes (e.g. semantic concepts) and edges (e.g. semantic relations). Representative tasks for AMR are Text-to-AMR, capturing the meaning of a sentence within a semantic graph, and AMR-to-Text, generating text from such a graph. AMR2.0 (LDC2017T10) and AMR3.0 (LDC2020T02) datasets are currently actively used, which contain a semantic treebank of over 39, 260 and 59, 255 English natural language sentences, respectively from broadcast conversations, newswire, weblogs and web discussion forums.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract Meaning Representation", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "To address these tasks, earlier studies used statistical methods. With the development of deep learning, researchers have proposed neural mod-els such as graph-to-sequence (Zhu et al., 2019) , sequence-to-graph (Cai and Lam, 2020) , and neural transition-based parser models (Zhou et al., 2021) . Recently, with the advent of pre-trained language models (PLM), AMR-based models incorporating the generation capability of PLM have been proposed and shown interesting results for various NLP tasks such as information extraction (Huang et al., 2018; Zhang and Ji, 2021 ), text summarization (Liu et al., 2015; Dohare and Karnick, 2017) , and dialogue systems (Bonial et al., 2020) . (Lam et al., 2021) proposed an efficient heuristic algorithm to approximate the optimal solution by formalizing ensemble graph prediction as mining the largest graph that is the most supported by a collection of graph predictions. (Bevilacqua et al., 2021) proposed symmetric parsing and generation (SPRING), which casts AMR tasks as a symmetric transduction task by devising graph linearization and extending the pre-trained encoderdecoder model, BART. In this paper, we utilize pre-trained AMR parsing (i.e. Text-to-AMR) models from (Bevilacqua et al., 2021) to parse AMR graph from sentences since the SPRING model has the best performance among the publicly available pre-trained AMR parsing models 1 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 172, |
|
"end": 190, |
|
"text": "(Zhu et al., 2019)", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 211, |
|
"end": 230, |
|
"text": "(Cai and Lam, 2020)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 275, |
|
"end": 294, |
|
"text": "(Zhou et al., 2021)", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 527, |
|
"end": 547, |
|
"text": "(Huang et al., 2018;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 548, |
|
"end": 566, |
|
"text": "Zhang and Ji, 2021", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 589, |
|
"end": 607, |
|
"text": "(Liu et al., 2015;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 608, |
|
"end": 633, |
|
"text": "Dohare and Karnick, 2017)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 657, |
|
"end": 678, |
|
"text": "(Bonial et al., 2020)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 681, |
|
"end": 699, |
|
"text": "(Lam et al., 2021)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 912, |
|
"end": 937, |
|
"text": "(Bevilacqua et al., 2021)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 1216, |
|
"end": 1241, |
|
"text": "(Bevilacqua et al., 2021)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract Meaning Representation", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Scene graph proposed by (Johnson et al., 2015 ) is a graph-structured representation that represents rich structured semantics of visual scenes (i.e. images). Nodes in the scene graph represent either an object, an attribute for an object, or a relationship between objects. Edges depict the connection between two nodes. In this subsection, we introduce the study of scene graph parsing based on text. Most of the previous studies (Schuster et al., 2015; Anderson et al., 2016; Wang et al., 2018) used dependency parsing as a common theme. (Schuster et al., 2015) proposed a rule-based and a learned classifier with dependency parsing. (Wang et al., 2018) proposed a customized dependency parser with end-to-end training to parse scene graph. (Andrews et al., 2019) proposed a customized attention graph mechanism using the OpenAI Transformer 2 (Radford and Narasimhan, 2018) . Unlike these studies, we use the AMR approach to parse scene graphs and demonstrate better quantitative performance.", |
|
"cite_spans": [ |
|
{ |
|
"start": 24, |
|
"end": 45, |
|
"text": "(Johnson et al., 2015", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 432, |
|
"end": 455, |
|
"text": "(Schuster et al., 2015;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 456, |
|
"end": 478, |
|
"text": "Anderson et al., 2016;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 479, |
|
"end": 497, |
|
"text": "Wang et al., 2018)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 541, |
|
"end": 564, |
|
"text": "(Schuster et al., 2015)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 637, |
|
"end": 656, |
|
"text": "(Wang et al., 2018)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 744, |
|
"end": 766, |
|
"text": "(Andrews et al., 2019)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 846, |
|
"end": 876, |
|
"text": "(Radford and Narasimhan, 2018)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Scene Graph Parsing", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "BART (Lewis et al., 2020 ) is a denoising autoencoder for pretraining sequence-to-sequence (seq2seq) models. It uses the standard Transformer (Vaswani et al., 2017 )-based neural machine translation (NMT) architecture. It is constructed based on seq2seq/NMT architecture by combining a bidirectional encoder (Devlin et al., 2019) and a leftto-right decoder (Radford et al., 2019) . BART is trained by corrupting text with an arbitrary noising function (i.e. token masking, infilling, deletion, and sentence permutation) and learning a model to reconstruct the original text. We use both BARTbase (BART model with 6 encoder and decoder layers and around 140M parameters) and BARTlarge (BART model with 12 encoder and decoder layers and nearly 400M parameters) models for our investigation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 5, |
|
"end": 24, |
|
"text": "(Lewis et al., 2020", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 142, |
|
"end": 163, |
|
"text": "(Vaswani et al., 2017", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 308, |
|
"end": 329, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 357, |
|
"end": 379, |
|
"text": "(Radford et al., 2019)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pre-trained Language Model", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "T5 (Raffel et al., 2020) is an encoder-decoder unified framework that is pre-trained on a multitask mixture of unsupervised and supervised tasks and for which a wide range of NLP tasks such as translation, classification, and question answering are cast as feeding the model text as input and training it to generate some target text. We use both T5-base (T5 model with 12 encoder and decoder layers and nearly 220M parameters) and T5-large (T5 model with 24 encoder and decoder layers and nearly 770M parameters) models for our examination.", |
|
"cite_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 24, |
|
"text": "(Raffel et al., 2020)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pre-trained Language Model", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "In this section, we use pre-trained language models (PLM), BART (Lewis et al., 2020) and T5 (Raffel et al., 2020) , as baselines to parse scene graph (SG) from text directly (Text-to-SG). We then describe how to generate scene graphs from AMR graphs (AMR-to-SG) using PLM models.", |
|
"cite_spans": [ |
|
{ |
|
"start": 64, |
|
"end": 84, |
|
"text": "(Lewis et al., 2020)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 92, |
|
"end": 113, |
|
"text": "(Raffel et al., 2020)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "We train the pre-trained language models to take each region description of an image as input and generate scene graphs. The PLM models take text as input and map it into a task-specific output sequence. For instance, if the region description \"White street sign with black writing\" is an input, the parsed output, {(street sign, writing), (white-street sign, black-writing), (street sign-with-writing)} will be in the form of {(objects), (attribute-object), (object-relationship-object)}.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Text-to-SG Parsing", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "First, we parse the region descriptions into AMR graphs. Then, we parse the AMR graphs into the scene graphs. For this, we use the two AMR parsing models of SPRING (Bevilacqua et al., 2021) , which are pre-trained on AMR2.0 (LDC2017T10) and AMR3.0 (LDC2020T02) datasets.", |
|
"cite_spans": [ |
|
{ |
|
"start": 164, |
|
"end": 189, |
|
"text": "(Bevilacqua et al., 2021)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "AMR-to-SG Parsing", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "We linearize the AMR graph into a sequence of symbols which will be the input to pre-trained language models, BART and T5, for training. For the linearization technique, we adopt the depth-first search (DFS) based algorithm used in (Konstas et al., 2017) , as it is closely related to the way how natural language syntactic trees are linearized (Bevilacqua et al., 2021) . Thus, as shown in Figure 1 (b) , the input of BART and T5 will be \"(z0 / sign :mod (z1 / street) :ARG1-of (z2 / white-03) :ARG1of (z3 / write-01 :ARG1-of (z4 / black-04)))\", where zo, z1, z2, z3 and z4 are special tokens to handle co-referring nodes, and the output will be in the same format as Text-to-SG parsing output.", |
|
"cite_spans": [ |
|
{ |
|
"start": 232, |
|
"end": 254, |
|
"text": "(Konstas et al., 2017)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 345, |
|
"end": 370, |
|
"text": "(Bevilacqua et al., 2021)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 391, |
|
"end": 404, |
|
"text": "Figure 1 (b)", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "AMR-to-SG Parsing", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Datasets For fair comparisons with the existing models, we train and validate our models with the subsets of Visual Genome (VG) (Krishna et al., 2016) and MS COCO (Lin et al., 2014) datasets. The training set is the intersection of the VG and MS COCO train2014 set (34,027 images with 1,070,145 regions). The evaluation set is the intersection of VG and MS COCO val2014 set (17,471 images with 547,795 regions). We follow the same preprocessing steps as in (Wang et al., 2018) for setting the training/test splits.", |
|
"cite_spans": [ |
|
{ |
|
"start": 128, |
|
"end": 150, |
|
"text": "(Krishna et al., 2016)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 163, |
|
"end": 181, |
|
"text": "(Lin et al., 2014)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 457, |
|
"end": 476, |
|
"text": "(Wang et al., 2018)", |
|
"ref_id": "BIBREF28" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Implementation Details", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Evaluation To evaluate parsed scene graphs from region descriptions with the ground truth region scene graphs, we use SPICE metric (Anderson et al., 2016) which calculates a F-score over tuples. As mentioned in (Wang et al., 2018) , there is an issue that a node in one graph could be matched to several nodes in the other when SPICE calculates the F-score. Thus, following previous works, we enforce one-to-one matching while calculating the F-score and report the average F-score for all regions.", |
|
"cite_spans": [ |
|
{ |
|
"start": 131, |
|
"end": 154, |
|
"text": "(Anderson et al., 2016)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 211, |
|
"end": 230, |
|
"text": "(Wang et al., 2018)", |
|
"ref_id": "BIBREF28" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Implementation Details", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Scene graph parser F-score Text-to-SG Stanford (Schuster et al., 2015) 0.3549 SPICE (Anderson et al., 2016) 0.4469 CDP (Wang et al., 2018) 0.4967 AG (Andrews et al., 2019) 0 Table 1 : F-score (i.e. SPICE metric) comparison between pre-trained language models (for Text-to-SG and AMR-to-SG) and existing parsers on the intersection of VG (Krishna et al., 2016) and MS COCO (Lin et al., 2014) validation set. CDP and AG are abbreviations of Customized Dependency Parser and Attention Graph, respectively.", |
|
"cite_spans": [ |
|
{ |
|
"start": 47, |
|
"end": 70, |
|
"text": "(Schuster et al., 2015)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 84, |
|
"end": 107, |
|
"text": "(Anderson et al., 2016)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 119, |
|
"end": 138, |
|
"text": "(Wang et al., 2018)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 149, |
|
"end": 171, |
|
"text": "(Andrews et al., 2019)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 337, |
|
"end": 359, |
|
"text": "(Krishna et al., 2016)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 372, |
|
"end": 390, |
|
"text": "(Lin et al., 2014)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 174, |
|
"end": 181, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Implementation Details", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Experimental Settings In our experiments, We set the number of epoch to 5, the batch size to 32, and learning rate to 0.0005 with a weight decay of 0.004. It takes about a day to train BART-base, BART-large, and T5-base models and around four days to train T5-large model using two Tesla V100 with 32 GB graphic memory. Table 1 shows results of the F-score comparison between pre-trained language models (PLM) with both Text-to-SG and AMR-to-SG and existing parsers on the intersection of VG and MS COCO validation set.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 320, |
|
"end": 327, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Implementation Details", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Text-to-SG We observe that the performance of PLM models is relatively higher than dependency parsing based models (i.e. Stanford, SPICE and Customized Dependency Parser) and shows comparable results with the previous state-of-the-art model, Attention Graph (AG), which used customized attention graph with pre-trained transformer model. Furthermore, we find that the larger the model size, the better the performance. We expect to improve the performance of PLM models with hyperparameter tuning, which we perform as our future work.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Analysis", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "AMR-to-SG All parsing models using AMR (AMR-to-SG) not only outperform the previous state-of-the-art model, Attention Graph (AG), but also show better performance than Text-to-SG PLM-based models. All of AMR-to-SG models for AMR 2.0 achieves an average of 8.92% performance improvement, and 8.91% for AMR 3.0. In particular, our best model (T5-large for AMR 2.0) outperforms the previous state-of-the-art model by 9.3%. Interestingly, despite the same PLM model, when comparing the case where AMR graph is input instead of text, BART shows an average of 10.32% performance improvement for AMR 2.0 and 10.22% for AMR 3.0, respectively. T5 shows an average of 10.43% performance improvement for AMR 2.0 and 12.87% for AMR 3.0, respectively. In consequence, we find that the AMR based approach captures high-level abstract semantics of text better than dependency parsers and the other baseline models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Analysis", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "In this work, we investigate the application of abstract meaning representation (AMR) for parsing scene graph by using pre-trained language models (PLM), BART and T5, with AMR parsing model of SPRING. We conducted two sets of experiments: 1) scene graph parsing using PLM models, directly from region descriptions, and 2) scene graph parsing using PLM models from AMR graphs parsed from region descriptions via AMR parsing pretrained models. Our results show AMR graphs capture high-level abstract semantics of region descriptions. We evaluate our approach using the SPICE metric score. The results of Text-to AMR are comparable and of AMR-to-Text outperform the existing state-of-the-art models by 9.3%. In our future work, we will investigate an adapter-based method (Ribeiro et al., 2021) to encode graph structures into PLM models to improve the performance of textual scene graph parsing. Furthermore, we will examine our approach based on the lately published, pre-trained AMR parsing model, AMRBART 3 (Bai et al., 2022) . As our scene graph parser performance improves further, we expect to be able to use it to automatically generate either an image scene graph or video scene graph datasets with less biased and more diverse labels.", |
|
"cite_spans": [ |
|
{ |
|
"start": 1008, |
|
"end": 1026, |
|
"text": "(Bai et al., 2022)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "https://github.com/SapienzaNLP/spring 2 This model consists of a BPE (Byte-Pair-Encoding) subword embedding layer followed by 12-layers of decoder-only transformer with masked self-attention heads.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://github.com/muyeby/AMRBART", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This work was partly supported by the Institute of Information Communications Technology Planning Evaluation (2015-0-00310-SW.StarLab/20%, 2019-0-01371-BabyMind/20%, 2021-0-02068-AIHub/10%, 2021-0-01343-GSAI/10%, 2022-0-00951-LBA/20%, 2022-0-00166-PICA/20%) grant funded by the Korean government.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Spice: Semantic propositional image caption evaluation", |
|
"authors": [ |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Anderson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Basura", |
|
"middle": [], |
|
"last": "Fernando", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Johnson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Gould", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Computer Vision -ECCV 2016", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "382--398", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peter Anderson, Basura Fernando, Mark Johnson, and Stephen Gould. 2016. Spice: Semantic propositional image caption evaluation. In Computer Vision - ECCV 2016, pages 382-398, Cham. Springer Inter- national Publishing.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Scene graph parsing by attention graph", |
|
"authors": [ |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Andrews", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ken", |
|
"middle": [], |
|
"last": "Yew", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sam", |
|
"middle": [], |
|
"last": "Chia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Witteveen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Martin Andrews, Yew Ken Chia, and Sam Witteveen. 2019. Scene graph parsing by attention graph. CoRR, abs/1909.06273.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Graph pre-training for amr parsing and generation", |
|
"authors": [ |
|
{ |
|
"first": "Xuefeng", |
|
"middle": [], |
|
"last": "Bai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yulong", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yue", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2022, |
|
"venue": "ArXiv", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xuefeng Bai, Yulong Chen, and Yue Zhang. 2022. Graph pre-training for amr parsing and generation. ArXiv, abs/2203.07836.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Abstract Meaning Representation for sembanking", |
|
"authors": [ |
|
{ |
|
"first": "Laura", |
|
"middle": [], |
|
"last": "Banarescu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Claire", |
|
"middle": [], |
|
"last": "Bonial", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shu", |
|
"middle": [], |
|
"last": "Cai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Madalina", |
|
"middle": [], |
|
"last": "Georgescu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kira", |
|
"middle": [], |
|
"last": "Griffitt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ulf", |
|
"middle": [], |
|
"last": "Hermjakob", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Knight", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Koehn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martha", |
|
"middle": [], |
|
"last": "Palmer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nathan", |
|
"middle": [], |
|
"last": "Schneider", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 7th Linguistic Annotation Workshop and Interoperability with Discourse", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "178--186", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Laura Banarescu, Claire Bonial, Shu Cai, Madalina Georgescu, Kira Griffitt, Ulf Hermjakob, Kevin Knight, Philipp Koehn, Martha Palmer, and Nathan Schneider. 2013. Abstract Meaning Representation for sembanking. In Proceedings of the 7th Linguistic Annotation Workshop and Interoperability with Dis- course, pages 178-186, Sofia, Bulgaria. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "One SPRING to rule them both: Symmetric AMR semantic parsing and generation without a complex pipeline", |
|
"authors": [ |
|
{ |
|
"first": "Michele", |
|
"middle": [], |
|
"last": "Bevilacqua", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rexhina", |
|
"middle": [], |
|
"last": "Blloshmi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roberto", |
|
"middle": [], |
|
"last": "Navigli", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of AAAI", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michele Bevilacqua, Rexhina Blloshmi, and Roberto Navigli. 2021. One SPRING to rule them both: Sym- metric AMR semantic parsing and generation without a complex pipeline. In Proceedings of AAAI.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Dialogue-AMR: Abstract Meaning Representation for dialogue", |
|
"authors": [ |
|
{ |
|
"first": "Claire", |
|
"middle": [], |
|
"last": "Bonial", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lucia", |
|
"middle": [], |
|
"last": "Donatelli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mitchell", |
|
"middle": [], |
|
"last": "Abrams", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephanie", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Lukin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Tratz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Marge", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ron", |
|
"middle": [], |
|
"last": "Artstein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Traum", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Clare", |
|
"middle": [], |
|
"last": "Voss", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 12th Language Resources and Evaluation Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "684--695", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Claire Bonial, Lucia Donatelli, Mitchell Abrams, Stephanie M. Lukin, Stephen Tratz, Matthew Marge, Ron Artstein, David Traum, and Clare Voss. 2020. Dialogue-AMR: Abstract Meaning Representation for dialogue. In Proceedings of the 12th Lan- guage Resources and Evaluation Conference, pages 684-695, Marseille, France. European Language Re- sources Association.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "AMR parsing via graphsequence iterative inference", |
|
"authors": [ |
|
{ |
|
"first": "Deng", |
|
"middle": [], |
|
"last": "Cai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wai", |
|
"middle": [], |
|
"last": "Lam", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1290--1301", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.119" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Deng Cai and Wai Lam. 2020. AMR parsing via graph- sequence iterative inference. In Proceedings of the 58th Annual Meeting of the Association for Compu- tational Linguistics, pages 1290-1301, Online. Asso- ciation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Understanding the role of scene graphs in visual question answering", |
|
"authors": [ |
|
{

"first": "Vinay",

"middle": [],

"last": "Damodaran",

"suffix": ""

},

{

"first": "Sharanya",

"middle": [],

"last": "Chakravarthy",

"suffix": ""

},

{

"first": "Akshay",

"middle": [],

"last": "Kumar",

"suffix": ""

},

{

"first": "Anjana",

"middle": [],

"last": "Umapathy",

"suffix": ""

},

{

"first": "Teruko",

"middle": [],

"last": "Mitamura",

"suffix": ""

},

{

"first": "Yuta",

"middle": [],

"last": "Nakashima",

"suffix": ""

},

{

"first": "Noa",

"middle": [],

"last": "Garc\u00eda",

"suffix": ""

},

{

"first": "Chenhui",

"middle": [],

"last": "Chu",

"suffix": ""

}
|
], |
|
"year": 2021, |
|
"venue": "ArXiv", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vinay Damodaran, Sharanya Chakravarthy, Akshay Kumar, Anjana Umapathy, Teruko Mitamura, Yuta Nakashima, Noa Garc\u00eda, and Chenhui Chu. 2021. Understanding the role of scene graphs in visual ques- tion answering. ArXiv, abs/2101.05479.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "NAACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. Bert: Pre-training of deep bidirectional transformers for language understand- ing. In NAACL.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Text summarization using abstract meaning representation", |
|
"authors": [ |
|
{ |
|
"first": "Shibhansh", |
|
"middle": [], |
|
"last": "Dohare", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Harish", |
|
"middle": [], |
|
"last": "Karnick", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "ArXiv", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shibhansh Dohare and Harish Karnick. 2017. Text summarization using abstract meaning representa- tion. ArXiv, abs/1706.01678.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Scene graph generation with external knowledge and image reconstruction", |
|
"authors": [ |
|
{ |
|
"first": "Jiuxiang", |
|
"middle": [], |
|
"last": "Gu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Handong", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhe", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sheng", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianfei", |
|
"middle": [], |
|
"last": "Cai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mingyang", |
|
"middle": [], |
|
"last": "Ling", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiuxiang Gu, Handong Zhao, Zhe Lin, Sheng Li, Jianfei Cai, and Mingyang Ling. 2019. Scene graph genera- tion with external knowledge and image reconstruc- tion. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR).", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Scene graph reasoning for visual question answering",
|
"authors": [ |
|
{ |
|
"first": "Marcel", |
|
"middle": [], |
|
"last": "Hildebrandt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hang", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rajat", |
|
"middle": [], |
|
"last": "Koner", |
|
"suffix": "" |
|
}, |
|
{

"first": "Volker",

"middle": [],

"last": "Tresp",

"suffix": ""

},

{

"first": "Stephan",

"middle": [],

"last": "G\u00fcnnemann",

"suffix": ""

}
|
], |
|
"year": 2020,
|
"venue": "ArXiv",
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marcel Hildebrandt, Hang Li, Rajat Koner, Volker Tresp, and Stephan G\u00fcnnemann. 2020. Scene graph reasoning for visual question answering. ArXiv, abs/2007.01072.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Zero-shot transfer learning for event extraction", |
|
"authors": [ |
|
{ |
|
"first": "Lifu", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Heng", |
|
"middle": [], |
|
"last": "Ji", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ido", |
|
"middle": [], |
|
"last": "Dagan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Riedel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Clare", |
|
"middle": [], |
|
"last": "Voss", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "2160--2170", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P18-1201" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lifu Huang, Heng Ji, Kyunghyun Cho, Ido Dagan, Se- bastian Riedel, and Clare Voss. 2018. Zero-shot transfer learning for event extraction. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 2160-2170, Melbourne, Australia. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Image retrieval using scene graphs", |
|
"authors": [ |
|
{ |
|
"first": "Justin", |
|
"middle": [], |
|
"last": "Johnson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ranjay", |
|
"middle": [], |
|
"last": "Krishna", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Stark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Li-Jia", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Shamma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Bernstein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Fei-Fei", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Justin Johnson, Ranjay Krishna, Michael Stark, Li-Jia Li, David Shamma, Michael Bernstein, and Li Fei- Fei. 2015. Image retrieval using scene graphs. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR).", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Neural AMR: Sequence-to-sequence models for parsing and generation", |
|
"authors": [ |
|
{ |
|
"first": "Ioannis", |
|
"middle": [], |
|
"last": "Konstas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Srinivasan", |
|
"middle": [], |
|
"last": "Iyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Yatskar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yejin", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "146--157", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P17-1014" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ioannis Konstas, Srinivasan Iyer, Mark Yatskar, Yejin Choi, and Luke Zettlemoyer. 2017. Neural AMR: Sequence-to-sequence models for parsing and gener- ation. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Vol- ume 1: Long Papers), pages 146-157, Vancouver, Canada. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Visual genome: Connecting language and vision using crowdsourced dense image annotations", |
|
"authors": [ |
|
{ |
|
"first": "Ranjay", |
|
"middle": [], |
|
"last": "Krishna", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuke", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oliver", |
|
"middle": [], |
|
"last": "Groth", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Justin", |
|
"middle": [], |
|
"last": "Johnson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenji", |
|
"middle": [], |
|
"last": "Hata", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joshua", |
|
"middle": [], |
|
"last": "Kravitz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephanie", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yannis", |
|
"middle": [], |
|
"last": "Kalantidis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Li-Jia", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Shamma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Bernstein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Fei-Fei", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ranjay Krishna, Yuke Zhu, Oliver Groth, Justin Johnson, Kenji Hata, Joshua Kravitz, Stephanie Chen, Yan- nis Kalantidis, Li-Jia Li, David A Shamma, Michael Bernstein, and Li Fei-Fei. 2016. Visual genome: Connecting language and vision using crowdsourced dense image annotations.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Ensembling graph predictions for amr parsing", |
|
"authors": [ |
|
{

"first": "Hoang Thanh",

"middle": [],

"last": "Lam",

"suffix": ""

},

{

"first": "Gabriele",

"middle": [],

"last": "Picco",

"suffix": ""

},

{

"first": "Yufang",

"middle": [],

"last": "Hou",

"suffix": ""

},

{

"first": "Young-Suk",

"middle": [],

"last": "Lee",

"suffix": ""

},

{

"first": "Lam",

"middle": [

"M"

],

"last": "Nguyen",

"suffix": ""

},

{

"first": "Dzung",

"middle": [

"T"

],

"last": "Phan",

"suffix": ""

},

{

"first": "Vanessa",

"middle": [],

"last": "L\u00f3pez",

"suffix": ""

},

{

"first": "Ramon",

"middle": [

"Fernandez"

],

"last": "Astudillo",

"suffix": ""

}
|
], |
|
"year": 2021, |
|
"venue": "Advances in Neural Information Processing Systems 35: Annual Conference on Neural Information Processing Systems 2021", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hoang Thanh Lam, Gabriele Picco, Yufang Hou, Young- Suk Lee, Lam M. Nguyen, Dzung T. Phan, Vanessa L\u00f3pez, and Ramon Fernandez Astudillo. 2021. En- sembling graph predictions for amr parsing. In Ad- vances in Neural Information Processing Systems 35: Annual Conference on Neural Information Process- ing Systems 2021, NeurIPS 2021, December 6-14, 2021, virtual.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "BART: Denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension", |
|
"authors": [ |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yinhan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naman", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marjan", |
|
"middle": [], |
|
"last": "Ghazvininejad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Abdelrahman", |
|
"middle": [], |
|
"last": "Mohamed", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Veselin", |
|
"middle": [], |
|
"last": "Stoyanov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "7871--7880", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.703" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Veselin Stoyanov, and Luke Zettlemoyer. 2020. BART: Denoising sequence-to-sequence pre-training for natural language generation, translation, and com- prehension. In Proceedings of the 58th Annual Meet- ing of the Association for Computational Linguistics, pages 7871-7880, Online. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Microsoft coco: Common objects in context", |
|
"authors": [ |
|
{ |
|
"first": "Tsung-Yi", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Maire", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Serge", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Belongie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Hays", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pietro", |
|
"middle": [], |
|
"last": "Perona", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Deva", |
|
"middle": [], |
|
"last": "Ramanan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Piotr", |
|
"middle": [], |
|
"last": "Doll\u00e1r", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [ |
|
"Lawrence" |
|
], |
|
"last": "Zitnick", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "ECCV", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tsung-Yi Lin, Michael Maire, Serge J. Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Doll\u00e1r, and C. Lawrence Zitnick. 2014. Microsoft coco: Common objects in context. In ECCV.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Toward abstractive summarization using semantic representations", |
|
"authors": [ |
|
{ |
|
"first": "Fei", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Flanigan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sam", |
|
"middle": [], |
|
"last": "Thomson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Norman", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Sadeh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "NAACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Fei Liu, Jeffrey Flanigan, Sam Thomson, Norman M. Sadeh, and Noah A. Smith. 2015. Toward abstractive summarization using semantic representations. In NAACL.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Improving language understanding by generative pretraining", |
|
"authors": [ |
|
{ |
|
"first": "Alec", |
|
"middle": [], |
|
"last": "Radford", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Karthik", |
|
"middle": [], |
|
"last": "Narasimhan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alec Radford and Karthik Narasimhan. 2018. Im- proving language understanding by generative pre- training.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Language models are unsupervised multitask learners", |
|
"authors": [ |
|
{ |
|
"first": "Alec", |
|
"middle": [], |
|
"last": "Radford", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeff", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rewon", |
|
"middle": [], |
|
"last": "Child", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Luan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dario", |
|
"middle": [], |
|
"last": "Amodei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alec Radford, Jeff Wu, Rewon Child, David Luan, Dario Amodei, and Ilya Sutskever. 2019. Language models are unsupervised multitask learners.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Exploring the limits of transfer learning with a unified text-to-text transformer", |
|
"authors": [ |
|
{ |
|
"first": "Colin", |
|
"middle": [], |
|
"last": "Raffel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Roberts", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Katherine", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sharan", |
|
"middle": [], |
|
"last": "Narang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Matena", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yanqi", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Journal of Machine Learning Research", |
|
"volume": "21", |
|
"issue": "140", |
|
"pages": "1--67", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J. Liu. 2020. Exploring the lim- its of transfer learning with a unified text-to-text transformer. Journal of Machine Learning Research, 21(140):1-67.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Structural adapters in pretrained language models for AMR-to-Text generation", |
|
"authors": [ |
|
{

"first": "Leonardo",

"middle": [

"F",

"R"

],

"last": "Ribeiro",

"suffix": ""

},

{

"first": "Yue",

"middle": [],

"last": "Zhang",

"suffix": ""

},

{

"first": "Iryna",

"middle": [],

"last": "Gurevych",

"suffix": ""

}
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4269--4282", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Leonardo F. R. Ribeiro, Yue Zhang, and Iryna Gurevych. 2021. Structural adapters in pretrained language models for AMR-to-Text generation. In Proceed- ings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 4269-4282, Online and Punta Cana, Dominican Republic. Asso- ciation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Structured query-based image retrieval using scene graphs", |
|
"authors": [ |
|
{ |
|
"first": "Brigit", |
|
"middle": [], |
|
"last": "Schroeder", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Subarna", |
|
"middle": [], |
|
"last": "Tripathi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "680--684", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Brigit Schroeder and Subarna Tripathi. 2020. Struc- tured query-based image retrieval using scene graphs. 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pages 680- 684.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Generating semantically precise scene graphs from textual descriptions for improved image retrieval", |
|
"authors": [ |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Schuster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ranjay", |
|
"middle": [], |
|
"last": "Krishna", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Angel", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Fei-Fei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the Fourth Workshop on Vision and Language", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "70--80", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W15-2812" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sebastian Schuster, Ranjay Krishna, Angel Chang, Li Fei-Fei, and Christopher D. Manning. 2015. Gen- erating semantically precise scene graphs from tex- tual descriptions for improved image retrieval. In Proceedings of the Fourth Workshop on Vision and Language, pages 70-80, Lisbon, Portugal. Associa- tion for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Attention is all you need", |
|
"authors": [ |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Niki", |
|
"middle": [], |
|
"last": "Parmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llion", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aidan", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Gomez", |
|
"suffix": "" |
|
}, |
|
{

"first": "\u0141ukasz",

"middle": [],

"last": "Kaiser",

"suffix": ""

},

{

"first": "Illia",

"middle": [],

"last": "Polosukhin",

"suffix": ""

}
|
], |
|
"year": 2017, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "30", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information Pro- cessing Systems, volume 30. Curran Associates, Inc.",
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Cross-modal scene graph matching for relationship-aware image-text retrieval", |
|
"authors": [ |
|
{ |
|
"first": "Sijin", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruiping", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ziwei", |
|
"middle": [], |
|
"last": "Yao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shiguang", |
|
"middle": [], |
|
"last": "Shan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xilin", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1497--1506", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/WACV45572.2020.9093614" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sijin Wang, Ruiping Wang, Ziwei Yao, Shiguang Shan, and Xilin Chen. 2020. Cross-modal scene graph matching for relationship-aware image-text retrieval. In 2020 IEEE Winter Conference on Applications of Computer Vision (WACV), pages 1497-1506.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Scene graph parsing as dependency parsing", |
|
"authors": [ |
|
{ |
|
"first": "Yu-Siang", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chenxi", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaohui", |
|
"middle": [], |
|
"last": "Zeng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alan", |
|
"middle": [], |
|
"last": "Yuille", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "397--407", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N18-1037" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yu-Siang Wang, Chenxi Liu, Xiaohui Zeng, and Alan Yuille. 2018. Scene graph parsing as dependency parsing. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Tech- nologies, Volume 1 (Long Papers), pages 397-407, New Orleans, Louisiana. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Scene graph generation by iterative message passing", |
|
"authors": [ |
|
{ |
|
"first": "Danfei", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuke", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Choy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Fei-Fei", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Computer Vision and Pattern Recognition (CVPR)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Danfei Xu, Yuke Zhu, Christopher Choy, and Li Fei-Fei. 2017. Scene graph generation by iterative message passing. In Computer Vision and Pattern Recognition (CVPR).", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Auto-encoding scene graphs for image captioning", |
|
"authors": [ |
|
{ |
|
"first": "Xu", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kaihua", |
|
"middle": [], |
|
"last": "Tang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hanwang", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianfei", |
|
"middle": [], |
|
"last": "Cai", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "10677--10686", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/CVPR.2019.01094" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xu Yang, Kaihua Tang, Hanwang Zhang, and Jianfei Cai. 2019. Auto-encoding scene graphs for image captioning. In 2019 IEEE/CVF Conference on Com- puter Vision and Pattern Recognition (CVPR), pages 10677-10686.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Neural motifs: Scene graph parsing with global context", |
|
"authors": [ |
|
{ |
|
"first": "Rowan", |
|
"middle": [], |
|
"last": "Zellers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Yatskar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sam", |
|
"middle": [], |
|
"last": "Thomson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yejin", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Conference on Computer Vision and Pattern Recognition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rowan Zellers, Mark Yatskar, Sam Thomson, and Yejin Choi. 2018. Neural motifs: Scene graph parsing with global context. In Conference on Computer Vision and Pattern Recognition.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Abstract Meaning Representation guided graph encoding and decoding for joint information extraction", |
|
"authors": [ |
|
{ |
|
"first": "Zixuan", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Heng", |
|
"middle": [], |
|
"last": "Ji", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "39--49", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2021.naacl-main.4" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zixuan Zhang and Heng Ji. 2021. Abstract Meaning Representation guided graph encoding and decoding for joint information extraction. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Hu- man Language Technologies, pages 39-49, Online. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Learning to generate scene graph from natural language supervision", |
|
"authors": [ |
|
{ |
|
"first": "Yiwu", |
|
"middle": [], |
|
"last": "Zhong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jing", |
|
"middle": [], |
|
"last": "Shi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianwei", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chenliang", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yin", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "ICCV", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yiwu Zhong, Jing Shi, Jianwei Yang, Chenliang Xu, and Yin Li. 2021. Learning to generate scene graph from natural language supervision. In ICCV.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Comprehensive image captioning via scene graph decomposition", |
|
"authors": [ |
|
{ |
|
"first": "Yiwu", |
|
"middle": [], |
|
"last": "Zhong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Liwei", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianshu", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dong", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yin", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "ECCV", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yiwu Zhong, Liwei Wang, Jianshu Chen, Dong Yu, and Yin Li. 2020. Comprehensive image captioning via scene graph decomposition. In ECCV.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "AMR parsing with action-pointer transformer", |
|
"authors": [ |
|
{ |
|
"first": "Jiawei", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tahira", |
|
"middle": [], |
|
"last": "Naseem", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ram\u00f3n", |
|
"middle": [], |
|
"last": "Fernandez Astudillo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Radu", |
|
"middle": [], |
|
"last": "Florian", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5585--5598", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2021.naacl-main.443" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiawei Zhou, Tahira Naseem, Ram\u00f3n Fernandez As- tudillo, and Radu Florian. 2021. AMR parsing with action-pointer transformer. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Hu- man Language Technologies, pages 5585-5598, On- line. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Modeling graph structure in transformer for better AMR-to-text generation", |
|
"authors": [ |
|
{ |
|
"first": "Jie", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Junhui", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Muhua", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Longhua", |
|
"middle": [], |
|
"last": "Qian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Min", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guodong", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5459--5468", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-1548" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jie Zhu, Junhui Li, Muhua Zhu, Longhua Qian, Min Zhang, and Guodong Zhou. 2019. Modeling graph structure in transformer for better AMR-to-text gen- eration. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natu- ral Language Processing (EMNLP-IJCNLP), pages 5459-5468, Hong Kong, China. Association for Com- putational Linguistics.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"text": "An example of (a) dependency parsing and (b) abstract meaning representation (AMR) parsing from textual description (i.e. region description) of \"White street sign with black writing\".", |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null |
|
} |
|
} |
|
} |
|
} |