|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T12:13:48.747104Z" |
|
}, |
|
"title": "Exploiting Image-Text Synergy for Contextual Image Captioning", |
|
"authors": [ |
|
{ |
|
"first": "Sreyasi", |
|
"middle": [ |
|
"Nag" |
|
], |
|
"last": "Chowdhury", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Rutgers University \u2021 Hasso Plattner Institute", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Rajarshi", |
|
"middle": [], |
|
"last": "Bhowmik", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Rutgers University \u2021 Hasso Plattner Institute", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Hareesh", |
|
"middle": [], |
|
"last": "Ravi", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Rutgers University \u2021 Hasso Plattner Institute", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Gerard", |
|
"middle": [], |
|
"last": "De Melo", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Rutgers University \u2021 Hasso Plattner Institute", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Simon", |
|
"middle": [], |
|
"last": "Razniewski", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Rutgers University \u2021 Hasso Plattner Institute", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Gerhard", |
|
"middle": [], |
|
"last": "Weikum", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Rutgers University \u2021 Hasso Plattner Institute", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Modern web content-news articles, blog posts, educational resources, marketing brochures-is predominantly multimodal. A notable trait is the inclusion of media such as images placed at meaningful locations within a textual narrative. Most often, such images are accompanied by captions-either factual or stylistic (humorous, metaphorical, etc.)making the narrative more engaging to the reader. While standalone image captioning has been extensively studied, captioning an image based on external knowledge such as its surrounding text remains under-explored. In this paper, we study this new task: given an image and an associated unstructured knowledge snippet, the goal is to generate a contextual caption for the image.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Modern web content-news articles, blog posts, educational resources, marketing brochures-is predominantly multimodal. A notable trait is the inclusion of media such as images placed at meaningful locations within a textual narrative. Most often, such images are accompanied by captions-either factual or stylistic (humorous, metaphorical, etc.)making the narrative more engaging to the reader. While standalone image captioning has been extensively studied, captioning an image based on external knowledge such as its surrounding text remains under-explored. In this paper, we study this new task: given an image and an associated unstructured knowledge snippet, the goal is to generate a contextual caption for the image.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "In multimodal (image-text) documents, images are typically accompanied by captions. These may contain specific knowledge about the narrative -location, names of persons etc. -or may exhibit thematic knowledge grounding the sentimental value of the image in the narrative. The image captions explicitly or implicitly refer to the image and its surrounding text, and play a major role in engaging the reader. We call this type of captions contextual captions, and Figure 1 illustrates the corresponding Contextual Image Captioning problem.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 462, |
|
"end": 470, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Generating captions for standalone images (Hossain et al., 2019; Wang et al., 2020) or summarizing a piece of text (See et al., 2017; Lin and Ng, 2019) are well-studied problems. However, generating image captions accounting for contextual knowledge is a largely unexplored task and poses many challenges. Related tasks include multimodal summarization Zhuge, 2018, 2019a) and title generation (Murao et al., 2019) . Multimodal summarization usually involves segmentation and I recently moved to Buffalo, NY and every day I am discovering how beautiful this town is. I took this pic...and I was thrilled about it! I wanted to share the pallet of colors the sunset had that evening. Generated Contextual Captions: -A beautiful sunset path to heaven.", |
|
"cite_spans": [ |
|
{ |
|
"start": 42, |
|
"end": 64, |
|
"text": "(Hossain et al., 2019;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 65, |
|
"end": 83, |
|
"text": "Wang et al., 2020)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 115, |
|
"end": 133, |
|
"text": "(See et al., 2017;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 134, |
|
"end": 151, |
|
"text": "Lin and Ng, 2019)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 353, |
|
"end": 372, |
|
"text": "Zhuge, 2018, 2019a)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 394, |
|
"end": 414, |
|
"text": "(Murao et al., 2019)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "-A sunset...unknown artist. sorting of both the modalities or has specific templates along which the summary is generated (See et al., 2017) . In contrast, generating contextual captions requires conditionally deciding to follow, lead or negate the knowledge offered by the context.", |
|
"cite_spans": [ |
|
{ |
|
"start": 122, |
|
"end": 140, |
|
"text": "(See et al., 2017)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Inadequacy of Prior Work. Image captioning and text summarization are unimodal, and ignore information present in the dormant modality. Multimodal summarization and news image captioning (Biten et al., 2019) usually entail captions with explicit references to the context, and may be achieved with a copy mechanism (Gu et al., 2016) that can selectively copy information (e.g., named entities such as names of people, organizations, geographical locations etc.) from the surrounding text to the caption. However, most social media driven content is affective and requires implicit reasoning about the context. For example, for an image of the Grand Canyon, we might encounter captions such as \"perfect for a lovely hike\" or \"too tired to walk\", due to the subjectivity of the task, which requires inference based on the context.", |
|
"cite_spans": [ |
|
{ |
|
"start": 315, |
|
"end": 332, |
|
"text": "(Gu et al., 2016)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Captioning and create a new dataset from Reddit posts with images, titles and comments. \u2022 We propose an end-to-end trained neural model for the Contextual Image Captioning task and comprehensively evaluate its performance using quantitative and qualitative measures. \u2022 We study how various factors affect the generation of novel contextual captions. To foster follow-up research we release the dataset and code, available at https://github.com/ Sreyasi/contextual_captions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Approach and Contribution. \u2022 We formulate the novel task of Contextual Image", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Image Captioning. Prior work on captioning conditioned only on images (Farhadi et al., 2010; Vinyals et al., 2015; Karpathy and Li, 2015; Krause et al., 2017) has been successful for descriptive captions with explicit grounding to image objects. Recently, captions with sentimental and abstract concepts have been explored (Gan et al., 2017; Chandrasekaran et al., 2018; Park et al., 2017; Shuster et al., 2019) . Although external knowledge bases like DBpedia (factual knowledge) (Wu et al., 2018) and ConceptNet (commonsense knowledge) (Zhou et al., 2019) have been leveraged, all prior work ignores the knowledge present in the text surrounding images in social media and other domains. Contextual Image Captioning leverages the latter kind of knowledge. Multimodal Summarization. Research on multimodal embeddings (Laina et al., 2019; Xia et al., 2020; Scialom et al., 2020) has facilitated studying image-text data. Summarization of multimodal documents (Chu and Kao, 2017; Zhu et al., 2018; Hessel et al., 2019) proceeds by aligning a subset of images with extracted (Chen and Zhuge, 2019a) , or generated (Chen and Zhuge, 2018) text segments from the original document. In contrast, image captions in our dataset do not explicitly summarize the associated text and rather act as a short commentary that captures knowledge from both modalities. News Image Captioning. A task similar to our problem is captioning images within news articles (Tran et al., 2020; Chen and Zhuge, 2019b) . A key challenge here is to correctly identify and generate named entities (Tran et al., 2020) . However, news image captions tend to be descriptive compared to the subjective nature of captions in our dataset representing common social media content.", |
|
"cite_spans": [ |
|
{ |
|
"start": 70, |
|
"end": 92, |
|
"text": "(Farhadi et al., 2010;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 93, |
|
"end": 114, |
|
"text": "Vinyals et al., 2015;", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 115, |
|
"end": 137, |
|
"text": "Karpathy and Li, 2015;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 138, |
|
"end": 158, |
|
"text": "Krause et al., 2017)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 323, |
|
"end": 341, |
|
"text": "(Gan et al., 2017;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 342, |
|
"end": 370, |
|
"text": "Chandrasekaran et al., 2018;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 371, |
|
"end": 389, |
|
"text": "Park et al., 2017;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 390, |
|
"end": 411, |
|
"text": "Shuster et al., 2019)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 481, |
|
"end": 498, |
|
"text": "(Wu et al., 2018)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 538, |
|
"end": 557, |
|
"text": "(Zhou et al., 2019)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 818, |
|
"end": 838, |
|
"text": "(Laina et al., 2019;", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 839, |
|
"end": 856, |
|
"text": "Xia et al., 2020;", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 857, |
|
"end": 878, |
|
"text": "Scialom et al., 2020)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 959, |
|
"end": 978, |
|
"text": "(Chu and Kao, 2017;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 979, |
|
"end": 996, |
|
"text": "Zhu et al., 2018;", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 997, |
|
"end": 1017, |
|
"text": "Hessel et al., 2019)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 1073, |
|
"end": 1096, |
|
"text": "(Chen and Zhuge, 2019a)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 1112, |
|
"end": 1134, |
|
"text": "(Chen and Zhuge, 2018)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 1446, |
|
"end": 1465, |
|
"text": "(Tran et al., 2020;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 1466, |
|
"end": 1488, |
|
"text": "Chen and Zhuge, 2019b)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 1565, |
|
"end": 1584, |
|
"text": "(Tran et al., 2020)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "To the best of our knowledge, the only existing image-text caption datasets are from the news domain (e.g., Daily Mail Corpus) containing nonaffective descriptive captions with mentions of named entities. Instead, we consider Reddit, which offers a rich source of multimodal data. Out of the image-related subreddits, /r/pics is particularly suitable for our problem because of the nature of posts. Firstly, the posts do not contain expert jargon, unlike other subreddits like /r/photographs. Secondly, the image captions are mostly affective and not drab descriptions. Lastly, post frequency is high, presenting a big dataset. Data Scraping. We scrape the subreddit /r/pics to collect 250,000 posts over the span of a year. For each post, we grab the image, the post title, and 1-10 comments. We consider the post title as ground truth caption since it is written by the image poster, ensuring a consistent and coherent intent. The comments are concatenated, preserving their tree structure, to serve as the unstructured knowledge associated with the image. Inappropriate posts that do not adhere to community standards and were flagged by moderators are removed. Data Characteristics. The collected images do not adhere to any particular subject or theme. The paragraphs are~59.2 words long, and the captions are~10.6 words long on an average.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "3" |
|
}, |
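
{

"text": "Data Scraping Sketch. The paper does not name its scraping tool; the following minimal Python sketch shows one way to collect /r/pics posts with the PRAW library, treating the post title as the caption and the concatenated comments as the paragraph. The credentials and the post limit are placeholders.\n\nimport praw  # Python Reddit API Wrapper (an assumption; the paper does not name its scraping tool)\n\n# Hypothetical credentials: replace with your own Reddit API app.\nreddit = praw.Reddit(client_id='YOUR_ID', client_secret='YOUR_SECRET', user_agent='contextual-captions')\n\nsamples = []\nfor submission in reddit.subreddit('pics').new(limit=1000):\n    # Keep direct image posts only.\n    if submission.stickied or not submission.url.lower().endswith(('.jpg', '.jpeg', '.png')):\n        continue\n    submission.comments.replace_more(limit=0)  # flatten 'load more comments' stubs\n    comments = [c.body for c in submission.comments.list()[:10]]  # 1-10 comments per post\n    samples.append({'image_url': submission.url, 'caption': submission.title, 'paragraph': ' '.join(comments)})",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Datasets",

"sec_num": "3"

},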
|
{ |
|
"text": "In some posts, captions and comments may contain different named entities (NE), making prediction of the ground truth NE difficult. For example, the caption \"My friend and I are en route to the Grand Canyon\" may be accompanied with the comment \"Try to hike down to the Colorado. Also visit Zion National Park!\" The NEs in the paragraph (Colorado, Zion) do not match that in the caption (Grand Canyon). Owing to this characteristic, we study two distinct variants of the dataset -one containing NEs and the other without NEs. We denote these variants as +NE andNE, respectively.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "The comments sometimes exhibit topic drift, e.g., a comment on the Grand Canyon post may be \"I remember my last trip to India...we had spicy food!\". Hence, we also study variants with ensured context overlap -one common word (ignoring stop words) between caption and comments. These variants are suffixed overlap -e.g. +NE-overlap.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "3" |
|
}, |
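
{

"text": "Overlap Filter Sketch. A minimal Python sketch of the context-overlap criterion described above, assuming a simple whitespace tokenizer and an illustrative stop-word list; the paper does not specify the exact tokenization or stop-word list.\n\n# Illustrative stop-word list (an assumption; the paper does not name the list it used).\nSTOP_WORDS = {'a', 'an', 'the', 'and', 'or', 'of', 'to', 'in', 'is', 'it', 'this', 'that', 'i', 'my', 'was'}\n\ndef content_words(text):\n    # Lowercase, split on whitespace, strip surrounding punctuation, drop stop words.\n    tokens = [t.strip('.,!?:;()') for t in text.lower().split()]\n    return {t for t in tokens if t and t not in STOP_WORDS}\n\ndef has_context_overlap(caption, comments, min_common=1):\n    # A sample belongs to an *-overlap split if the caption and the comments share\n    # at least min_common non-stop-word tokens.\n    return len(content_words(caption) & content_words(comments)) >= min_common",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Datasets",

"sec_num": "3"

},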
|
{ |
|
"text": "We report experimental results on all of these variants, adopting a 30,000/8,000/8,000 train/val/test split for each of them. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "We refer to the text context associated with each image as 'paragraph'. This offers external knowledge which may be absent in the image modality alone. Figure 2 shows our proposed model architecture. Given an input image I and an associated input", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 152, |
|
"end": 160, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Contextual Captioning Model", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "paragraph P = {w p 1 , . . . , w p M } of length M , our model (an encoder-decoder architecture) generates a caption C = {w c 1 , . . . , w c N } of length N .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Contextual Captioning Model", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "For image encoding, we use features extracted from a pre-trained ResNet152 (He et al., 2016) model.", |
|
"cite_spans": [ |
|
{ |
|
"start": 75, |
|
"end": 92, |
|
"text": "(He et al., 2016)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Contextual Captioning Model", |
|
"sec_num": "4" |
|
}, |
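
{

"text": "Image Encoder Sketch. A minimal PyTorch sketch of extracting pre-trained ResNet152 features with torchvision; the preprocessing pipeline and the use of the final pooled 2048-dimensional feature are assumptions, since the paper only states that ResNet152 features are used.\n\nimport torch\nimport torchvision.models as models\nimport torchvision.transforms as T\nfrom PIL import Image\n\n# Assumed preprocessing: standard ImageNet resizing and normalization.\npreprocess = T.Compose([\n    T.Resize(256), T.CenterCrop(224), T.ToTensor(),\n    T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n])\n\nresnet = models.resnet152(pretrained=True)\nresnet.fc = torch.nn.Identity()  # drop the classifier head; keep the 2048-d pooled feature\nresnet.eval()\n\ndef image_features(path):\n    img = preprocess(Image.open(path).convert('RGB')).unsqueeze(0)\n    with torch.no_grad():\n        return resnet(img)  # shape (1, 2048), used as the image embedding",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Contextual Captioning Model",

"sec_num": "4"

},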
|
{ |
|
"text": "To encode the input paragraph, we deploy a bidirectional LSTM (BiLSTM). The outputs of the BiLSTM, denoted as G = {g 1 , . . . , g M }, where", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Contextual Captioning Model", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "g i = BiLSTM(x i , g i\u22121 )\u2200i \u2208 i \u2208 {1, . . . , M },", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Contextual Captioning Model", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "is the encoded representation of the input paragraph.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Contextual Captioning Model", |
|
"sec_num": "4" |
|
}, |
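
{

"text": "Paragraph Encoder Sketch. A minimal PyTorch sketch of the BiLSTM paragraph encoder described above; the embedding and hidden dimensions are placeholders.\n\nimport torch.nn as nn\n\nclass ParagraphEncoder(nn.Module):\n    # Encodes the word embeddings x_1, ..., x_M of a paragraph into states G = {g_1, ..., g_M}.\n    def __init__(self, embed_dim=768, hidden_dim=512):\n        super().__init__()\n        self.bilstm = nn.LSTM(embed_dim, hidden_dim, batch_first=True, bidirectional=True)\n\n    def forward(self, x):      # x: (batch, M, embed_dim)\n        g, _ = self.bilstm(x)  # g: (batch, M, 2 * hidden_dim); one state g_i per word\n        return g",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Contextual Captioning Model",

"sec_num": "4"

},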
|
{ |
|
"text": "x i is the vector embedding of the word w p i . We deploy a unidirectional LSTM for sequential decoding of the caption C that leverages both the encoded image and paragraph representations. The image embedding is provided as an input to the decoder LSTM at timestep t = 1. In all subsequent timesteps, the decoder input is the embedding y t\u22121 of the previous token w c t\u22121 . The decoder state at each timestep t is obtained as h t = LSTM(y t\u22121 , h t\u22121 ). To incorporate contextual information from the input paragraph, we concatenate an attention-weighted sum of the encoder states, denoted asG t , to the current state h t .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Contextual Captioning Model", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "At each decoder time step t \u2208 {2, . . . N }, the attention weights \u03b1 t over the encoder states depend on the current decoder state h t and the encoder states G. Formally,G", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Contextual Captioning Model", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "t = M i=i \u03b1 t i g i (1) \u03b1 t i = v (W g g i + W h h t + b) M i =1 v (W g g i + W h h t + b)", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "Contextual Captioning Model", |
|
"sec_num": "4" |
|
}, |
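
{

"text": "Attention Sketch. A minimal PyTorch sketch of Eqs. (1)-(2): a score for each encoder state is computed from the current decoder state, normalized, and used to form the context vector. The dimensions are placeholders, and the softmax normalization is an assumption; Eq. (2) as printed normalizes the raw scores by their sum.\n\nimport torch\nimport torch.nn as nn\n\nclass ParagraphAttention(nn.Module):\n    def __init__(self, enc_dim=1024, dec_dim=512, attn_dim=512):\n        super().__init__()\n        self.W_g = nn.Linear(enc_dim, attn_dim, bias=False)\n        self.W_h = nn.Linear(dec_dim, attn_dim, bias=True)  # the bias term plays the role of b\n        self.v = nn.Linear(attn_dim, 1, bias=False)\n\n    def forward(self, G, h_t):\n        # G: (batch, M, enc_dim) encoder states; h_t: (batch, dec_dim) current decoder state.\n        scores = self.v(self.W_g(G) + self.W_h(h_t).unsqueeze(1)).squeeze(-1)  # (batch, M)\n        alpha = torch.softmax(scores, dim=-1)                  # attention weights, cf. Eq. (2)\n        context = torch.bmm(alpha.unsqueeze(1), G).squeeze(1)  # weighted sum of encoder states, Eq. (1)\n        return context, alpha",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Contextual Captioning Model",

"sec_num": "4"

},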
|
{ |
|
"text": "Finally, we pass the concatenated output through two dense layers with a non-linear activation layer (e.g, ReLU) placed in between. The output logits are then passed through a Softmax function to obtain the output distribution p(.) over the vocabulary. We train our model end-to-end by minimizing the negative log-likelihood, i.e., \u03b8 * = argmin \u03b8 \u2212log p(C | I, P ; \u03b8). Note that we obtain the input embeddings, x i , and y t , of the encoder and decoder, respectively, from the embedding layer of a pretrained BERT BASE model. The model's objective is to learn the optimal parameters \u03b8 * to maximize the log-likelihood log p(C|I, P ; \u03b8). Therefore, we train our model end-to-end by minimizing the negative loglikelihood defined as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Contextual Captioning Model", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "L(\u03b8) = N t=1 \u2212 log p(w c t | w c 1 , . . . , w c t\u22121 , I, P ; \u03b8)", |
|
"eq_num": "(3)" |
|
} |
|
], |
|
"section": "Contextual Captioning Model", |
|
"sec_num": "4" |
|
}, |
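
{

"text": "Training Loss Sketch. A minimal PyTorch sketch of the objective in Eq. (3): the summed per-token negative log-likelihood of the ground-truth caption, implemented with cross-entropy over the decoder logits; the tensor shapes and the padding index are assumptions.\n\nimport torch.nn.functional as F\n\ndef caption_nll(logits, targets, pad_id=0):\n    # logits: (batch, N, vocab) decoder outputs; targets: (batch, N) gold caption token ids.\n    # Returns the sum over timesteps of -log p(w^c_t | w^c_<t, I, P; theta), ignoring padding.\n    return F.cross_entropy(logits.reshape(-1, logits.size(-1)), targets.reshape(-1), ignore_index=pad_id, reduction='sum')",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Contextual Captioning Model",

"sec_num": "4"

},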
|
{ |
|
"text": "5 Experiments and Results", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Contextual Captioning Model", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Metrics. We use the MSCOCO (Lin et al., 2014) automatic caption evaluation tool 1 to quantitatively evaluate our proposed model variants using the BLEU-1, ROUGE-L, CIDEr, and SPICE metrics. In addition, we also report scores for semantic similarity between ground truth (c gt ) and generated (c gen ) captions:", |
|
"cite_spans": [ |
|
{ |
|
"start": 27, |
|
"end": 45, |
|
"text": "(Lin et al., 2014)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Quantitative Evaluation", |
|
"sec_num": "5.1" |
|
}, |
|
|
{ |
|
"text": "SemSim(c gt , c gen ) = cosine(v cgt , v cgen )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Quantitative Evaluation", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": ", where v cgt and v cgen are the mean vectors of constituent words in the respective captions from 300-dimensional GloVe embeddings.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Quantitative Evaluation", |
|
"sec_num": "5.1" |
|
}, |
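
{

"text": "SemSim Sketch. A minimal sketch of the SemSim metric defined above: the cosine similarity between the mean 300-dimensional GloVe vectors of the two captions. The GloVe file name and the whitespace tokenization are assumptions.\n\nimport numpy as np\n\ndef load_glove(path='glove.6B.300d.txt'):\n    # Each line holds a word followed by 300 floats (the file name is an assumption).\n    vecs = {}\n    with open(path, encoding='utf-8') as f:\n        for line in f:\n            parts = line.rstrip().split(' ')\n            vecs[parts[0]] = np.asarray(parts[1:], dtype=np.float32)\n    return vecs\n\ndef sem_sim(caption_gt, caption_gen, glove):\n    def mean_vec(caption):\n        vs = [glove[w] for w in caption.lower().split() if w in glove]\n        return np.mean(vs, axis=0) if vs else np.zeros(300, dtype=np.float32)\n    a, b = mean_vec(caption_gt), mean_vec(caption_gen)\n    denom = float(np.linalg.norm(a) * np.linalg.norm(b))\n    return float(np.dot(a, b)) / denom if denom > 0 else 0.0",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Quantitative Evaluation",

"sec_num": "5.1"

},

{

"text": "Evaluation Toolkit Sketch. A minimal sketch of scoring generated captions with the coco-caption toolkit (pycocoevalcap) referenced above; each sample id maps to a list of caption strings, the example captions are placeholders, and the usual PTBTokenizer preprocessing step is omitted for brevity.\n\nfrom pycocoevalcap.bleu.bleu import Bleu\nfrom pycocoevalcap.rouge.rouge import Rouge\nfrom pycocoevalcap.cider.cider import Cider\n\ngts = {0: ['a beautiful sunset path to heaven']}  # ground-truth captions per sample id\nres = {0: ['a sunset over the lake']}             # generated captions per sample id\n\nbleu, _ = Bleu(4).compute_score(gts, res)   # bleu[0] is BLEU-1\nrouge, _ = Rouge().compute_score(gts, res)  # ROUGE-L\ncider, _ = Cider().compute_score(gts, res)  # CIDEr\nprint('BLEU-1', bleu[0], 'ROUGE-L', rouge, 'CIDEr', cider)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Quantitative Evaluation",

"sec_num": "5.1"

},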
|
{ |
|
"text": "Baselines. To the best of our knowledge, there is no existing work that studies contextual image captioning. Therefore, we present two baselines that can also be regarded as ablated versions of our model: Image-only and Text-only captioning.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Quantitative Evaluation", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Results. In Table 1 , we report scores 2 for the baselines and our model variants. Recall from Section 3 that our models are based on various data splits: +NE,NE, and their respective overlap variants. We observe that for the +NE split, contextual captions are not better than the unimodal baselines on ngram overlap based scores. This can be attributed to the nature of the dataset: NEs in the paragraph differ from those in ground truth captions. Since contextual captions draw inference from the paragraphs, the predicted NEs differ from ground truth captions as well, leading to lower scores for n-gram overlap based metrics. For theNE splits as well as both the overlap splits, contextual captions fare better than the baselines. The observed low scores for BLEU-1, ROUGE-L, CIDEr and SPICE hint towards the subjectivity of the task. N-gram overlap based metrics do not accommodate varied interpretations and linguistic diversity. Figure 3 exemplifies how image-only captions for different images are often similar, while contextual captions are linguistically richer. High average SemSim scores of contextual captions are indicative of their thematic similarity with the ground truth. Note that the splits with enforced similarity (-overlap) between paragraph and caption fare better on SemSim, leading to the conjecture that with a cleaner dataset, it would be possible to generate very relevant contextual captions.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 12, |
|
"end": 19, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
}, |
|
{ |
|
"start": 936, |
|
"end": 944, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Quantitative Evaluation", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Setup. The scope of this evaluation is to analyze the different splits of our dataset. A user study was set up on the crowd-sourcing platform Appen 3 . 250 test samples were studied. For each sample, users were shown the image and its associated paragraph, and were asked to rate 6 captions (4 contextual captions and 2 baselines from Table 1) on a scale from 1 (irrelevant) to 5 (highly relevant). Observations. We observe that for 80% of samples (201 out of 250), at least one of the 4 contextual captioning models is rated strictly higher than both baselines, and for 95% of samples they are at least as good as both baselines. A variant-wise analysis of this is shown in Table 2 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 675, |
|
"end": 682, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Qualitative Evaluation", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "In 75% of samples, contextual captions were rated highest among all 6 captions. The variantwise analysis of the same is shown in Table 3 . We identify three categories of samples: \u2022 Significant: samples where at least one of the 6 variants generate a caption with rating \u2265 3. These constitute 46% of samples (115/250). \u2022 Insignificant: samples on which all 6 variants obtain a rating < 3. Here, paragraphs show substantial randomness and offer little context for the image. It appears impossible to generate good contextual captions for such samples. \u2022 Bad-base: samples which are insignificant (rating < 3) with respect to both baselines. These constitute 80% of samples (201/250). For 86% of Significant samples (99/115), contextual captions were rated higher than the baselines. A detailed analysis is given in Table 4 . The ratings of 33% (67 of 201) of Bad-base samples were made significant, i.e., improved to strictly \u2265 3, by the best contextual captioning model variant. In other words, contextual captioning generates superior captions for samples with inferior baseline captions.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 129, |
|
"end": 136, |
|
"text": "Table 3", |
|
"ref_id": "TABREF4" |
|
}, |
|
{ |
|
"start": 814, |
|
"end": 821, |
|
"text": "Table 4", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Qualitative Evaluation", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "NE-overlap emerged as the best-suited contextual captioning variant in both quantitative and qualitative evaluations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Qualitative Evaluation", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Factorial Experiment. We conduct another study taking the form of a 2 \u00d7 2 \u00d7 2 full factorial experiment based on three factors -presence of NEs, caption-paragraph overlap, and use of pre-trained BERT token embeddings. We study the effect of these factors with a user study with all factor combinations. The effect of each of the factors can be seen in Figure 4 . Using BERT token embeddings is by far the most effective in enhancing caption qual-ity. It is interesting to note that presence of NEs (including its interaction with other factors) has a negative effect -captions without NEs are rated higher by human evaluators. Caption-paragraph overlap splits are also rated higher, which indicates that high inter-modality content overlap is necessary for generating good contextual captions. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 352, |
|
"end": 360, |
|
"text": "Figure 4", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Qualitative Evaluation", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Named Entities in Captions. Our user study shows that contextual captions with named entities (NE) are less preferred by humans. We conjecture that a lack of strong cues from the paragraphs lead to incorrectly generated NEs. Future work should also explore copy mechanisms to copy NEs from paragraphs to captions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "Caption Quality. We observe that the baseline captions do not show linguistic diversity (Figure 3) . \"The view from. . . \", \"My friend and I. . . \" etc. are common templates learned by the models. We conjecture that training the model on samples containing coherent paragraphs that have high content overlap with the image would yield nicer captions. We partially emulate this in our -overlap splits, which indeed show better model performance.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 88, |
|
"end": 98, |
|
"text": "(Figure 3)", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "We propose the novel task of Contextual Image Captioning that exploits complementary knowledge from different modalities in multimodal contents. To facilitate a thorough study of this task, we curate a new dataset comprising \u223c250,000 multimodal Reddit posts. We provide an analysis of the dataset along with experimental results to identify interesting factors that determine the quality of generated contextual captions. We hope that our work will kindle and support follow-up research on this under-explored task, with downstream applications such as content authoring tools and multimodal conversational agents.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "A.1 Dataset Details", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A Appendix", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 Total number of samples: 242,767", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A Appendix", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 Samples with named entities (NE) in caption: 137,732 (56.82%)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A Appendix", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 Samples with no NE in caption: 104,653 (43.18%)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A Appendix", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We ensure a context overlap between paragraph and caption with the following splits:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A Appendix", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 +NE samples with one common word between paragraph and caption: 50,730 (20.93%). These are named +NE-overlap in Table 1 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 114, |
|
"end": 121, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "A Appendix", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 +NE samples with two common words between paragraph and caption: 23,283 (9.61%).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A Appendix", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022NE samples with one common word between paragraph and caption: 38,301 (15.80%). These are namedNE-overlap in Table 1. \u2022NE samples with two common words between paragraph and caption: 15,070 (6.22%)", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 110, |
|
"end": 118, |
|
"text": "Table 1.", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "A Appendix", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We use SpaCy to detect named entities in captions. SpaCy detects 18 kinds of named entities 4 . TIME, MONEY, PERCENT, and LANGUAGE were not considered since they include common conversational phrases like \"day before yesterday\", \"my two cents\", \"an English breakfast\" etc. Examples of captions with NEs: \"Just the (Earth/LOC) letting off some steam (Iceland/GPE)\", \"The (first/CARDINAL) Chipotle , opened in (Denver/GPE) in (1993/DATE).\" Examples of captions without NEs: \"Texture of the paint on a skull I painted.\", \"My girlfriend and I handle social situations differently.\"", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A Appendix", |
|
"sec_num": null |
|
}, |
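
{

"text": "NE Detection Sketch. A minimal sketch of the spaCy-based named-entity filtering described above; the model name en_core_web_sm is an assumption.\n\nimport spacy\n\nnlp = spacy.load('en_core_web_sm')  # the model name is an assumption\nIGNORED = {'TIME', 'MONEY', 'PERCENT', 'LANGUAGE'}  # NE types excluded as described above\n\ndef caption_named_entities(caption):\n    # Return (text, label) pairs for the NE types that are kept.\n    return [(ent.text, ent.label_) for ent in nlp(caption).ents if ent.label_ not in IGNORED]\n\ndef caption_has_ne(caption):\n    # Used to assign a sample to the +NE or -NE split.\n    return len(caption_named_entities(caption)) > 0",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "A Appendix",

"sec_num": null

},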
|
{ |
|
"text": "In future work, the NE types could be leveraged to learn positional relationships in sentences.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A Appendix", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Our architecture is developed in PyTorch. The number of samples in all train/val/test splits is 30,000/8000/8000. Each model is trained for 20 epochs, with a batch size of 16. On a Tesla V100-PCIE-16 GB GPU, training 1 epoch taken 8 min. 4 https://spacy.io/api/annotation# named-entities For each model variant, the best validation model is used for testing. We experiment with models using pre-trained BERT token embeddings, as well as learning token embeddings from scratch (with a vocabulary size of 100,000). We observe that BERT token embeddings have a positive effect on the quality of captions (Figure 4) , and hence consider this configuration as default.", |
|
"cite_spans": [ |
|
{ |
|
"start": 238, |
|
"end": 239, |
|
"text": "4", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 601, |
|
"end": 611, |
|
"text": "(Figure 4)", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "A.2 Experimental Setup", |
|
"sec_num": null |
|
}, |
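
{

"text": "BERT Embedding Sketch. A minimal sketch of obtaining input token embeddings from the embedding layer of a pretrained BERT-BASE model, here via the HuggingFace transformers library; the library and the bert-base-uncased checkpoint are assumptions, as the paper does not name the implementation.\n\nimport torch\nfrom transformers import BertModel, BertTokenizer\n\ntokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\nbert = BertModel.from_pretrained('bert-base-uncased')\nembedding_layer = bert.get_input_embeddings()  # nn.Embedding over the WordPiece vocabulary\n\nids = tokenizer('a beautiful sunset path to heaven', return_tensors='pt')['input_ids']\nwith torch.no_grad():\n    x = embedding_layer(ids)  # (1, seq_len, 768) token embeddings used as encoder/decoder inputs",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "A.2 Experimental Setup",

"sec_num": null

},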
|
{ |
|
"text": "Validation Performances of Test Models. We train each model for 20 epochs and chose the best validation model for testing. In Table 5 we report the validation losses of our reported test models. A.4 Examples Table 5 shows a few good and bad examples of contextual captions.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 126, |
|
"end": 133, |
|
"text": "Table 5", |
|
"ref_id": "TABREF6" |
|
}, |
|
{ |
|
"start": 208, |
|
"end": 215, |
|
"text": "Table 5", |
|
"ref_id": "TABREF6" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "A.3 Quantitative Evaluation", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://github.com/tylin/coco-caption", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The BLEU-1 and ROUGE-L scores are multiplied by 100, and CIDEr and SPICE scores are multiplied by 10 following the standard practice.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://appen.com, formerly named Figure8.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Mar\u00e7al Rusi\u00f1ol, and Dimosthenis Karatzas. 2019. acute; good news, everyone! context driven entity-aware captioning for news images", |
|
"authors": [ |
|
{ |
|
"first": "Llu\u00eds", |
|
"middle": [], |
|
"last": "Ali Furkan Biten", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "G\u00f3mez", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "CVPR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ali Furkan Biten, Llu\u00eds G\u00f3mez, Mar\u00e7al Rusi\u00f1ol, and Dimosthenis Karatzas. 2019. acute; good news, ev- eryone! context driven entity-aware captioning for news images. In CVPR.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Punny captions: Witty wordplay in image descriptions", |
|
"authors": [ |
|
{ |
|
"first": "Arjun", |
|
"middle": [], |
|
"last": "Chandrasekaran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Devi", |
|
"middle": [], |
|
"last": "Parikh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohit", |
|
"middle": [], |
|
"last": "Bansal", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "NAACL-HLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Arjun Chandrasekaran, Devi Parikh, and Mohit Bansal. 2018. Punny captions: Witty wordplay in image de- scriptions. In NAACL-HLT.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Abstractive textimage summarization using multi-modal attentional hierarchical RNN", |
|
"authors": [ |
|
{ |
|
"first": "Jingqiang", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hai", |
|
"middle": [], |
|
"last": "Zhuge", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jingqiang Chen and Hai Zhuge. 2018. Abstractive text- image summarization using multi-modal attentional hierarchical RNN. In EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Extractive summarization of documents with images based on multi-modal RNN", |
|
"authors": [ |
|
{ |
|
"first": "Jingqiang", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hai", |
|
"middle": [], |
|
"last": "Zhuge", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Future Gener. Comput. Syst", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jingqiang Chen and Hai Zhuge. 2019a. Extractive summarization of documents with images based on multi-modal RNN. Future Gener. Comput. Syst.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "News image captioning based on text summarization using image as query", |
|
"authors": [ |
|
{ |
|
"first": "Jingqiang", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hai", |
|
"middle": [], |
|
"last": "Zhuge", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "SKG", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jingqiang Chen and Hai Zhuge. 2019b. News image captioning based on text summarization using image as query. In SKG.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Blog article summarization with image-text alignment techniques", |
|
"authors": [ |
|
{ |
|
"first": "Wei-Ta", |
|
"middle": [], |
|
"last": "Chu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Chih", |
|
"middle": [], |
|
"last": "Kao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "ISM", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wei-Ta Chu and Ming-Chih Kao. 2017. Blog arti- cle summarization with image-text alignment tech- niques. In ISM.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Every picture tells a story: Generating sentences from images", |
|
"authors": [ |
|
{ |
|
"first": "Ali", |
|
"middle": [], |
|
"last": "Farhadi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Seyyed", |
|
"middle": [], |
|
"last": "Mohammad Mohsen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohammad", |
|
"middle": [ |
|
"Amin" |
|
], |
|
"last": "Hejrati", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Sadeghi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cyrus", |
|
"middle": [], |
|
"last": "Young", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julia", |
|
"middle": [], |
|
"last": "Rashtchian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Hockenmaier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Forsyth", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "ECCV", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ali Farhadi, Seyyed Mohammad Mohsen Hejrati, Mohammad Amin Sadeghi, Peter Young, Cyrus Rashtchian, Julia Hockenmaier, and David A. Forsyth. 2010. Every picture tells a story: Gener- ating sentences from images. In ECCV.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Stylenet: Generating attractive visual captions with styles", |
|
"authors": [ |
|
{ |
|
"first": "Chuang", |
|
"middle": [], |
|
"last": "Gan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhe", |
|
"middle": [], |
|
"last": "Gan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaodong", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianfeng", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Deng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "CVPR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chuang Gan, Zhe Gan, Xiaodong He, Jianfeng Gao, and Li Deng. 2017. Stylenet: Generating attractive visual captions with styles. In CVPR.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Incorporating copying mechanism in sequence-to-sequence learning", |
|
"authors": [ |
|
{ |
|
"first": "Jiatao", |
|
"middle": [], |
|
"last": "Gu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhengdong", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hang", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "O", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Victor", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiatao Gu, Zhengdong Lu, Hang Li, and Victor O. K. Li. 2016. Incorporating copying mechanism in sequence-to-sequence learning. In ACL.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Deep residual learning for image recognition", |
|
"authors": [ |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "X", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Ren", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "CVPR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "K. He, X. Zhang, S. Ren, and J. Sun. 2016. Deep resid- ual learning for image recognition. In CVPR.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Unsupervised discovery of multimodal links in multi-image, multi-sentence documents", |
|
"authors": [ |
|
{ |
|
"first": "Jack", |
|
"middle": [], |
|
"last": "Hessel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lillian", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Mimno", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "EMNLP/IJCNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jack Hessel, Lillian Lee, and David Mimno. 2019. Unsupervised discovery of multimodal links in multi-image, multi-sentence documents. In EMNLP/IJCNLP.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "A comprehensive survey of deep learning for image captioning", |
|
"authors": [ |
|
{ |
|
"first": "Ferdous", |
|
"middle": [], |
|
"last": "Md Hossain", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Sohel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "ACM Computing Surveys (CSUR)", |
|
"volume": "51", |
|
"issue": "6", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "MD Hossain, Ferdous Sohel, Mohd Fairuz Shiratuddin, and Hamid Laga. 2019. A comprehensive survey of deep learning for image captioning. ACM Comput- ing Surveys (CSUR), 51(6):118.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Deep visualsemantic alignments for generating image descriptions", |
|
"authors": [ |
|
{ |
|
"first": "Andrej", |
|
"middle": [], |
|
"last": "Karpathy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fei-Fei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "CVPR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andrej Karpathy and Fei-Fei Li. 2015. Deep visual- semantic alignments for generating image descrip- tions. In CVPR.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "A hierarchical approach for generating descriptive image paragraphs", |
|
"authors": [ |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "Krause", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Justin", |
|
"middle": [], |
|
"last": "Johnson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ranjay", |
|
"middle": [], |
|
"last": "Krishna", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Fei-Fei", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "CVPR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jonathan Krause, Justin Johnson, Ranjay Krishna, and Li Fei-Fei. 2017. A hierarchical approach for gener- ating descriptive image paragraphs. In CVPR.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Towards unsupervised image captioning with shared multimodal embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Iro", |
|
"middle": [], |
|
"last": "Laina", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christian", |
|
"middle": [], |
|
"last": "Rupprecht", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nassir", |
|
"middle": [], |
|
"last": "Navab", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "ICCV", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Iro Laina, Christian Rupprecht, and Nassir Navab. 2019. Towards unsupervised image captioning with shared multimodal embeddings. In ICCV.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Abstractive summarization: A survey of the state of the art", |
|
"authors": [ |
|
{ |
|
"first": "Hui", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vincent", |
|
"middle": [], |
|
"last": "Ng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "AAAI", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hui Lin and Vincent Ng. 2019. Abstractive summariza- tion: A survey of the state of the art. In AAAI.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Microsoft COCO: common objects in context", |
|
"authors": [ |
|
{ |
|
"first": "Tsung-Yi", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Maire", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Serge", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Belongie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Hays", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pietro", |
|
"middle": [], |
|
"last": "Perona", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Deva", |
|
"middle": [], |
|
"last": "Ramanan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Piotr", |
|
"middle": [], |
|
"last": "Doll\u00e1r", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [ |
|
"Lawrence" |
|
], |
|
"last": "Zitnick", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "ECCV", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tsung-Yi Lin, Michael Maire, Serge J. Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Doll\u00e1r, and C. Lawrence Zitnick. 2014. Microsoft COCO: common objects in context. In ECCV.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Beyond narrative description: Generating poetry from images by multi-adversarial training", |
|
"authors": [ |
|
{ |
|
"first": "Bei", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianlong", |
|
"middle": [], |
|
"last": "Fu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Makoto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Masatoshi", |
|
"middle": [], |
|
"last": "Kato", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Yoshikawa", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "ACM Multimedia", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bei Liu, Jianlong Fu, Makoto P. Kato, and Masatoshi Yoshikawa. 2018. Beyond narrative description: Generating poetry from images by multi-adversarial training. In ACM Multimedia.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Tatsuru Higurashi, and Yoshimune Tabuchi. 2019. A case study on neural headline generation for editing support", |
|
"authors": [ |
|
{ |
|
"first": "Kazuma", |
|
"middle": [], |
|
"last": "Murao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ken", |
|
"middle": [], |
|
"last": "Kobayashi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hayato", |
|
"middle": [], |
|
"last": "Kobayashi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Taichi", |
|
"middle": [], |
|
"last": "Yatsuka", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Takeshi", |
|
"middle": [], |
|
"last": "Masuyama", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "NAACL-HLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kazuma Murao, Ken Kobayashi, Hayato Kobayashi, Taichi Yatsuka, Takeshi Masuyama, Tatsuru Hig- urashi, and Yoshimune Tabuchi. 2019. A case study on neural headline generation for editing support. In NAACL-HLT.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Attend to you: Personalized image captioning with context sequence memory networks", |
|
"authors": [ |
|
{ |
|
"first": "Byeongchang", |
|
"middle": [], |
|
"last": "Cesc Chunseong Park", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gunhee", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "CVPR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Cesc Chunseong Park, Byeongchang Kim, and Gunhee Kim. 2017. Attend to you: Personalized image cap- tioning with context sequence memory networks. In CVPR.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "BERT can see out of the box: On the cross-modal transferability of text representations", |
|
"authors": [ |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Scialom", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Patrick", |
|
"middle": [], |
|
"last": "Bordes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul-Alexis", |
|
"middle": [], |
|
"last": "Dray", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jacopo", |
|
"middle": [], |
|
"last": "Staiano", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Patrick", |
|
"middle": [], |
|
"last": "Gallinari", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas Scialom, Patrick Bordes, Paul-Alexis Dray, Ja- copo Staiano, and Patrick Gallinari. 2020. BERT can see out of the box: On the cross-modal transfer- ability of text representations. CoRR.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Get to the point: Summarization with pointergenerator networks", |
|
"authors": [ |
|
{ |
|
"first": "Abigail", |
|
"middle": [], |
|
"last": "See", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Peter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Abigail See, Peter J. Liu, and Christopher D. Manning. 2017. Get to the point: Summarization with pointer- generator networks. In ACL.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Engaging image captioning via personality", |
|
"authors": [ |
|
{ |
|
"first": "Kurt", |
|
"middle": [], |
|
"last": "Shuster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel", |
|
"middle": [], |
|
"last": "Humeau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hexiang", |
|
"middle": [], |
|
"last": "Hu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antoine", |
|
"middle": [], |
|
"last": "Bordes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Weston", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "CVPR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kurt Shuster, Samuel Humeau, Hexiang Hu, Antoine Bordes, and Jason Weston. 2019. Engaging image captioning via personality. In CVPR.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Transform and Tell: Entity-Aware News Image Captioning", |
|
"authors": [ |
|
{ |
|
"first": "Alasdair", |
|
"middle": [], |
|
"last": "Tran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Mathews", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lexing", |
|
"middle": [], |
|
"last": "Xie", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "CVPR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alasdair Tran, Alexander Mathews, and Lexing Xie. 2020. Transform and Tell: Entity-Aware News Im- age Captioning. In CVPR.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Show and tell: A neural image caption generator", |
|
"authors": [ |
|
{ |
|
"first": "Oriol", |
|
"middle": [], |
|
"last": "Vinyals", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Toshev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samy", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dumitru", |
|
"middle": [], |
|
"last": "Erhan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "CVPR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Oriol Vinyals, Alexander Toshev, Samy Bengio, and Dumitru Erhan. 2015. Show and tell: A neural im- age caption generator. In CVPR.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "An overview of image caption generation methods", |
|
"authors": [ |
|
{ |
|
"first": "Haoran", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yue", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaosheng", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Comp. Int. and Neurosc", |
|
"volume": "", |
|
"issue": "13", |
|
"pages": "3062706--3062707", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Haoran Wang, Yue Zhang, and Xiaosheng Yu. 2020. An overview of image caption generation meth- ods. Comp. Int. and Neurosc., 2020:3062706:1- 3062706:13.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Image captioning and visual question answering based on attributes and external knowledge", |
|
"authors": [ |
|
{ |
|
"first": "Qi", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chunhua", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peng", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anthony", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Dick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anton", |
|
"middle": [], |
|
"last": "Van Den", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Hengel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "IEEE Trans. Pattern Anal. Mach. Intell", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Qi Wu, Chunhua Shen, Peng Wang, Anthony R. Dick, and Anton van den Hengel. 2018. Image captioning and visual question answering based on attributes and external knowledge. IEEE Trans. Pattern Anal. Mach. Intell.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "XGPT: cross-modal generative pre-training for image captioning", |
|
"authors": [ |
|
{ |
|
"first": "Qiaolin", |
|
"middle": [], |
|
"last": "Xia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Haoyang", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nan", |
|
"middle": [], |
|
"last": "Duan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dongdong", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lei", |
|
"middle": [], |
|
"last": "Ji", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhifang", |
|
"middle": [], |
|
"last": "Sui", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edward", |
|
"middle": [], |
|
"last": "Cui", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Taroon", |
|
"middle": [], |
|
"last": "Bharti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xin", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Qiaolin Xia, Haoyang Huang, Nan Duan, Dongdong Zhang, Lei Ji, Zhifang Sui, Edward Cui, Taroon Bharti, Xin Liu, and Ming Zhou. 2020. XGPT: cross-modal generative pre-training for image cap- tioning. CoRR.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Improving image captioning by leveraging knowledge graphs", |
|
"authors": [ |
|
{ |
|
"first": "Yimin", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yiwei", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vasant", |
|
"middle": [ |
|
"G" |
|
], |
|
"last": "Honavar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "WACV", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yimin Zhou, Yiwei Sun, and Vasant G. Honavar. 2019. Improving image captioning by leveraging knowl- edge graphs. In WACV.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "MSMO: multimodal summarization with multimodal output", |
|
"authors": [ |
|
{ |
|
"first": "Junnan", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Haoran", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tianshang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yu", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiajun", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chengqing", |
|
"middle": [], |
|
"last": "Zong", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Junnan Zhu, Haoran Li, Tianshang Liu, Yu Zhou, Ji- ajun Zhang, and Chengqing Zong. 2018. MSMO: multimodal summarization with multimodal output. In EMNLP.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "Our novel Contextual Captions capture the affective theme from a given image and its associated paragraph." |
|
}, |
|
"FIGREF1": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "A schematic diagram of our contextual captioning model" |
|
}, |
|
"FIGREF2": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "Linguistic richness of Contextual Captions in contrast to those generated from only image or only text." |
|
}, |
|
"FIGREF3": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "Effect of various factors in Contextual Captioning." |
|
}, |
|
"TABREF1": { |
|
"content": "<table/>", |
|
"num": null, |
|
"html": null, |
|
"type_str": "table", |
|
"text": "" |
|
}, |
|
"TABREF3": { |
|
"content": "<table><tr><td/><td colspan=\"2\">Image-only</td><td colspan=\"2\">Text-only</td></tr><tr><td/><td>\u2265</td><td>></td><td>\u2265</td><td>></td></tr><tr><td>+NE</td><td colspan=\"4\">71.6 42.4 74.4 38.4</td></tr><tr><td colspan=\"5\">+NE-overlap 69.6 42.8 74.0 44.4</td></tr><tr><td>NE</td><td colspan=\"4\">70.0 45.2 73.6 41.2</td></tr><tr><td>NE-overlap</td><td colspan=\"4\">76.0 48.4 81.2 49.6</td></tr></table>", |
|
"num": null, |
|
"html": null, |
|
"type_str": "table", |
|
"text": "Percentage of samples where contextual captions are rated as good as or better than baselines." |
|
}, |
|
"TABREF4": { |
|
"content": "<table><tr><td>+NE</td><td colspan=\"3\">+NE-NENE-</td><td colspan=\"2\">Image-Text-</td></tr><tr><td/><td>overlap</td><td/><td>overlap</td><td>only</td><td>only</td></tr><tr><td>17.8</td><td>15.8</td><td>15.9</td><td>25.7</td><td>15.0</td><td>9.9</td></tr></table>", |
|
"num": null, |
|
"html": null, |
|
"type_str": "table", |
|
"text": "Percentage of samples rated highest per model." |
|
}, |
|
"TABREF5": { |
|
"content": "<table><tr><td/><td colspan=\"2\">Image-only</td><td colspan=\"2\">Text-only</td></tr><tr><td/><td>\u2265</td><td>></td><td>\u2265</td><td>></td></tr><tr><td>+NE</td><td colspan=\"4\">66.1 55.7 67.8 47.0</td></tr><tr><td colspan=\"5\">+NE-overlap 64.4 53.9 69.6 54.0</td></tr><tr><td>NE</td><td colspan=\"4\">60.9 48.7 64.4 43.5</td></tr><tr><td>NE-overlap</td><td colspan=\"4\">72.2 59.1 78.3 58.3</td></tr></table>", |
|
"num": null, |
|
"html": null, |
|
"type_str": "table", |
|
"text": "Percentage of Significant samples where contextual captions are rated as good as or better than baselines." |
|
}, |
|
"TABREF6": { |
|
"content": "<table><tr><td>Models</td><td>+NE</td><td colspan=\"3\">+NE-NENE-</td></tr><tr><td/><td/><td>overlap</td><td/><td>overlap</td></tr><tr><td colspan=\"2\">Image-only 0.89</td><td>0.70</td><td>1.41</td><td>1.05</td></tr><tr><td>Text-only</td><td>1.39</td><td>1.46</td><td>1.38</td><td>1.27</td></tr><tr><td>Contextual</td><td>1.29</td><td>1.28</td><td>1.14</td><td>1.13</td></tr></table>", |
|
"num": null, |
|
"html": null, |
|
"type_str": "table", |
|
"text": "Validation loss of the reported test models." |
|
} |
|
} |
|
} |
|
} |