|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T11:58:26.766776Z" |
|
}, |
|
"title": "Finding and Generating a Missing Part for Story Completion", |
|
"authors": [ |
|
{ |
|
"first": "Yusuke", |
|
"middle": [], |
|
"last": "Mori", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "The University of Tokyo", |
|
"location": { |
|
"country": "2 RIKEN" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Hiroaki", |
|
"middle": [], |
|
"last": "Yamane", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "The University of Tokyo", |
|
"location": { |
|
"country": "2 RIKEN" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Yusuke", |
|
"middle": [], |
|
"last": "Mukuta", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "The University of Tokyo", |
|
"location": { |
|
"country": "2 RIKEN" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Tatsuya", |
|
"middle": [], |
|
"last": "Harada", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "The University of Tokyo", |
|
"location": { |
|
"country": "2 RIKEN" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Creating a story is difficult. Professional writers often experience a writer's block. Thus, providing automatic support to writers is crucial but also challenging. Recently, in the field of generating and understanding stories, story completion (SC) has been proposed as a method for generating missing parts of an incomplete story. Despite this method's usefulness in providing creative support, its applicability is currently limited because it requires the user to have prior knowledge of the missing part of a story. Writers do not always know which part of their writing is flawed. To overcome this problem, we propose a novel approach called \"missing position prediction (MPP).\" Given an incomplete story, we aim to predict the position of the missing part. We also propose a novel method for MPP and SC. We first conduct an experiment focusing on MPP, and our analysis shows that highly accurate predictions can be obtained when the missing part of a story is the beginning or the end. This suggests that if a story has a specific beginning or end, they play significant roles. We conduct an experiment on SC using MPP, and our proposed method demonstrates promising results.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Creating a story is difficult. Professional writers often experience a writer's block. Thus, providing automatic support to writers is crucial but also challenging. Recently, in the field of generating and understanding stories, story completion (SC) has been proposed as a method for generating missing parts of an incomplete story. Despite this method's usefulness in providing creative support, its applicability is currently limited because it requires the user to have prior knowledge of the missing part of a story. Writers do not always know which part of their writing is flawed. To overcome this problem, we propose a novel approach called \"missing position prediction (MPP).\" Given an incomplete story, we aim to predict the position of the missing part. We also propose a novel method for MPP and SC. We first conduct an experiment focusing on MPP, and our analysis shows that highly accurate predictions can be obtained when the missing part of a story is the beginning or the end. This suggests that if a story has a specific beginning or end, they play significant roles. We conduct an experiment on SC using MPP, and our proposed method demonstrates promising results.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Currently, because of the Internet, anybody can freely publish their original stories. However, it is challenging to write something that people would like and want to read. Sometimes, even professional writers fall into slumps during the writing process.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Numerous studies on understanding the secret of creating good stories have been conducted (Campbell, 1949; Propp, 1968) . Rules for creating stories have been studied extensively, and \"three-act structure\" (Field, 2006) and \"Save the cat\" (Snyder, 2005) are popular examples. These works can help guide people who want to write good stories to demonstrate their creativity.", |
|
"cite_spans": [ |
|
{ |
|
"start": 90, |
|
"end": 106, |
|
"text": "(Campbell, 1949;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 107, |
|
"end": 119, |
|
"text": "Propp, 1968)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 206, |
|
"end": 219, |
|
"text": "(Field, 2006)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 239, |
|
"end": 253, |
|
"text": "(Snyder, 2005)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "With the development of machine learning (ML) and natural language processing (NLP) technology in recent years, the creation of an automated system that supports the creative endeavors of people is now feasible (Roemmele, 2016; Peng et al., 2018; Yao et al., 2019; Goldfarb-Tarrant et al., 2019) . To assist people in creating stories, it is essential to train computers to understand and create stories.", |
|
"cite_spans": [ |
|
{ |
|
"start": 211, |
|
"end": 227, |
|
"text": "(Roemmele, 2016;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 228, |
|
"end": 246, |
|
"text": "Peng et al., 2018;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 247, |
|
"end": 264, |
|
"text": "Yao et al., 2019;", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 265, |
|
"end": 295, |
|
"text": "Goldfarb-Tarrant et al., 2019)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "To measure the reading comprehension abilities of systems regarding stories, Mostafazadeh et al. (2016) proposed the \"Story Cloze Test\" (SCT). In the SCT, four sentences are presented, and the last sentence is excluded from an original five-sentence story. The objective of this task is to select an appropriate sentence from two options that complement the missing last sentence. Based on this approach, Wang and Wan (2019) proposed the \"Story Completion (SC)\" task in the field of generating and understanding stories. Given any four sentences of a five-sentence story, the objective of this task is to generate the sentence that is not given (known as the missing plot) to complete the story.", |
|
"cite_spans": [ |
|
{ |
|
"start": 77, |
|
"end": 103, |
|
"text": "Mostafazadeh et al. (2016)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 405, |
|
"end": 424, |
|
"text": "Wang and Wan (2019)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The ability to solve the SC is essential in the context of creative support. If writers cannot complete a story or a plot, a suitable model can provide them with the appropriate support. However, such applications are currently restricted because they require the user to know which part of a story is missing in advance. When considering an actual application, writers do not always know where their writing is Additionally, we propose a novel method for MPP and SC. Given an incomplete story, it estimates the missing part and generates a sentence to complete the story. We make our code available to support further progress on our proposed task and SC. 1 Our main contributions are as follows:", |
|
"cite_spans": [ |
|
{ |
|
"start": 657, |
|
"end": 658, |
|
"text": "1", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We propose \"Missing Position Prediction (MPP)\" as a story comprehension method. This method predicts the position of a missing part of an incomplete story and has significance in the contexts of story understanding, story generation, and story-writing assistance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We propose a novel method for MPP and SC. We first perform an experiment focusing on the MPP, and our proposed method demonstrates promising results. An analysis of the results shows that highly accurate predictions can be obtained when the missing part of a story is its beginning or end.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Based on the results of the MPP experiment, we conduct another experiment on SC using MPP. The results of the experiment show that given an incomplete story, it is possible to restore it such that it is comparable with the original human-written one.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "2 Related Work", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In some studies, to better comprehend stories, the stories were considered to be collections of events. The \"Narrative Cloze Test\" (Chambers and Jurafsky, 2008 ) is a typical example. Mostafazadeh et al. (2016) proposed the SCT as a more difficult task. The SCT presents four sentences, and the last sentence is excluded from a story composed of five sentences. The system must select an appropriate sentence from two choices that complement the missing last sentence. In addition to the task, the authors released a large-scale story corpus named \"ROCStories,\" which is a collection of non-fictional daily-life stories written by hundreds of workers belonging to Amazon Mechanical Turk (Amazon MTurk).", |
|
"cite_spans": [ |
|
{ |
|
"start": 131, |
|
"end": 159, |
|
"text": "(Chambers and Jurafsky, 2008", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 184, |
|
"end": 210, |
|
"text": "Mostafazadeh et al. (2016)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reading Comprehension on Stories", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "In our proposed task, it is essential to understand the remaining information to infer what is missing. Regarding the example in Figure 1 , the third sentence states that Jennifer is weary, and the fourth sentence mentions that she felt bittersweet. It is estimated that something mentioned as \"it\" is missing, and \"it\" is the reason for her change of feeling. In this manner, it is necessary to identify unnaturalness -that is, the parts where the narrative arc is broken -in a story. This is a more challenging task than SCT. We believe this task is deeply related to a fundamental question in story understanding: whether or not the model understands the flow of a story.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 129, |
|
"end": 137, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Reading Comprehension on Stories", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Inspired by the SCT, Zhao et al. (2018) designed \"Story Ending Generation (SEG)\" as a subtask of story generation. Given an incomplete story, where the last sentence is excluded from the original five-sentence story, the objective of this task is to generate the last sentence, not to select. Furthermore, based on SEG, Wang and Wan (2019) proposed the SC and investigated the problem of generating a missing story plot at any position in an incomplete story.", |
|
"cite_spans": [ |
|
{ |
|
"start": 21, |
|
"end": 39, |
|
"text": "Zhao et al. (2018)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 320, |
|
"end": 339, |
|
"text": "Wang and Wan (2019)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Partial Generation of Stories", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Additionally, in recent years, research regarding text infilling has been actively conducted (Ippolito et al., 2019; Donahue et al., 2020; Huang et al., 2020) . Regarding stories, Ippolito et al. (2019) worked on complementing the missing span between left and right contexts, which they called \"story infilling.\" In the appendix, they reported that they tried human evaluation using Amazon MTurk but their task was too hard for the average worker. Although there is no mention of why the task was too hard for the average worker, we suspect that the length of the text in their task may have been one of the reasons.", |
|
"cite_spans": [ |
|
{ |
|
"start": 93, |
|
"end": 116, |
|
"text": "(Ippolito et al., 2019;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 117, |
|
"end": 138, |
|
"text": "Donahue et al., 2020;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 139, |
|
"end": 158, |
|
"text": "Huang et al., 2020)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 180, |
|
"end": 202, |
|
"text": "Ippolito et al. (2019)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Partial Generation of Stories", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "These studies require a writer to have prior knowledge of the missing parts and do not consider the case where the writer is unaware of the flaws in his/her work. The MPP aims to fill this gap.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Partial Generation of Stories", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "We should note that even when there is a missing part in the story, it may be caused by writer's intention that \"I want the readers to read between lines\". However, the missing part can also be an unintentional mistake. To analyze if the model can understand whether the missing part is an \"writer's intentional missing\" is out of the scope of this study. At this stage, MPP is especially effective in the latter case, unintentional mistake. However, when a model's understanding of writer's intentional missing is achieved, it is expected that writers can also be benefited in the former case -using a method of MPP, they can know whether their intention is well understandable by readers.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Partial Generation of Stories", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "As the first step, we used short stories for this task. Instead of asking average workers, we did a qualification test and only qualified workers could participate in the evaluation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Partial Generation of Stories", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "In SEG, a simple type of the sequence-to-sequence model (Seq2seq) (Sutskever et al., 2014) and an extension using the attention mechanism are typically used as baselines (Zhao et al., 2018; Li et al., 2018; Guan et al., 2019; Mori et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 66, |
|
"end": 90, |
|
"text": "(Sutskever et al., 2014)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 170, |
|
"end": 189, |
|
"text": "(Zhao et al., 2018;", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 190, |
|
"end": 206, |
|
"text": "Li et al., 2018;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 207, |
|
"end": 225, |
|
"text": "Guan et al., 2019;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 226, |
|
"end": 244, |
|
"text": "Mori et al., 2019)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Seq2seq for Text Generation", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "The use of unsupervised pre-trained large neural models, such as BERT (Devlin et al., 2019) and GPT-2 (Radford et al., 2019) , has become mainstream in NLP. BERT is originally trained as a masked language model and considered unsuitable for text generation compared with models using a left-toright architecture, such as GPT-2, XLNet (Yang et al., 2019) , and BART (Lewis et al., 2020) . However, experiments conducted by Rothe et al. (2020) using BERT and GPT-2 for Seq2seq demonstrated interesting results. Although they did not claim that BERT is optimal as a decoder, they demonstrated that BERT2BERT outperforms BERT2GPT in some generation tasks.", |
|
"cite_spans": [ |
|
{ |
|
"start": 70, |
|
"end": 91, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 102, |
|
"end": 124, |
|
"text": "(Radford et al., 2019)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 334, |
|
"end": 353, |
|
"text": "(Yang et al., 2019)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 365, |
|
"end": 385, |
|
"text": "(Lewis et al., 2020)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Seq2seq for Text Generation", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "In this study, we extend the Seq2seq-based method for SEG to solve MPP and SC. To achieve a more natural sentence completion, we use BERT as a Seq2seq decoder and BERT-derived Sentence-BERT (SBERT) (Reimers and Gurevych, 2019) as a part of an encoder.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Seq2seq for Text Generation", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "We begin by formulating SEG and the SC, after which we formulate our proposed task.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task Description", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "We define S = {s 1 , s 2 , ..., s n } as a story comprising n sentences. In SEG, S = {s 1 , s 2 , ..., s n\u22121 } is given as an input. The objective of the task is to generate an appropriate ending. For SC, an incomplete story consisting of n \u2212 1 sentences S = {s 1 , ..., s k\u22121 , s k+1 , ..., s n }, where k represents the position of Ronda was at the fair craving something buttery.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Story Ending Generation and Story Completion", |
|
"sec_num": "3.1" |
|
}, |
|
|
{ |
|
"text": "lowercased Figure 2 : An overview of the proposed method. the missing sentence in the story, is given. Next, we focus on the objective of the task that involves generating an appropriate sentence which is coherent with the given sentences. During each task, the model is trained to maximize probability p(y|S ), where y represents the ground truth sentence. Specifically, y = s n in SEG and y = s k in SC.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 11, |
|
"end": 19, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Story Completion", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "To overcome the issue whereby the SC model requires information regarding k, i.e., the position of the missing sentence, we propose the MPP to predict k from a given n \u2212 1 sentences, as shown in Figure 1 . Similar to the SC, an incomplete story comprising n \u2212 1 sentences S = {s 1 , ..., s k\u22121 , s k+1 , ..., s n } is given as an input. However, any information regarding k is not given. The order of the sentences is known, but the missing position is unknown. Specifically, s k\u22121 and s k+1 are treated as continuous sentences. Our objective is to predict k from the input. In other words, the model is trained to maximize probability p(missing = k|S ).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 195, |
|
"end": 204, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Missing Position Prediction", |
|
"sec_num": "3.2" |
|
}, |
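{
"text": "To make the task setup concrete, the following minimal Python sketch (our illustration of the formulation above, not the authors' released code; the helper name build_example and the 0-indexed position are our own choices) derives an MPP/SC training example from a complete story by removing one sentence at a random position:\n\nimport random\n\ndef build_example(story):\n    # story: list of n sentences (n = 5 for ROCStories)\n    k = random.randrange(len(story))        # position of the sentence to remove\n    incomplete = story[:k] + story[k + 1:]  # S': order preserved, gap position not marked\n    target = story[k]                       # ground-truth sentence y for SC\n    return incomplete, k, target            # k is the MPP label, target is the SC target",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Missing Position Prediction",
"sec_num": "3.2"
},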
|
{ |
|
"text": "Hierarchical approaches have demonstrated effectiveness in story generation (Fan et al., 2018; Ravi et al., 2018) . We propose a novel method with a hierarchical architecture for the MPP and SC. We devise a method inspired by the two-step encoder of Hierarchical-Seq2seq, which is a simple method for SEG that we proposed in our previous study (Mori et al., 2019) . The first encoder receives S = {s 1 , ..., s k\u22121 , s k+1 , ..., s n } and outputs the sentence embeddings {v 1 , ..., v k\u22121 , v k+1 , ..., v n }. Next, the second encoder receives the sentence embeddings and generates a distributed representation of the entire context v context . We call the first encoder \"sentence encoder,\" and the second encoder \"context encoder.\" For MPP, we input v context into a linear layer and obtain a five-unit output. For SC, we input v context into a language model and obtain a sentence to complete the story. Figure 2 shows an overview of the proposed method. Although the output of the MPP can be used here, we prefer to have our model learn these two tasks simultaneously. We intend to take it up as future work to use predicted MPP for SC and vice versa.", |
|
"cite_spans": [ |
|
{ |
|
"start": 76, |
|
"end": 94, |
|
"text": "(Fan et al., 2018;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 95, |
|
"end": 113, |
|
"text": "Ravi et al., 2018)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 344, |
|
"end": 363, |
|
"text": "(Mori et al., 2019)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 908, |
|
"end": 916, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Proposed Method", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "In proposing a new task, we believe it is useful to test how well a simple method can solve the task. Analyzing the performance and characteristics of a simple method will help in the application of complex methods in future studies. Thus, we propose a new task along with a simple method.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Proposed Method", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "First, we obtain sentence embeddings v j for each input sentence s j in a given context. We apply SBERT in each sentence. This encoder is not fine-tuned during our training.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentence Encoder", |
|
"sec_num": "4.1" |
|
}, |
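{
"text": "As a minimal sketch of this step (assuming the sentence-transformers library and the SBERT model name given in Section 5.3; the example sentences are the incomplete story from Figure 2), the frozen sentence encoder maps each sentence to a fixed-size embedding:\n\nfrom sentence_transformers import SentenceTransformer\n\n# Frozen sentence encoder: one 768-dimensional embedding per input sentence.\nsbert = SentenceTransformer('bert-base-nli-mean-tokens')\n\nincomplete_story = [\n    'Ronda was at the fair craving something buttery.',\n    'When she had her bag, she began taking bites.',\n    'To her dismay this popcorn was sweet, which she hated.',\n    'She ended up giving the popcorn to her daughter instead.',\n]\nsentence_embeddings = sbert.encode(incomplete_story)  # shape (4, 768); not fine-tuned",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Sentence Encoder",
"sec_num": "4.1"
},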
|
{ |
|
"text": "Using the sentence embeddings obtained, we apply another encoding layer to handle context embedding v context . Although there is a discontinuity in the input and the missing position k is not given, the order is preserved. Hence, it is considered to be appropriate to treat the input as a sequence. We propose to use gated recurrent unit (GRU) (Cho et al., 2014) as the main part of the encoder. GRU is a type of RNN and is useful in handling sequences. Ravi et al. (2018) used an RNN with GRU cells for story encoding, demonstrating that a GRU is sufficient to capture the sequence in a short story. Li et al. (2019) also used GRU and its variant in their Context Encoder. The output of the GRU is input into a linear layer and a batch normalization layer (Ioffe and Szegedy, 2015) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 345, |
|
"end": 363, |
|
"text": "(Cho et al., 2014)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 455, |
|
"end": 473, |
|
"text": "Ravi et al. (2018)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 602, |
|
"end": 618, |
|
"text": "Li et al. (2019)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 758, |
|
"end": 783, |
|
"text": "(Ioffe and Szegedy, 2015)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Context Encoder", |
|
"sec_num": "4.2" |
|
}, |
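{
"text": "The following PyTorch sketch (our illustration of the MPP branch only, using the dimensions reported in Section 5.3; the class name MPPHead is hypothetical) combines the GRU context encoder, the linear and batch normalization layers, and the five-way classification layer described above:\n\nimport torch\nimport torch.nn as nn\n\nclass MPPHead(nn.Module):\n    def __init__(self, emb_dim=768, hidden_dim=256, n_positions=5):\n        super().__init__()\n        self.gru = nn.GRU(emb_dim, hidden_dim, batch_first=True)\n        self.linear = nn.Linear(hidden_dim, hidden_dim)\n        self.bn = nn.BatchNorm1d(hidden_dim)\n        self.classifier = nn.Linear(hidden_dim, n_positions)\n\n    def forward(self, sent_embs):\n        # sent_embs: (batch, n-1, emb_dim) SBERT embeddings of the incomplete story\n        _, h = self.gru(sent_embs)                      # final hidden state: (1, batch, hidden_dim)\n        v_context = self.bn(self.linear(h.squeeze(0)))  # context vector v_context\n        return self.classifier(v_context)               # logits over the five candidate positions",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Context Encoder",
"sec_num": "4.2"
},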
|
{ |
|
"text": "We use BERT as a language model for generating sentences to fill in the missing parts. Here, BERT is used as a decoder, and the output of the context encoder is used to initialize the encoder hidden states in cross-attention. Starting from the start token, we repeat the next token prediction for sentence generation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Language Model", |
|
"sec_num": "4.3" |
|
}, |
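{
"text": "A sketch of this decoding step is given below, assuming the HuggingFace Transformers BERT implementation mentioned in Section 6.1. The greedy loop and the use of v_context as a length-one sequence of encoder hidden states are our own simplifications for illustration, not the authors' exact procedure:\n\nimport torch\nfrom transformers import BertConfig, BertLMHeadModel, BertTokenizer\n\ntokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\nconfig = BertConfig.from_pretrained('bert-base-uncased', is_decoder=True, add_cross_attention=True)\ndecoder = BertLMHeadModel.from_pretrained('bert-base-uncased', config=config)\n\ndef greedy_decode(v_context, max_len=20):\n    # v_context: (batch, 768) output of the context encoder, exposed as encoder states for cross-attention\n    enc_states = v_context.unsqueeze(1)  # (batch, 1, 768)\n    ids = torch.full((v_context.size(0), 1), tokenizer.cls_token_id, dtype=torch.long)\n    for _ in range(max_len):\n        logits = decoder(input_ids=ids, encoder_hidden_states=enc_states).logits\n        next_id = logits[:, -1, :].argmax(dim=-1, keepdim=True)  # greedy next-token prediction\n        ids = torch.cat([ids, next_id], dim=1)\n    return ids  # token ids of the generated completion sentence",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Language Model",
"sec_num": "4.3"
},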
|
{ |
|
"text": "First, we worked on learning the MPP only. In this experiment, we investigated the part of the proposed method that excludes the language model. ROCStories is a well-organized corpus and is widely used in story-generation tasks; it is typically used in SEG (Zhao et al., 2018; Li et al., 2018; Guan et al., 2019) . Similarly, Wang and Wan (2019) used it for their story-completion task. Furthermore, the dataset was used by Peng et al. (2018) for controllable story generation. Qin et al. (2019) tackled \"Counterfactual Story Rewriting,\" which is a story revising task, using their proposed TIMETRAVEL dataset built using ROCStories. Although, initially, we did consider using other datasets, such as WritingPrompts, we ultimately did not use them. Stories in WritingPrompts vary in terms of length, and therefore, the importance of a single sentence varies from one story to the other. Thus, considering the requirements of our analysis, the aforementioned dataset seemed inappropriate.", |
|
"cite_spans": [ |
|
{ |
|
"start": 257, |
|
"end": 276, |
|
"text": "(Zhao et al., 2018;", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 277, |
|
"end": 293, |
|
"text": "Li et al., 2018;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 294, |
|
"end": 312, |
|
"text": "Guan et al., 2019)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 326, |
|
"end": 345, |
|
"text": "Wang and Wan (2019)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 424, |
|
"end": 442, |
|
"text": "Peng et al. (2018)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 478, |
|
"end": 495, |
|
"text": "Qin et al. (2019)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiment 1: Missing Position Prediction", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Thus, as a starting point for proposing the task, we used ROCStories. As shown in Table 1 , the dataset was randomly split in the ratio of 8:1:1 to obtain the training, development, and test sets, respectively. We removed one sentence from a five-sentence story. The missing position k was randomly decided based on a discrete uniform distribution. For the development and test sets, this removal procedure was performed when creating the dataset to improve reproducibility. For the training set, we retained the original fivesentence story in the dataset and removed a sentence randomly when reading the data during training. As a result, a different sentence could be removed from the same story, with a different k value, thus acting as data augmentation.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 82, |
|
"end": 89, |
|
"text": "Table 1", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Dataset", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Max-pool Context. To examine the usefulness of treating context as a sequence in the proposed task, we trained another model. In this setting, a max pooling layer was used as a context encoder.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparison Method", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "We trained a model for 30 epochs. The validation loss for every epoch was calculated, and the state with the smallest validation loss was used for further tests. Among the trained SBERTs, we used \"bert-basenli-mean-tokens.\" The output dimension was 768. For the GRU context, the number of hidden units of the GRU was 256. The linear layer had 256 dimensions for both the input and output, and weights were initialized from a normal distribution with mean = 0, std = 0.01. For the max-pool context, we applied max pooling to sentence embeddings and obtained a vector with the same dimension as the sentence embedding. We then input this vector into a linear layer and obtained a 256-dimensional vector as the context vector. The linear layer for receiving the output of the context encoder and for identifying the five labels had a 256-dimensional input and a five-dimensional output. We used the Adam optimizer with a learning rate of 0.001, \u03b2 1 = 0.9, \u03b2 2 = 0.999, and a weight decay of 0. A gradient clipping with a value of 5 was used. We set the batch size to 256.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training Details", |
|
"sec_num": "5.3" |
|
}, |
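{
"text": "For reference, a self-contained sketch of one optimization step with the settings above (the two-layer module and the random tensors are only stand-ins for the MPP network and a real batch):\n\nimport torch\nimport torch.nn as nn\n\nmodel = nn.Sequential(nn.Linear(256, 256), nn.Linear(256, 5))  # placeholder for the MPP head\noptimizer = torch.optim.Adam(model.parameters(), lr=0.001, betas=(0.9, 0.999), weight_decay=0)\n\nv_context = torch.randn(256, 256)     # batch of 256 context vectors\nlabels = torch.randint(0, 5, (256,))  # ground-truth missing positions\nloss = nn.functional.cross_entropy(model(v_context), labels)\nloss.backward()\ntorch.nn.utils.clip_grad_norm_(model.parameters(), 5)  # gradient clipping at 5\noptimizer.step()",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Training Details",
"sec_num": "5.3"
},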
|
{ |
|
"text": "For each method, we performed five trials while changing the random seed at the time of training and calculated the mean and standard deviations of the accuracy. As shown in Table 2 , the GRU context achieved an accuracy of 52.2 \u00b1 0.220%, which was higher than the accuracy of the max-pool context. The results indicated the usefulness of treating context as a sequence in the proposed task. Hereinafter, for a more detailed discussion, we use one of the five trials as an example. The heat map in Figure 3 (a) shows which positions can be accurately identified using the GRU context method. When the sentence 1 was missing, the accuracy exceeded 80%. The results of sentences 2 to 4 exhibited lower accuracy, whereas sentence 5 had a higher accuracy. Figure 3 (b) shows the max-pool context result for each missing position. Even though this method does not consider the sequence of the context, the prediction results for the sentences 2 to 4 are lower than those for sentences 1 and 5. Thus, it can be inferred that treating a context as a sequence does not adversely affect the prediction of missing middle sentences.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 174, |
|
"end": 181, |
|
"text": "Table 2", |
|
"ref_id": "TABREF4" |
|
}, |
|
{ |
|
"start": 498, |
|
"end": 510, |
|
"text": "Figure 3 (a)", |
|
"ref_id": "FIGREF2" |
|
}, |
|
{ |
|
"start": 752, |
|
"end": 764, |
|
"text": "Figure 3 (b)", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "Based on the results of Experiment 1, we conducted another experiment in which we tackled both MPP and SC. As a context encoder, we used the GRU context. We used the same dataset as in Section 5.1.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiment 2: Missing Position Prediction + Story Completion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "We trained a model for 50 epochs. The validation loss for every epoch was calculated, and the state with the smallest validation loss was used for human evaluation. For the GRU context, the number of hidden units was 768. The linear layer had 768 dimensions for both the input and output. We used HuggingFace's implementation of BERT and its pre-trained model \"bert-base-uncased\" (Wolf et al., 2019) . We calculated the total loss as follows: L total = 0.5 * L M P P + 0.5 * L SC , where L M P P represents the softmax cross entropy loss for MPP, and L SC represents the softmax cross entropy loss for SC. We optimized the value of L total using the AdamW optimizer with a learning rate of 1e \u2212 4, \u03b2 1 = 0.9, \u03b2 2 = 0.999, = 1e \u2212 8, and a weight decay of 0. We used a linear learning rate warmup with 4 epochs. We used a gradient clipping with a value of 1 and set the batch size to 128.", |
|
"cite_spans": [ |
|
{ |
|
"start": 380, |
|
"end": 399, |
|
"text": "(Wolf et al., 2019)", |
|
"ref_id": "BIBREF28" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training Details", |
|
"sec_num": "6.1" |
|
}, |
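{
"text": "A sketch of the joint objective and the optimizer settings above (the tiny linear module and the step counts are placeholders; only the loss weighting and the hyperparameters follow the text):\n\nimport torch\nfrom transformers import get_linear_schedule_with_warmup\n\ndef total_loss(mpp_logits, mpp_labels, lm_logits, lm_labels):\n    ce = torch.nn.functional.cross_entropy\n    l_mpp = ce(mpp_logits, mpp_labels)                                     # L_MPP\n    l_sc = ce(lm_logits.view(-1, lm_logits.size(-1)), lm_labels.view(-1))  # token-level L_SC\n    return 0.5 * l_mpp + 0.5 * l_sc                                        # L_total\n\nmodel = torch.nn.Linear(768, 5)  # stand-in for the full MPP + SC model\nsteps_per_epoch = 100            # placeholder; depends on dataset size and batch size\noptimizer = torch.optim.AdamW(model.parameters(), lr=1e-4, betas=(0.9, 0.999), eps=1e-8, weight_decay=0)\nscheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=4 * steps_per_epoch, num_training_steps=50 * steps_per_epoch)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Training Details",
"sec_num": "6.1"
},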
|
{ |
|
"text": "We conducted human evaluation with the help of Amazon MTurk workers. We conducted two types of tasks: a qualification test and a pair-wise evaluation task.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Human Evaluation", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "To choose workers with a high degree of ability to evaluate stories for participation in the evaluation task, we first conducted a qualification test. Ten randomly selected questions from the validation set of the SCT were solved by the workers, and only those workers who answered all ten questions correctly were allowed to participate in the next evaluation task.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Human Evaluation", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "For the pair-wise evaluation task, the qualified workers were given two similar short stories, and they were asked to choose which story gave the impression of being a complete story. The workers were given four choices as follows: Option A is more appropriate, Option B is more appropriate, both options are equally appropriate, and neither option is suitable. In this evaluation task, workers were also required to write the reason for their answer. We used 200 story pairs for comparison. The original human-written story (GT) is from the test set shown in Table 1 , and our proposed model generated the other candidates based on an incomplete story. Five workers evaluated each story pair. Among the five answers obtained for each story, the most frequently chosen answers were considered as an agreement among the workers. Notably, that the workers did not do the same number of tasks. Therefore, instead of calculating the inter-annotator agreement, we decided to consider the most frequent answer.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 560, |
|
"end": 567, |
|
"text": "Table 1", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Human Evaluation", |
|
"sec_num": "6.2" |
|
}, |
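{
"text": "The aggregation rule described above can be written as a small sketch (the answer labels here are hypothetical): among the five worker answers for a story pair, the most frequent choice is taken as the agreed judgment.\n\nfrom collections import Counter\n\nanswers = ['GT', 'GT', 'both', 'GT', 'neither']    # five workers' choices for one story pair\nagreement = Counter(answers).most_common(1)[0][0]  # -> 'GT'",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Human Evaluation",
"sec_num": "6.2"
},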
|
{ |
|
"text": "The results of the human evaluation are shown in Table 3 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 49, |
|
"end": 56, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "6.3" |
|
}, |
|
{ |
|
"text": "Proposed GT both neither 8 148 44 0 Table 3 : Human evaluation results of pair-wise experiment. We used 200 stories, and each story was evaluated by five workers. The most frequently chosen answers were considered as their agreement.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 36, |
|
"end": 43, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "6.3" |
|
}, |
|
{ |
|
"text": "Regarding the 200 stories that were autocompleted, eight were judged to be better than the original story, and 44 were judged to be equivalent to the original story. In other words, our proposed method can generate a story that is either as good as or better than a GT story with 26% probability.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "6.3" |
|
}, |
|
{ |
|
"text": "The results of Experiment 1 support the following two findings from Wang and Wan (2019): 1) The plot becomes more complicated as it progresses, thereby making the estimation of latter sentences more difficult and 2) for k = 5, four sentences in the context are continuous. Therefore, a good expression can be easily obtained, even by using an encoder that does not consider discontinuity. It is interesting to note that the beginning or the end of a story can be predicted with the highest accuracy. This appears to be related to the fact that the collection of ROCStories was performed with the following in mind: \"the story should read like a coherent story, with a specific beginning and ending.\" In other words, the story under consideration has a specific beginning and ending. Thus, if the beginning or the ending is missing, it can be interpreted as that the methods treat the story as particularly unnatural and predict the missing position with high accuracy.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "For qualitative analysis on Experiment 2, we show three examples of story pairs and human evaluations in Table 4 . In the first example, the autocompleted story was evaluated to be better than the GT. MPP was a success, and a contextualized completion sentence was generated. In the second example, the autocomplete story was rated as equivalent to the GT. MPP estimated a missing location that differed from the original story but increased information differently from the GT, which was appreciated by the workers. In the third example, autocompletion did not work. It succeeded in MPP, but it failed in generating a contextualized completion sentence. The failure to generate an essential word (\"contest\") is pointed out. Note that the second answer appears to have been mischaracterized.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 105, |
|
"end": 112, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "To overcome the issue of conventional SC tasks that require information regarding the position of the missing part in a story, we proposed a MPP to predict the position based on the given incomplete story. Our proposed method demonstrated that treating the context as a sequence is useful for solving this new task. We examined the prediction accuracy for each missing position and found that a prediction is easier if the beginning or the end of a story is missing. Furthermore, we tackled the combined task of MPP and SC. We conducted a pair-wise human evaluation against a human-written story, for which our proposed method demonstrated promising results.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "Because we limit the study to five-sentence stories, it is unlikely that humans make mistakes in the plot. However, humans may overlook plot imperfections when considering longer, more complex stories. Thus, checking such mistakes is part of the editors' job. We proposed the task in the context of creative support, but it also can be positioned in the context of narrative understanding. Planning a story requires a form of reasoning that can move backward as well as forward. That is why SC tasks have significant meaning in story understanding and generation, and our proposed task would be a better test of a model's abilities to understand the flow of a story.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "For the sake of simplicity, we proposed a simple machine-learning-based method. However, using simple bag-of-words methods or part-of-speech analysis may be effective for our proposed task. Therefore, exploring the efficacy of using methods other than those based on machine learning is left as future work. However, our proposed task poses specific limitations. In our task, it is known that there is a missing position in the input story, and that there is only one such instance. In reality, an input story may be complete, that is, k is null. Furthermore, there may be a case in which there are multiple missing positions, that is, a case in which k has multiple values. Although dealing with these constraints is left for future studies, it is conceivable to introduce a certainty factor for the missing prediction. For example, predicting that k is null when the certainty factor is low. Although we considered a constrained case of study, we believe that our proposed task is an important step toward assisting writers in the creation of stories.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "https://github.com/mil-tokyo/missing-position-prediction", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We would like to thank Yusuke Kurose, Naoyuki Gunji, and Ryohei Shimizu for helpful discussions. This work was supported by JST AIP Acceleration Research Grant Number JPMJCR20U3 and JSPS KAKENHI Grant Number JP20H05556, Japan.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "since the questions were complicated, i was extremely nervous. despite believing that i've failed, i turned the exam in. the teacher handed the exams back to us the next day. i ended up receiving a b.GT i took my class final in math today. since the questions were complicated, i was extremely nervous. despite believing that i've failed, i turned the exam in. the teacher handed the exams back to us the next day. i ended up receiving a b. Ours my teacher gave us a test. since the questions were complicated, i was extremely nervous. despite believing that i've failed, i turned the exam in. the teacher handed the exams back to us the next day. i ended up receiving a b.Answers with Reasons (A: GT, B: Ours)both Whether it is a class final or a given test, both stories are the same and therefore both complete. neither both doesn't make sense Ours A is jumbled and does not make sense. B is logically arranged as a story. OursIn \"A,\" it wouldn't make sense that a final exam was handed back in class the next day. Ours B was more appropriate since it is having a continuous flow than A Context tom was at a local park. there was an egg hunt for the kids. tom decided to pick some eggs up. he enjoyed the treats in them.GT tom was at a local park. it was easter. there was an egg hunt for the kids. tom decided to pick some eggs up. he enjoyed the treats in them. Ours tom was at a local park. there was an egg hunt for the kids. tom decided to pick some eggs up. tom was able to get many eggs. he enjoyed the treats in them.Answers with Reasons (A: GT, B: Ours) both both are complete sentences Ours Option B is complete as it says that tom was able to get some eggs in the hunt. both Both of them can be considered complete. Story A tells us it is Easter (and story B doesn't) while Story B tells us Tom picked many eggs (and story A doesn't). Both of those details could be removed and the stories would still be the same.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Context", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The fact that he was able to gather some eggs was more complete than just deciding to pick up some eggs. Story A Easter gave a better time context but did not really add as much to the story since traditionally an egg hunt is held on Easter so the omission of that in Story B was made up for Tom being able to gather some eggs. both Both stories have a starting, content and ending.Context timothy loved to dance. timothy didn't have much confidence in himself. it took everything he had to dance with all of his self doubt. everyone loved his dancing and he won the contest.GT timothy loved to dance. there was a dance contest that was coming up soon. timothy didn't have much confidence in himself. it took everything he had to dance with all of his self doubt. everyone loved his dancing and he won the contest. Ours timothy loved to dance. he decided to take dance lessons. timothy didn't have much confidence in himself. it took everything he had to dance with all of his self doubt. everyone loved his dancing and he won the contest.Answers with Reasons (A: Ours, B: GT)GT Only B makes sense and a complete story. GT A is more correct and arranged GT Story A doesn't mention the contest which Timothy ends up winning, therefore misses an important piece of the story. GT Story B mentions that there was a dance contest at the start and that he won it at the end. Story A only mentions a contest abruptly at the end making it seem out of place. GT B is more good Table 4 : Examples of original and autocompleted stories, followed by answers and reasoning by MTurk workers. The GT was not originally lowercased, but it was lowercased in our pair-wise evaluation task to compare with autocomplete stories. Additionally, the context given to the model is not lowercased, but it is lowercased here to make it easier to compare with the GT and our proposed method.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 1468, |
|
"end": 1475, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Ours", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "The Hero with a Thousand Faces", |
|
"authors": [ |
|
{ |
|
"first": "Joseph", |
|
"middle": [], |
|
"last": "Campbell", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1949, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joseph Campbell. 1949. The Hero with a Thousand Faces. Pantheon Books.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Unsupervised learning of narrative event chains", |
|
"authors": [ |
|
{ |
|
"first": "Nathanael", |
|
"middle": [], |
|
"last": "Chambers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Jurafsky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of the 46th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "789--797", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nathanael Chambers and Dan Jurafsky. 2008. Unsupervised learning of narrative event chains. In Proceedings of the 46th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies, pages 789-797, Columbus, Ohio, June. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Learning phrase representations using rnn encoder-decoder for statistical machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bart", |
|
"middle": [], |
|
"last": "Van Merri\u00ebnboer", |
|
"suffix": "" |
|
}, |
|
{
"first": "\u00c7aglar",
"middle": [],
"last": "G\u00fcl\u00e7ehre",
"suffix": ""
},
{
"first": "Dzmitry",
"middle": [],
"last": "Bahdanau",
"suffix": ""
},
{
"first": "Fethi",
"middle": [],
"last": "Bougares",
"suffix": ""
},
{
"first": "Holger",
"middle": [],
"last": "Schwenk",
"suffix": ""
},
{
"first": "Yoshua",
"middle": [],
"last": "Bengio",
"suffix": ""
}
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1724--1734", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kyunghyun Cho, Bart van Merri\u00ebnboer, \u00c7 aglar G\u00fcl\u00e7ehre, Dzmitry Bahdanau, Fethi Bougares, Holger Schwenk, and Yoshua Bengio. 2014. Learning phrase representations using rnn encoder-decoder for statistical machine translation. In Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing, pages 1724-1734, Doha, Qatar, October. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "4171--4186", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirec- tional transformers for language understanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota, June. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Enabling language models to fill in the blanks", |
|
"authors": [ |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Donahue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mina", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Percy", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2492--2501", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chris Donahue, Mina Lee, and Percy Liang. 2020. Enabling language models to fill in the blanks. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 2492-2501, Online, July. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Hierarchical neural story generation", |
|
"authors": [ |
|
{ |
|
"first": "Angela", |
|
"middle": [], |
|
"last": "Fan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yann", |
|
"middle": [], |
|
"last": "Dauphin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "889--898", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Angela Fan, Mike Lewis, and Yann Dauphin. 2018. Hierarchical neural story generation. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 889-898, Melbourne, Australia, July. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "The Screenwriter's Workbook, Revised Edition", |
|
"authors": [ |
|
{ |
|
"first": "Syd", |
|
"middle": [], |
|
"last": "Field", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Syd Field. 2006. The Screenwriter's Workbook, Revised Edition. Delta Trade Paperbacks.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Plan, Write, and Revise: an interactive system for open-domain story generation", |
|
"authors": [ |
|
{ |
|
"first": "Seraphina", |
|
"middle": [], |
|
"last": "Goldfarb-Tarrant", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Haining", |
|
"middle": [], |
|
"last": "Feng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nanyun", |
|
"middle": [], |
|
"last": "Peng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics (Demonstrations)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "89--97", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Seraphina Goldfarb-Tarrant, Haining Feng, and Nanyun Peng. 2019. Plan, Write, and Revise: an interactive system for open-domain story generation. In Proceedings of the 2019 Conference of the North American Chap- ter of the Association for Computational Linguistics (Demonstrations), pages 89-97, Minneapolis, Minnesota, June. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Story ending generation with incremental encoding and commonsense knowledge", |
|
"authors": [ |
|
{ |
|
"first": "Jian", |
|
"middle": [], |
|
"last": "Guan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yansen", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Minlie", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Thirty-Third AAAI Conference on Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "6473--6480", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jian Guan, Yansen Wang, and Minlie Huang. 2019. Story ending generation with incremental encoding and commonsense knowledge. In Proceedings of the Thirty-Third AAAI Conference on Artificial Intelligence, pages 6473-6480, Honolulu, Hawaii, January-February. AAAI Press.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "INSET: Sentence infilling with INter-SEntential transformer", |
|
"authors": [ |
|
{ |
|
"first": "Yichen", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yizhe", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oussama", |
|
"middle": [], |
|
"last": "Elachqar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yu", |
|
"middle": [], |
|
"last": "Cheng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2502--2515", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yichen Huang, Yizhe Zhang, Oussama Elachqar, and Yu Cheng. 2020. INSET: Sentence infilling with INter- SEntential transformer. In Proceedings of the 58th Annual Meeting of the Association for Computational Lin- guistics, pages 2502-2515, Online, July. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Batch normalization: Accelerating deep network training by reducing internal covariate shift", |
|
"authors": [ |
|
{ |
|
"first": "Sergey", |
|
"middle": [], |
|
"last": "Ioffe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christian", |
|
"middle": [], |
|
"last": "Szegedy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 32nd International Conference on Machine Learning", |
|
"volume": "37", |
|
"issue": "", |
|
"pages": "448--456", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sergey Ioffe and Christian Szegedy. 2015. Batch normalization: Accelerating deep network training by reducing internal covariate shift. In Francis Bach and David Blei, editors, Proceedings of the 32nd International Con- ference on Machine Learning, volume 37 of Proceedings of Machine Learning Research, pages 448-456, Lille, France, July. PMLR.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Unsupervised hierarchical story infilling", |
|
"authors": [ |
|
{ |
|
"first": "Daphne", |
|
"middle": [], |
|
"last": "Ippolito", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Grangier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Douglas", |
|
"middle": [], |
|
"last": "Eck", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the First Workshop on Narrative Understanding", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "37--43", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daphne Ippolito, David Grangier, Chris Callison-Burch, and Douglas Eck. 2019. Unsupervised hierarchical story infilling. In Proceedings of the First Workshop on Narrative Understanding, pages 37-43, Minneapolis, Minnesota, June. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "BART: Denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension", |
|
"authors": [ |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yinhan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naman", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marjan", |
|
"middle": [], |
|
"last": "Ghazvininejad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Abdelrahman", |
|
"middle": [], |
|
"last": "Mohamed", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Veselin", |
|
"middle": [], |
|
"last": "Stoyanov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "7871--7880", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Veselin Stoyanov, and Luke Zettlemoyer. 2020. BART: Denoising sequence-to-sequence pre-training for natural lan- guage generation, translation, and comprehension. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 7871-7880, Online, July. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Generating reasonable and diversified story ending using sequence to sequence model with adversarial training", |
|
"authors": [ |
|
{ |
|
"first": "Zhongyang", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiao", |
|
"middle": [], |
|
"last": "Ding", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ting", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 27th International Conference on Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1033--1043", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhongyang Li, Xiao Ding, and Ting Liu. 2018. Generating reasonable and diversified story ending using sequence to sequence model with adversarial training. In Proceedings of the 27th International Conference on Compu- tational Linguistics, pages 1033-1043, Santa Fe, New Mexico, USA, August. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Storygan: A sequential conditional gan for story visualization", |
|
"authors": [ |
|
{ |
|
"first": "Yitong", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhe", |
|
"middle": [], |
|
"last": "Gan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yelong", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jingjing", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yu", |
|
"middle": [], |
|
"last": "Cheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuexin", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lawrence", |
|
"middle": [], |
|
"last": "Carin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Carlson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianfeng", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yitong Li, Zhe Gan, Yelong Shen, Jingjing Liu, Yu Cheng, Yuexin Wu, Lawrence Carin, David Carlson, and Jianfeng Gao. 2019. Storygan: A sequential conditional gan for story visualization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, June.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Toward a better story end: Collecting human evaluation with reasons", |
|
"authors": [ |
|
{ |
|
"first": "Yusuke", |
|
"middle": [], |
|
"last": "Mori", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hiroaki", |
|
"middle": [], |
|
"last": "Yamane", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yusuke", |
|
"middle": [], |
|
"last": "Mukuta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tatsuya", |
|
"middle": [], |
|
"last": "Harada", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 12th International Conference on Natural Language Generation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "383--390", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yusuke Mori, Hiroaki Yamane, Yusuke Mukuta, and Tatsuya Harada. 2019. Toward a better story end: Collecting human evaluation with reasons. In Proceedings of the 12th International Conference on Natural Language Generation, pages 383-390, Tokyo, Japan, October-November. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Pushmeet Kohli, and James Allen. 2016. A corpus and cloze evaluation for deeper understanding of commonsense stories", |
|
"authors": [ |
|
{ |
|
"first": "Nasrin", |
|
"middle": [], |
|
"last": "Mostafazadeh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nathanael", |
|
"middle": [], |
|
"last": "Chambers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaodong", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Devi", |
|
"middle": [], |
|
"last": "Parikh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dhruv", |
|
"middle": [], |
|
"last": "Batra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lucy", |
|
"middle": [], |
|
"last": "Vanderwende", |
|
"suffix": "" |
|
}, |

{ |

"first": "Pushmeet", |

"middle": [], |

"last": "Kohli", |

"suffix": "" |

}, |

{ |

"first": "James", |

"middle": [], |

"last": "Allen", |

"suffix": "" |

} |
|
], |
|
"year": null, |
|
"venue": "Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "839--849", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nasrin Mostafazadeh, Nathanael Chambers, Xiaodong He, Devi Parikh, Dhruv Batra, Lucy Vanderwende, Push- meet Kohli, and James Allen. 2016. A corpus and cloze evaluation for deeper understanding of commonsense stories. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Compu- tational Linguistics: Human Language Technologies, pages 839-849, San Diego, California, June. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Towards controllable story generation", |
|
"authors": [ |
|
{ |
|
"first": "Nanyun", |
|
"middle": [], |
|
"last": "Peng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marjan", |
|
"middle": [], |
|
"last": "Ghazvininejad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "May", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Knight", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the First Workshop on Storytelling", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "43--49", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nanyun Peng, Marjan Ghazvininejad, Jonathan May, and Kevin Knight. 2018. Towards controllable story gen- eration. In Proceedings of the First Workshop on Storytelling, pages 43-49, New Orleans, Louisiana, June. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Morphology of the Folktale (Translated by L. Scott)", |
|
"authors": [ |
|
{ |
|
"first": "Propp", |
|
"middle": [], |
|
"last": "Vladimir Iakovlevich", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1968, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vladimir IAkovlevich Propp. 1968. Morphology of the Folktale (Translated by L. Scott). University of Texas Press.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Counterfactual story reasoning and generation", |
|
"authors": [ |
|
{ |
|
"first": "Lianhui", |
|
"middle": [], |
|
"last": "Qin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antoine", |
|
"middle": [], |
|
"last": "Bosselut", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ari", |
|
"middle": [], |
|
"last": "Holtzman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chandra", |
|
"middle": [], |
|
"last": "Bhagavatula", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elizabeth", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yejin", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5043--5053", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lianhui Qin, Antoine Bosselut, Ari Holtzman, Chandra Bhagavatula, Elizabeth Clark, and Yejin Choi. 2019. Counterfactual story reasoning and generation. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing, pages 5043-5053, Hong Kong, China, November. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Language models are unsupervised multitask learners", |
|
"authors": [ |
|
{ |
|
"first": "Alec", |
|
"middle": [], |
|
"last": "Radford", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeff", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rewon", |
|
"middle": [], |
|
"last": "Child", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Luan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dario", |
|
"middle": [], |
|
"last": "Amodei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alec Radford, Jeff Wu, Rewon Child, David Luan, Dario Amodei, and Ilya Sutskever. 2019. Language models are unsupervised multitask learners.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Show me a story: Towards coherent neural story illustration", |
|
"authors": [ |
|
{ |
|
"first": "Hareesh", |
|
"middle": [], |
|
"last": "Ravi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lezi", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Carlos", |
|
"middle": [], |
|
"last": "Muniz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Leonid", |
|
"middle": [], |
|
"last": "Sigal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dimitris", |
|
"middle": [], |
|
"last": "Metaxas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mubbasir", |
|
"middle": [], |
|
"last": "Kapadia", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hareesh Ravi, Lezi Wang, Carlos Muniz, Leonid Sigal, Dimitris Metaxas, and Mubbasir Kapadia. 2018. Show me a story: Towards coherent neural story illustration. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, June.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Sentence-BERT: Sentence embeddings using Siamese BERT-networks", |
|
"authors": [ |
|
{ |
|
"first": "Nils", |
|
"middle": [], |
|
"last": "Reimers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iryna", |
|
"middle": [], |
|
"last": "Gurevych", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3980--3990", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nils Reimers and Iryna Gurevych. 2019. Sentence-BERT: Sentence embeddings using Siamese BERT-networks. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th Inter- national Joint Conference on Natural Language Processing, pages 3980-3990, Hong Kong, China, November. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Writing Stories with Help from Recurrent Neural Networks", |
|
"authors": [ |
|
{ |
|
"first": "Melissa", |
|
"middle": [], |
|
"last": "Roemmele", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "AAAI Conference on Artificial Intelligence; Thirtieth AAAI Conference on Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4311--4312", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Melissa Roemmele. 2016. Writing Stories with Help from Recurrent Neural Networks. In AAAI Conference on Artificial Intelligence; Thirtieth AAAI Conference on Artificial Intelligence, pages 4311 -4312, Phoenix, AZ, February. AAAI Press.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Leveraging pre-trained checkpoints for sequence generation tasks", |
|
"authors": [ |
|
{ |
|
"first": "Sascha", |
|
"middle": [], |
|
"last": "Rothe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shashi", |
|
"middle": [], |
|
"last": "Narayan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aliaksei", |
|
"middle": [], |
|
"last": "Severyn", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "8", |
|
"issue": "", |
|
"pages": "264--280", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sascha Rothe, Shashi Narayan, and Aliaksei Severyn. 2020. Leveraging pre-trained checkpoints for sequence generation tasks. Transactions of the Association for Computational Linguistics, 8:264-280.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "SAVE THE CAT! The Last Book on Screenwriting You'll Ever Need", |
|
"authors": [ |
|
{ |
|
"first": "Blake", |
|
"middle": [], |
|
"last": "Snyder", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Blake Snyder. 2005. SAVE THE CAT! The Last Book on Screenwriting You'll Ever Need. Michael Wiese Produc- tions.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Sequence to sequence learning with neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oriol", |
|
"middle": [], |
|
"last": "Vinyals", |
|
"suffix": "" |
|
}, |
|
{ |

"first": "Quoc", |

"middle": ["V."], |

"last": "Le", |

"suffix": "" |

} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 27th International Conference on Neural Information Processing Systems", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "3104--3112", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ilya Sutskever, Oriol Vinyals, and Quoc V. Le. 2014. Sequence to sequence learning with neural networks. In Proceedings of the 27th International Conference on Neural Information Processing Systems -Volume 2, NIPS'14, pages 3104-3112, Cambridge, MA, USA. MIT Press.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "T-CVAE: Transformer-based conditioned variational autoencoder for story completion", |
|
"authors": [ |
|
{ |
|
"first": "Tianming", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaojun", |
|
"middle": [], |
|
"last": "Wan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Twenty-Eighth International Joint Conference on Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5233--5239", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tianming Wang and Xiaojun Wan. 2019. T-CVAE: Transformer-based conditioned variational autoencoder for story completion. In Proceedings of the Twenty-Eighth International Joint Conference on Artificial Intelligence, pages 5233-5239. International Joint Conferences on Artificial Intelligence Organization, July.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "HuggingFace's Transformers: State-ofthe-art natural language processing", |
|
"authors": [ |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Wolf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lysandre", |
|
"middle": [], |
|
"last": "Debut", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Victor", |
|
"middle": [], |
|
"last": "Sanh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julien", |
|
"middle": [], |
|
"last": "Chaumond", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Clement", |
|
"middle": [], |
|
"last": "Delangue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anthony", |
|
"middle": [], |
|
"last": "Moi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pierric", |
|
"middle": [], |
|
"last": "Cistac", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tim", |
|
"middle": [], |
|
"last": "Rault", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R'emi", |
|
"middle": [], |
|
"last": "Louf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Morgan", |
|
"middle": [], |
|
"last": "Funtowicz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jamie", |
|
"middle": [], |
|
"last": "Brew", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "ArXiv", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pierric Cistac, Tim Rault, R'emi Louf, Morgan Funtowicz, and Jamie Brew. 2019. HuggingFace's Transformers: State-of- the-art natural language processing. ArXiv, abs/1910.03771.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "XLNet: Generalized autoregressive pretraining for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Zhilin", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zihang", |
|
"middle": [], |
|
"last": "Dai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yiming", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jaime", |
|
"middle": [], |
|
"last": "Carbonell", |
|
"suffix": "" |
|
}, |
|
{ |

"first": "Russ", |

"middle": ["R"], |

"last": "Salakhutdinov", |

"suffix": "" |

}, |

{ |

"first": "Quoc", |

"middle": ["V"], |

"last": "Le", |

"suffix": "" |

} |
|
], |
|
"year": 2019, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "32", |
|
"issue": "", |
|
"pages": "5753--5763", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhilin Yang, Zihang Dai, Yiming Yang, Jaime Carbonell, Russ R Salakhutdinov, and Quoc V Le. 2019. XLNet: Generalized autoregressive pretraining for language understanding. In H. Wallach, H. Larochelle, A. Beygelz- imer, F. d'Alch\u00e9-Buc, E. Fox, and R. Garnett, editors, Advances in Neural Information Processing Systems 32, pages 5753-5763. Curran Associates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Plan-and-Write: Towards better automatic storytelling", |
|
"authors": [ |
|
{ |
|
"first": "Lili", |
|
"middle": [], |
|
"last": "Yao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nanyun", |
|
"middle": [], |
|
"last": "Peng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Weischedel", |
|
"middle": [], |
|
"last": "Ralph", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Knight", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dongyan", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rui", |
|
"middle": [], |
|
"last": "Yan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Thirty-Third AAAI Conference on Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "7378--7385", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lili Yao, Nanyun Peng, Weischedel Ralph, Kevin Knight, Dongyan Zhao, and Rui Yan. 2019. Plan-and-Write: Towards better automatic storytelling. In Proceedings of the Thirty-Third AAAI Conference on Artificial Intelli- gence, pages 7378-7385, Honolulu, Hawaii, January-February. AAAI Press.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "From plots to endings: A reinforced pointer generator for story ending generation", |
|
"authors": [ |
|
{ |
|
"first": "Yan", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lu", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chunhua", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruoyao", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dong", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of Natural Language Processing and Chinese Computing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yan Zhao, Lu Liu, Chunhua Liu, Ruoyao Yang, and Dong Yu. 2018. From plots to endings: A reinforced pointer generator for story ending generation. In Proceedings of Natural Language Processing and Chinese Computing, volume abs/1901.03459.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"text": "Example of an incomplete story and flow of MPP and SC.", |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF2": { |
|
"num": null, |
|
"text": "Heat maps showing the results of the (a) GRU context and (b) Max-pool context. The ground truth (GT) label is shown on the x-axis and the predicted label is on the y-axis. The squares on the diagonal line denote correct cases. The ratios of the predicted label to the GT label are shown numerically.", |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"TABREF0": { |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"num": null, |
|
"text": "", |
|
"html": null |
|
}, |
|
"TABREF2": { |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"num": null, |
|
"text": "Overview of the dataset used.", |
|
"html": null |
|
}, |
|
"TABREF4": { |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"num": null, |
|
"text": "", |
|
"html": null |
|
} |
|
} |
|
} |
|
} |