|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T13:29:44.168988Z" |
|
}, |
|
"title": "Generating Narrative Text in a Switching Dynamical System", |
|
"authors": [ |
|
{ |
|
"first": "Noah", |
|
"middle": [], |
|
"last": "Weber", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Stony Brook University", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Leena", |
|
"middle": [], |
|
"last": "Shekhar", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Stony Brook University", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Heeyoung", |
|
"middle": [], |
|
"last": "Kwon", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Stony Brook University", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Niranjan", |
|
"middle": [], |
|
"last": "Balasubramanian", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Stony Brook University", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Nathanael", |
|
"middle": [], |
|
"last": "Chambers", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Early work on narrative modeling used explicit plans and goals to generate stories, but the language generation itself was restricted and inflexible. Modern methods use language models for more robust generation, but often lack an explicit representation of the scaffolding and dynamics that guide a coherent narrative. This paper introduces a new model that integrates explicit narrative structure with neural language models, formalizing narrative modeling as a Switching Linear Dynamical System (SLDS). A SLDS is a dynamical system in which the latent dynamics of the system (i.e. how the state vector transforms over time) is controlled by top-level discrete switching variables. The switching variables represent narrative structure (e.g., sentiment or discourse states), while the latent state vector encodes information on the current state of the narrative. This probabilistic formulation allows us to control generation, and can be learned in a semi-supervised fashion using both labeled and unlabeled data. Additionally, we derive a Gibbs sampler for our model that can \"fill in\" arbitrary parts of the narrative, guided by the switching variables. Our filled-in (English language) narratives outperform several baselines on both automatic and human evaluations.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Early work on narrative modeling used explicit plans and goals to generate stories, but the language generation itself was restricted and inflexible. Modern methods use language models for more robust generation, but often lack an explicit representation of the scaffolding and dynamics that guide a coherent narrative. This paper introduces a new model that integrates explicit narrative structure with neural language models, formalizing narrative modeling as a Switching Linear Dynamical System (SLDS). A SLDS is a dynamical system in which the latent dynamics of the system (i.e. how the state vector transforms over time) is controlled by top-level discrete switching variables. The switching variables represent narrative structure (e.g., sentiment or discourse states), while the latent state vector encodes information on the current state of the narrative. This probabilistic formulation allows us to control generation, and can be learned in a semi-supervised fashion using both labeled and unlabeled data. Additionally, we derive a Gibbs sampler for our model that can \"fill in\" arbitrary parts of the narrative, guided by the switching variables. Our filled-in (English language) narratives outperform several baselines on both automatic and human evaluations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "A narrative is a textualized sequence of events that serves as a coherent outline for an actual story (Prince, 2003) . Effective narratives are typically built on top of higher level narrative scaffolds 1 which specify at an abstract level how the story should evolve along different dimensions. Example scaffolds include descriptions of the emotional trajectory of a story (Vonnegut, 1981; Reagan et al., 2016) , the goals of characters throughout * Author now at Microsoft 1 We use the term scaffold as an umbrella term to cover many types of plans and structures that underlie stories. Tom didn't know why his internet speed was so slow.", |
|
"cite_spans": [ |
|
{ |
|
"start": 102, |
|
"end": 116, |
|
"text": "(Prince, 2003)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 374, |
|
"end": 390, |
|
"text": "(Vonnegut, 1981;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 391, |
|
"end": 411, |
|
"text": "Reagan et al., 2016)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 475, |
|
"end": 476, |
|
"text": "1", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{

"text": "Table 1: A sample filled-in narrative generated by our SLDS model, given the first and last sentences as input; the middle three sentences are imputed by our model. Given: \"Tom didn't know why his internet speed was so slow.\" Imputed: \"Tom wasn't sure what to do with his computer.\" Imputed: \"He thought he would fix it himself.\" Imputed: \"Tom was surprisingly good.\" Given: \"Tom was happy to be surfing the internet again.\"",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Introduction",

"sec_num": "1"

},
|
{ |
|
"text": "the story (Meehan, 1977; Turner, 1993) , or the abstract types of events that may occur (Martin et al., 2018) . The parts of a scaffold are generic, and like Propp's originally proposed narrative functions (Propp, 1928) , can be reused across stories. To be fully reusable, one needs to go beyond just identifying what the elements of the narrative scaffold are, and also indicate how each scaffold element changes the properties of the current story state. We refer to the explication of these transformation/transitions as the narrative dynamics.", |
|
"cite_spans": [ |
|
{ |
|
"start": 10, |
|
"end": 24, |
|
"text": "(Meehan, 1977;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 25, |
|
"end": 38, |
|
"text": "Turner, 1993)", |
|
"ref_id": "BIBREF37" |
|
}, |
|
{ |
|
"start": 88, |
|
"end": 109, |
|
"text": "(Martin et al., 2018)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 206, |
|
"end": 219, |
|
"text": "(Propp, 1928)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Prior work on automatic narrative generation has a rich history of modeling both narrative scaffolds and narrative dynamics (Meehan, 1977; Lebowitz, 1985; Turner, 1993; Young, 2006, 2010a) . The modeling of both narrative scaffold and dynamics often imbued these systems with a greater degree of control for the user in generating stories, allowing users to flexibly specify desired outcomes or plot points (or more generally, the state of the narrative) that should be achieved at certain sections of the story. Constrained generation (for example, constraining the story to start and end with particular sentences, such as that given in Table 1) , is an ability these systems often gained for free through the modeling of dynamics.", |
|
"cite_spans": [ |
|
{ |
|
"start": 124, |
|
"end": 138, |
|
"text": "(Meehan, 1977;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 139, |
|
"end": 154, |
|
"text": "Lebowitz, 1985;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 155, |
|
"end": 168, |
|
"text": "Turner, 1993;", |
|
"ref_id": "BIBREF37" |
|
}, |
|
{ |
|
"start": 169, |
|
"end": 188, |
|
"text": "Young, 2006, 2010a)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 639, |
|
"end": 647, |
|
"text": "Table 1)", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Though successful in this regard, this success has only been realized in closed domains, where the narrative scaffolds can be specified in a limited ontology and the dynamics operations can be written by hand (such as e.g. the action schemata of Riedl and Young (2010a)). Neural generation has since helped scale to open domains (Roemmele and Gordon, 2015; Khalifa et al., 2017) but not with the same level of control over the narrative. Several recent works have looked at adding the narrative scaffolding component back into neural text generating systems Martin et al., 2018; Yao et al., 2019; Xu et al., 2018; Fan et al., 2019) . These systems however still do not utilize an explicit model of narrative dynamics, and are thus restricted in the controllability aspect.", |
|
"cite_spans": [ |
|
{ |
|
"start": 329, |
|
"end": 356, |
|
"text": "(Roemmele and Gordon, 2015;", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 357, |
|
"end": 378, |
|
"text": "Khalifa et al., 2017)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 558, |
|
"end": 578, |
|
"text": "Martin et al., 2018;", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 579, |
|
"end": 596, |
|
"text": "Yao et al., 2019;", |
|
"ref_id": "BIBREF42" |
|
}, |
|
{ |
|
"start": 597, |
|
"end": 613, |
|
"text": "Xu et al., 2018;", |
|
"ref_id": "BIBREF41" |
|
}, |
|
{ |
|
"start": 614, |
|
"end": 631, |
|
"text": "Fan et al., 2019)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this work, we show how the insight of modeling the structure of a narrative along with general purpose dynamics can be combined with modern neural network based language models. We do this by explicitly modeling the narrative state with a latent vector, and modeling how this state transforms over time as a Switching Linear Dynamical System (SLDS). We show how this formulation captures the concepts of narrative dynamics and scaffolds in a way compatible with current neural generation systems. Finally we show that, by explicitly modeling these dynamics, our models obtain the ability to \"fill in\" narratives; all without being explicitly trained to do so and with no further training required. We evaluate our model with both human evaluation and several automatic measures 2 and show that our model outperforms several strong baselines.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this section, we give a brief overview of Switching Dynamical systems and how they can be used to capture both a scaffold of the narrative as well as the narrative dynamics. We then describe in detail the components of our model and its relation to existing models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A Switching Dynamical System for Narrative Generation", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The specifics of the narrative (characters, setting, etc.), will differ between stories, but as Propp (1928) notes, the way they transition to the next point in the narrative (what we refer to as \"narrative dynamics\") is often shared. Let's say that, as done often, we represent the 'narrative specifics' at time 2 Evaluation in this paper is done on English text data step 3 i with a latent vector Z i . A natural way to explicitly model how this state evolves over time that fits with the above observation is as a Linear Dynamical System:", |
|
"cite_spans": [ |
|
{ |
|
"start": 96, |
|
"end": 108, |
|
"text": "Propp (1928)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Narrative Dynamics in a Dynamical System", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Z i+1 = AZ i + ; \u223c N (0, \u03a3)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Narrative Dynamics in a Dynamical System", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Where A is a matrix, shared across all narratives, and \u03a3 is a noise term that takes into consideration idiosyncrasies different narratives will have 4 . The fact that the shared transition matrix A is linear means that narratives will have linearly analogous trajectories through time, despite having different details (comparable to stories with different settings but matching structures such as Ran/King Lear, Ulysses/Odyssey, etc). Of course, the fatal flaw of the model is that it assumes there exists only one transition matrix, and thus only one possible way to transition through a narrative!", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Narrative Dynamics in a Dynamical System", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "A more fitting model would thus be a Switching Linear Dynamical System (Ackerson and Fu, 1970; Chang and Athans, 1978; Murphy, 1998) . In an SLDS, we assume there exists a set of K different sets of dynamics,", |
|
"cite_spans": [ |
|
{ |
|
"start": 71, |
|
"end": 94, |
|
"text": "(Ackerson and Fu, 1970;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 95, |
|
"end": 118, |
|
"text": "Chang and Athans, 1978;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 119, |
|
"end": 132, |
|
"text": "Murphy, 1998)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Narrative Scaffolds as Switching Variables", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "{(A 1 , \u03a3 1 ), ...(A K , \u03a3 K )}.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Narrative Scaffolds as Switching Variables", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "At time step i + 1, one of these sets of dynamics is used. The one used depends on the value of a discrete variable at time step i + 1 called the switching variable, S i+1 \u2208 {1, ...K}:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Narrative Scaffolds as Switching Variables", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Z i+1 = A S i+1 Z i + ; \u223c N (0, \u03a3 S i+1 )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Narrative Scaffolds as Switching Variables", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "There is a switching variable S i associated with each time step. The switching variable value itself evolves over time by a prior Markov process, P (S i+1 |S i ) 5 . This top level chain of switching variables thus forms our narrative scaffold, indicating what transitions we must go through in the narrative, with the dynamics matrices indicating how they transition.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Narrative Scaffolds as Switching Variables", |
|
"sec_num": "2.2" |
|
}, |
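
{

"text": "For illustration only (this sketch is not part of our system; the dimensions, number of states, and uniform Markov prior are placeholder assumptions), the SLDS generative story over the latent trajectory can be simulated in a few lines of numpy; each Z_i would then be fed to the conditional language model to produce sentence X_i:\n\nimport numpy as np\n\nrng = np.random.default_rng(0)\nK, d, T = 3, 8, 5                  # K switching states, latent dim d, T sentences (illustrative)\nA = [np.eye(d) + 0.1 * rng.normal(size=(d, d)) for _ in range(K)]   # per-state dynamics A_k\nL = [0.1 * np.eye(d) for _ in range(K)]                             # noise factors, Sigma_k = L_k L_k^T\nP = np.full((K, K), 1.0 / K)       # Markov prior P(S_{i+1} | S_i); uniform here as a placeholder\n\nS = [int(rng.integers(K))]         # narrative scaffold (e.g. sentiment states)\nZ = [rng.normal(size=d)]           # latent narrative state\nfor i in range(1, T):\n    S.append(int(rng.choice(K, p=P[S[-1]])))                     # sample the next switching variable\n    Z.append(A[S[-1]] @ Z[-1] + L[S[-1]] @ rng.normal(size=d))   # Z_{i+1} = A_S Z_i + noise\nprint(S, Z[-1].shape)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Narrative Scaffolds as Switching Variables",

"sec_num": "2.2"

},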
|
{ |
|
"text": "What the switching variables actually represent can be chosen by the user. Straightforward narrative scaffolds include event sequences (Martin et al., 2018) , keywords (Yao et al., 2019) , or latent template ids (Wiseman et al., 2018) . More complex but potentially more informative scaffolds may be created using concepts such as story grammar nonterminals (Lakoff, 1972; Thorndyke, 1977) , or character action taken throughout a story (Riedl and Young, 2010b).", |
|
"cite_spans": [ |
|
{ |
|
"start": 135, |
|
"end": 156, |
|
"text": "(Martin et al., 2018)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 168, |
|
"end": 186, |
|
"text": "(Yao et al., 2019)", |
|
"ref_id": "BIBREF42" |
|
}, |
|
{ |
|
"start": 212, |
|
"end": 234, |
|
"text": "(Wiseman et al., 2018)", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 358, |
|
"end": 372, |
|
"text": "(Lakoff, 1972;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 373, |
|
"end": 389, |
|
"text": "Thorndyke, 1977)", |
|
"ref_id": "BIBREF36" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Narrative Scaffold -Emotional Trajectory", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "In our work, we use the sentiment trajectory of the narrative as the scaffold. That is, each S i for a sentence indicates the overall coarse sentiment of the sentence (Positive, Negative, or Neutral). Though simple, the overall sentiment trajectory of a narrative is important in defining the high level 'shape' of a narrative often shared among different narratives (Vonnegut, 1981; Reagan et al., 2016) . Furthermore, sentiment trajectory has been shown to be fairly useful in story understanding tasks (Chaturvedi et al., 2017; Liu et al., 2018) . We discuss in the conclusion future directions for using different types of scaffolds.", |
|
"cite_spans": [ |
|
{ |
|
"start": 367, |
|
"end": 383, |
|
"text": "(Vonnegut, 1981;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 384, |
|
"end": 404, |
|
"text": "Reagan et al., 2016)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 505, |
|
"end": 530, |
|
"text": "(Chaturvedi et al., 2017;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 531, |
|
"end": 548, |
|
"text": "Liu et al., 2018)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Narrative Scaffold -Emotional Trajectory", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "The final component of the model is a conditional language model that generates sentence i conditioned on the current Z i , and all previous sentences, X :i . Generation continues until an <eos> is reached. This conditional language model may be parameterized as desired, but in this work, we parameterize it as an RNN neural network language model.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Full Model", |
|
"sec_num": "2.4" |
|
}, |
|
|
{ |
|
"text": "Figure 1: SLDS Generative model-S i is a discrete state (sentiment of a sentence in a multi-sentence narrative ). Z i is a continuous latent vector that is conditioned on to generate the ith sentence in the narrative , X i . The dynamics of the narrative are completely captured in the dynamical system controlling the latent vector Z. How to transition from Z i to Z i+1 is determined by the state variable S i+1 . Arrows from X i to X i+2 have been left out for clarity.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Full Model", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "The graphical model for our SLDS is pictured in Figure 1 . The model consists of three sets of variables: (1) Switching variables S 1 , ..., S N , (2) Latent state variables Z 1 , ..., Z N capturing the details of the narrative at sentence i, (3) The sentences themselves", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 48, |
|
"end": 56, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "The Full Model", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "X 1 , ...X N , where each sentence X i has n i words, x i 1 , ...x i n i .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Full Model", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "The joint over all variables factorizes as below into the following components (X :i stands for all sentences before X i ):", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Full Model", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "P (S, Z, X) = ( N i P (X i |Z i , X :i ) ) ( N i P (Z i |Z i\u22121 , S i ) )( N i P (S i |S i\u22121 ) )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Full Model", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "Narrative Scaffold Planner: The factor P (S i |S i\u22121 ) is a transition matrix, which we calculate via count based statistics from training. It is fed in as prior knowledge and fixed.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Full Model", |
|
"sec_num": "2.4" |
|
}, |
|
{

"text": "Narrative Dynamics Network: The factor P(Z_i | Z_{i-1}, S_i) is determined as in a switching linear dynamical system: Z_i = A_{S_i} Z_{i-1} + B_{S_i} \u03b5, with \u03b5 \u223c N(0, I), which is equivalent to drawing Z_i from a Normal distribution with mean A_{S_i} Z_{i-1} and covariance B_{S_i} B_{S_i}^T.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "The Full Model",

"sec_num": "2.4"

},

{

"text": "Conditional Language Model: The factor P(X_i | Z_i, X_{:i}) is parameterized by an RNN language model conditioned on the latent Z_i.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "The Full Model",

"sec_num": "2.4"

},
|
{ |
|
"text": "Due to the conditionals parameterized by neural networks we use amortized variational inference in a manner similar to Variational AutoEncoders (Kingma and Welling, 2013), both to learn an approximate posterior q(S, Z|X) and to learn the generative model parameters by maximizing a lower bound on the data likelihood (ELBO). We assume that the approximate posterior factorizes as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Learning and Posterior Inference", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "q(S, Z|X) = ( N i q(S i |X))( N i q(Z i |Z i\u22121 , S i , X :i , X i ))", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Learning and Posterior Inference", |
|
"sec_num": "3" |
|
}, |
|
{

"text": "Like in VAEs, computing these individual factors is done through a parameterized function called the inference or recognition network, whose parameters are trained jointly with the generative model. In our case there are two forms for the factors in our posterior: (1) The first form, q(S_i | X) = q_{S_i}, is parameterized by a classifier that takes in the set of sentences X and outputs a categorical distribution over the switching variables. (2) The second form, q(Z_i | Z_{i-1}, S_i, X_{:i}, X_i) = q_{Z_i}, is realized by functions f_\u00b5(Z_{i-1}, S_i, X_{:i}, X_i) and f_\u03c3(Z_{i-1}, S_i, X_{:i}, X_i) that output the mean and variance, respectively, of a Gaussian over Z_i.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Learning and Posterior Inference",

"sec_num": "3"

},
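
{

"text": "For illustration, the two posterior factors could be parameterized roughly as follows (this sketch is not our exact architecture; the module layout, latent dimension, and layer names are assumptions):\n\nimport torch\nimport torch.nn as nn\n\nclass Recognition(nn.Module):\n    # q(S_i | X): classifier over K sentiment states; q(Z_i | ...): Gaussian parameters f_mu, f_sigma.\n    def __init__(self, vocab=16983, emb=300, hid=1024, zdim=128, K=3):\n        super().__init__()\n        self.embed = nn.Embedding(vocab, emb)\n        self.encoder = nn.GRU(emb, hid, batch_first=True)\n        self.state_clf = nn.Linear(hid, K)              # logits for q(S_i | X)\n        self.f_mu = nn.Linear(hid + zdim + K, zdim)     # mean of q(Z_i | Z_{i-1}, S_i, X)\n        self.f_sigma = nn.Linear(hid + zdim + K, zdim)  # log-variance of q(Z_i | ...)\n\n    def forward(self, sent_ids, z_prev, s_onehot):\n        h = self.encoder(self.embed(sent_ids))[1][-1]   # final GRU hidden state for the sentence\n        feats = torch.cat([h, z_prev, s_onehot], dim=-1)\n        return self.state_clf(h), self.f_mu(feats), self.f_sigma(feats)\n\nnet = Recognition()\nids = torch.randint(0, 16983, (2, 12))                  # a toy batch of 2 sentences, 12 tokens each\ns_logits, mu, logvar = net(ids, torch.zeros(2, 128), torch.zeros(2, 3))\nz = mu + torch.randn_like(mu) * (0.5 * logvar).exp()    # reparameterized sample of Z_i",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Learning and Posterior Inference",

"sec_num": "3"

},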
|
{ |
|
"text": "Borrowing terminology from VAEs, the approximate posterior (the factors given above) act as an 'encoder', while the generative model from the previous section can be seen as the 'decoder'. This type of training has been previously used in (Krishnan et al., 2015 (Krishnan et al., , 2017 Fraccaro et al., 2016 Fraccaro et al., , 2017 Karl et al., 2017) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 239, |
|
"end": 261, |
|
"text": "(Krishnan et al., 2015", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 262, |
|
"end": 286, |
|
"text": "(Krishnan et al., , 2017", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 287, |
|
"end": 308, |
|
"text": "Fraccaro et al., 2016", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 309, |
|
"end": 332, |
|
"text": "Fraccaro et al., , 2017", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 333, |
|
"end": 351, |
|
"text": "Karl et al., 2017)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Learning and Posterior Inference", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "As mentioned previously, we optimize all parameters (including the variational factor functions) by optimizing a lower bound on the data likelihood. The model may be trained either with supervision labels for the switching states (in our case, sentiment labels) or without supervised labels. If one is training without the sentiment labels, then the lower bound on the marginal likelihood (and thus our optimization objective) may be written as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Lower bound formula & exact training algorithm", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "L = E S 1 ,..S N \u223cq S i M \u2212 N i KL(q S i ||p(S i |S i\u22121 )) where, M = E Z 1 ,..Z N \u223cq Z i N i (log p(X i |Z i ) \u2212 KL(q Z i ||p(Z i |Z i\u22121 , S i )))", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Lower bound formula & exact training algorithm", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "The derivation for this objective is identical to that found in (Krishnan et al., 2017; Fraccaro et al., 2016) , and simply relies on using properties of iterated expectations. All expectations are estimated with Monte Carlo samples.", |
|
"cite_spans": [ |
|
{ |
|
"start": 64, |
|
"end": 87, |
|
"text": "(Krishnan et al., 2017;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 88, |
|
"end": 110, |
|
"text": "Fraccaro et al., 2016)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Lower bound formula & exact training algorithm", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "If training with the sentiment labels S 1 , ..., S N , then the objective is similar (but without the sampling of the switching states), and is augmented with an additional supervision objective as done in Kingma et al. 2014:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Lower bound formula & exact training algorithm", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "L S = M + N i q S i", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Lower bound formula & exact training algorithm", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "The final training procedure for a single narrative is: (1) For each sentence (starting from the first), sample the switching state S i from q(S i |X). 2For each sentence (starting from the first), sample the latent Z i from q(Z i |S i , Z i\u22121 , X). (3) Evaluate the data likelihood and KL term(s) with these samples. (4) Take the gradients of objective w.r.t. all parameters, using the reparameterization trick for q Z i (Kingma and Welling, 2013) or the Gumbel-Softmax 6 trick for q S i (Jang et al., 2017) , and optimize.", |
|
"cite_spans": [ |
|
{ |
|
"start": 489, |
|
"end": 508, |
|
"text": "(Jang et al., 2017)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Lower bound formula & exact training algorithm", |
|
"sec_num": "3.1" |
|
}, |
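
{

"text": "The following self-contained sketch illustrates one such unsupervised step (it is not our implementation: the recognition-network outputs, the per-state prior over Z_i, and the log-likelihood term are random or placeholder stand-ins, and the handling of the prior over S_1 is a simplification), combining the reparameterization trick for q_{Z_i} with the soft Gumbel-Softmax for q_{S_i}:\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.distributions import Normal, Categorical, kl_divergence\n\nN, d, K = 5, 16, 3                                  # sentences, latent dim, sentiment states (toy sizes)\ntrans = torch.full((K, K), 1.0 / K)                 # p(S_i | S_{i-1}); count-based in our model\n\ns_logits = torch.randn(N, K, requires_grad=True)    # stand-in for q(S_i | X) logits\nq_mu = torch.randn(N, d, requires_grad=True)        # stand-in for f_mu outputs\nq_logvar = torch.zeros(N, d, requires_grad=True)    # stand-in for f_sigma outputs\n\nelbo = torch.tensor(0.0)\nz_prev = torch.zeros(d)\ns_prev_probs = torch.full((K,), 1.0 / K)\nfor i in range(N):\n    s_soft = F.gumbel_softmax(s_logits[i], tau=1.0, hard=False)   # soft Gumbel-Softmax sample of S_i\n    q_z = Normal(q_mu[i], (0.5 * q_logvar[i]).exp())\n    z = q_z.rsample()                                             # reparameterized sample of Z_i\n    prior_means = torch.stack([0.9 * z_prev, 1.0 * z_prev, 1.1 * z_prev])   # toy per-state dynamics\n    p_z = Normal((s_soft.unsqueeze(1) * prior_means).sum(0), torch.ones(d))\n    log_px = torch.tensor(-30.0)                                  # placeholder for log p(X_i | Z_i, X_:i)\n    kl_z = kl_divergence(q_z, p_z).sum()\n    kl_s = kl_divergence(Categorical(logits=s_logits[i]), Categorical(probs=s_prev_probs @ trans))\n    elbo = elbo + log_px - kl_z - kl_s\n    z_prev, s_prev_probs = z, s_soft\n(-elbo).backward()                                  # maximizing the ELBO = minimizing its negative",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Lower bound formula & exact training algorithm",

"sec_num": "3.1"

},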
|
{ |
|
"text": "One of the benefits of probabilistic formulation is the possibility (if an inference procedure can be found) of generating narratives with specific constraints, where the constraints may be specified as clamped variables in the model. In this section, we show how narratives may be generated conditioned on arbitrary bits and pieces of the narrative already filled in, using approximate Gibbs sampling. This allows one to, for example, interpolate a narrative given the first and the last sentence (similar to how earlier story generation systems were able to generate with a given end goal in mind). Some examples of these interpolations generated by our system can be found in Table 3 . We give the equations and summarize the algorithm in the next sections.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 679, |
|
"end": 686, |
|
"text": "Table 3", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Interpolations via Gibbs Sampling", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "For our Gibbs sampling algorithm we give the narrative scaffold (switching variables), S 1 , ..., S T \u2208 S and a set of observed sentences, X + . This may be any set of sentences (the first and last, just the second sentence, etc) as inputs to the system. We wish to find values for the unobserved sentences in set X \u2212 by sampling from P (X \u2212 , Z 1 , ..., Z T |S, X + ). We perform this sampling via Gibbs sampling. Two different forms of conditionals need to be derived to do this. One over Z i conditioned on everything else, and one over X i conditioned on everything else.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conditionals for Gibbs Sampling", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "One can show 7 that the distribution over Z i conditioned on everything else will be approximately proportional to the following Gaussian:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conditionals for Gibbs Sampling", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "N Z i+1 (A S i+1 Z i , \u03a3 S i+1 )N Z i (f \u00b5 (\u2022), f \u03c3 (\u2022)) (1) \u221d N Z i (\u00b5 * , \u03a3 * ) where, \u03a3 * = A T S i+1 \u03a3 \u22121 S i+1 A S i+1 + f \u03c3 (\u2022) \u22121 \u22121 \u00b5 * =\u03a3 T * Z i+1 \u03a3 \u22121 S i+1 A S i+1 + f \u00b5 (\u2022) T f \u03c3 (\u2022) \u22121 T", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conditionals for Gibbs Sampling", |
|
"sec_num": "4.1" |
|
}, |
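
{

"text": "Since both factors are Gaussian, this update is a standard product-of-Gaussians computation. A small numpy sketch of Equation 1 (the dimensions, and treating f_\u03c3(\u2022) as a full covariance matrix, are illustrative assumptions):\n\nimport numpy as np\n\ndef gibbs_z_update(A_next, Sigma_next, f_mu, f_sigma, z_next, rng):\n    # Sample Z_i from N(mu*, Sigma*), the (unnormalized) product of the dynamics message from Z_{i+1}\n    # and the recognition network's Gaussian over Z_i. The mean below is the paper's transposed form\n    # of Equation 1 rewritten without the outer transposes, since Sigma* is symmetric.\n    Sigma_next_inv = np.linalg.inv(Sigma_next)\n    prec = A_next.T @ Sigma_next_inv @ A_next + np.linalg.inv(f_sigma)\n    Sigma_star = np.linalg.inv(prec)\n    mu_star = Sigma_star @ (A_next.T @ Sigma_next_inv @ z_next + np.linalg.inv(f_sigma) @ f_mu)\n    return rng.multivariate_normal(mu_star, Sigma_star)\n\nrng = np.random.default_rng(0)\nd = 4                                               # illustrative latent dimension\nz_new = gibbs_z_update(np.eye(d), 0.1 * np.eye(d), np.zeros(d), np.eye(d), rng.normal(size=d), rng)\nprint(z_new.shape)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Conditionals for Gibbs Sampling",

"sec_num": "4.1"

},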
|
{ |
|
"text": "To find the second conditional, one can use the d-separation properties of the graph to find that it is proportional to:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conditionals for Gibbs Sampling", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "P (X i |Z i , Z i+1 , S i , S i+1 , X :i , X i+1 ) \u221d P (X i+1 |X :i , X i , Z i+1 )P (X i |X :i , Z i )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conditionals for Gibbs Sampling", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "These two distributions are simply factors of our conditional language model, and both terms can thus be evaluated easily. In theory, one could use this fact to sample the original conditional via Metropolis-Hastings. Unfortunately, we found this approach to be too slow in practice. We observed that the simple heuristic of deterministically assigning X i to be the greedy decoded output of the conditional language model P (X i |X :i , Z i ) works well, as evidenced by the empirical results. We leave it for future work to research different conditional language model parameterizations allowing easy sampling from this conditional 8", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conditionals for Gibbs Sampling", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "The variables in the Gibbs sampler are first initialized using some heuristics (see Supplemental). After initialization, performing interpolations with Gibbs sampling follows a two step process: First, for each Z i , sample a value Z from equation 1and set Z i to Z . Then, for each X i in X \u2212 , find a new value for X i by running greedy decoding using the conditional language model.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Gibbs Sampling Interpolation Overview", |
|
"sec_num": "4.2" |
|
}, |
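
{

"text": "A high-level sketch of this loop (sample_z and greedy_decode are hypothetical stand-ins for the model components described above, and the clamped sentences are invented for illustration):\n\nimport numpy as np\n\nrng = np.random.default_rng(0)\nT, d = 5, 4                                     # a 5-sentence story, illustrative latent dimension\n\ndef sample_z(i, Z, observed):                   # stand-in: would implement Equation 1\n    return rng.normal(size=d)\n\ndef greedy_decode(i, Z, sents):                 # stand-in: would greedily decode P(X_i | X_:i, Z_i)\n    return '<generated sentence %d>' % i\n\nobserved = {0: 'Tom was stuck in traffic.', 4: 'Tom got home late.'}   # clamped first/last sentences\nsents = [observed.get(i) for i in range(T)]\nZ = [rng.normal(size=d) for _ in range(T)]      # initialization heuristics go here (see Supplemental)\n\nfor sweep in range(50):                         # 50 samples, as in the experiments\n    for i in range(T):\n        Z[i] = sample_z(i, Z, observed)         # resample every Z_i from its conditional\n    for i in range(T):\n        if i not in observed:\n            sents[i] = greedy_decode(i, Z, sents)   # re-decode only the unobserved sentences X^-\nprint(sents)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Gibbs Sampling Interpolation Overview",

"sec_num": "4.2"

},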
|
{ |
|
"text": "We use the ROCStories corpora introduced in Mostafazadeh et al. (2016) . It contains 98,159 short commonsense stories in English as training, and 1,570 stories for validation and test each. Each story in the dataset has five-sentences and captures causal and temporal commonsense relations. We limit our vocabulary size to 16,983 based on a perword frequency cutoff set to 5. For sentiment tags, we automatically tag the entirety of the corpus with the rule based sentiment tagger, Vader (Hutto and Gilbert, 2014), and bucket the polarity scores of Vader into three tags: neutral, negative, and positive. These tags form the label set of the S variables in our SLDS model. We tokenize the stories with Spacy tokenizer (Honnibal and Montani, 2017) . Each sentences in the input narrative has an <eos> tag except for the S2S model discussed below.", |
|
"cite_spans": [ |
|
{ |
|
"start": 44, |
|
"end": 70, |
|
"text": "Mostafazadeh et al. (2016)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 718, |
|
"end": 746, |
|
"text": "(Honnibal and Montani, 2017)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset and Preprocessing", |
|
"sec_num": "5.1" |
|
}, |
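
{

"text": "A sketch of this tagging step with the vaderSentiment package (the \u00b10.05 compound-score thresholds are the common Vader convention, used here as an assumption rather than our exact cutoffs):\n\nfrom vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\n\nanalyzer = SentimentIntensityAnalyzer()\n\ndef sentiment_tag(sentence, pos_thresh=0.05, neg_thresh=-0.05):\n    # Bucket Vader's compound polarity score into the three S labels used by the SLDS.\n    score = analyzer.polarity_scores(sentence)['compound']\n    if score >= pos_thresh:\n        return 'positive'\n    if score <= neg_thresh:\n        return 'negative'\n    return 'neutral'\n\nstory = ['Tom was happy to be surfing the internet again.', 'His internet speed was terribly slow.']\nprint([sentiment_tag(s) for s in story])        # e.g. ['positive', 'negative']",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Dataset and Preprocessing",

"sec_num": "5.1"

},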
|
{ |
|
"text": "The SLDS has RNN encoder and decoder networks with single layer GRU cells of hidden size 1024 and an input embedding size of 300. We train the model using Adam with the defaults used by PyTorch. We stop training when the validation loss does not decrease for 3 consecutive epochs. Training details for all models and baselines remain same as above unless otherwise mentioned.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Switching Linear Dynamical System (SLDS)", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Language Model (LM) : We train a two layer recurrent neural language model with GRU cells of hidden size 512.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baselines", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "We train a two layer neural sequence to sequence model equipped with bi-linear attention function with GRU cells of hidden size 512. Sentiments tags for a narrative (1 for each sentence) are given as input to the model and the corresponding sentences are concatenated together as the output with only one <eos> tag at the end. This model is trained with a 0.1 dropout. This model is comparable to the static model of (Yao et al., 2019) , and other recent works employing a notion of scaffolding into neural generation (albeit adapted for our setting).", |
|
"cite_spans": [ |
|
{ |
|
"start": 417, |
|
"end": 435, |
|
"text": "(Yao et al., 2019)", |
|
"ref_id": "BIBREF42" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sequence-to-Sequence Attention Model (S2S)", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We also train a linear dynamical system as discussed in Section 2.1 as one of our baselines for fair comparisons. Apart from having just a single transition matrix this model has the same architectural details as SLDS.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Linear Dynamical System (LDS)", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "To gauge the usability of semi-supervision, we also train semi-supervised SLDS models with varying amount of labelled sentiment tags unlike the original model which uses 100% tagged data. We refer to these as SLDS-X%, where X is the % labelled data used for training: 1%, 10%, 25%, and 50%.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Semi-Supervised SLDS (SLDS-X%)", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "As described above, our model is able to perform narrative interpolations via an approximate Gibbs sampling procedure. At the core of our evaluations is thus a fill-in-the-sentences task. We provide 1 or 2 sentences, and require the model to generate the rest of the narrative . We evaluate this via automatic evaluations as well as with crowd-sourced human evaluations. We also report perplexity to evaluate the models' ability to fit the data. Lastly, we look at whether the transitions learned by the SLDS models capture what they are intended to capture: does using the transition matrix associated with a sentiment tag (positive/negative/neutral) lead to a generated sentence with that sentiment?", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluations", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "For the SLDS models, the interpolations are generated via the Gibbs sampling algorithm described earlier.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Generating the Interpolations", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "In all experiments for the SLDS models we draw 50 samples (including burn in samples) and output the interpolation that maximizes the probability of the given sentence(s). Since the baselines do not have the means for doing interpolations, we simulate 'interpolations' for the baselines; we draw 1000 samples using top k (with k=15) truncated sampling (conditioned on the given initial sentences, if available). We then output the sample that maximizes the probability of the clamped sentences around which we are interpolating the others. We allow the S2S access to the gold sentiment tags.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Generating the Interpolations", |
|
"sec_num": "6.1" |
|
}, |
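
{

"text": "A sketch of the top-k truncation used at each sampling step for the baselines (the logits are a placeholder; in the actual procedure, the best of the 1000 sampled narratives under the clamped sentences' probability is returned):\n\nimport torch\n\ndef sample_top_k(logits, k=15):\n    # Keep only the k most probable tokens, renormalize, and sample from them.\n    topk_vals, topk_idx = torch.topk(logits, k)\n    probs = torch.softmax(topk_vals, dim=-1)\n    choice = torch.multinomial(probs, num_samples=1)\n    return topk_idx[choice].item()\n\nlogits = torch.randn(100)                        # placeholder next-token logits over a toy vocabulary\nprint([sample_top_k(logits) for _ in range(5)])  # five sampled token ids",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Generating the Interpolations",

"sec_num": "6.1"

},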
|
{ |
|
"text": "To give a lower bound on the performance of the SLDS model, we do not provide it with gold tags. We instead provide the SLDS model with the seminoisy 9 tags that are output from q(S i |X). 10", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Generating the Interpolations", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "We automatically evaluate on four different types of interpolations (where different combinations of sentences are removed and the model is forced to regenerate them), We evaluate the generations with the ROUGE (Lin, 2004) and METEOR (Banerjee and Lavie, 2005 ) metrics using the true sentences as targets. Table 2 shows the automatic evaluation results from interpolations using our proposed models and baselines. The #Sent(s) column indicates which sentence(s) were removed, and then regenerated by the model. We gave the baselines a slight edge over SLDS because they pick the best out of 1000 samples while SLDS is only out of 50. The SLDS models see their largest gain over the baseline models when at least the first sentence is given as an input. The baseline models do better when the first and second sentence need to be imputed. This is likely due to the fact that having access to the earlier sentences allows a better initialization for the Gibbs sampler. Surprisingly, the semi-supervised variants of the SLDS models achieve higher scores. The reasons for this is discussed below in the Perplexity section.", |
|
"cite_spans": [ |
|
{ |
|
"start": 211, |
|
"end": 222, |
|
"text": "(Lin, 2004)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 234, |
|
"end": 259, |
|
"text": "(Banerjee and Lavie, 2005", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 307, |
|
"end": 314, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Automatic Evaluation of Interpolations", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "As automatic evaluation metrics are not sufficient to assess the quality of any creative task such as narrative generation, we measure the quality of the generations through human evaluation of 200 stories on the Amazon Mechanical Turk platform. We provided Turkers with two generated narratives from two different models, each with five sentences. The first and last sentences were fed to each model as input, and the middle three sentences were generated. Each pair of narratives is graded by 3 users each with two tasks: (1) to rank on a scale of 0-3 each of the sentences except the first one on the basis of its coherency with the previous sentence(s) and (2) compare and rank the two narratives based on their overall coherency, ie how well the story connects the starting/ending sentences. Table 4 reports the result of human evaluations of SLDS and baseline generations. We can observe that people preferred narratives generated by SLDS over the ones generated by baseline models (LM and S2S) as they found the former model more coherent, which is an important criteria for narrative generation. 51.3% of the time SLDS generates better narratives than the LM model while LM in turn does it only 35.0% of the times. 13.7% of the generations end up in tie. The mean sentence level coherence score for SLDS is around 12.5% larger than that of the LM, with a slightly lower standard Table 2 : F1 scores for ROUGE-1, 2, and L and METEOR (M) (default mode score) for randomly sampled 500 stories from the test set. #Sents(s) column represents the \"fill in\" sentence(s) that the models generated using Gibbs sampling. Our SLDS models pick the best of 50 samples, the baselines models pick the best of 1000 samples Ed was playing baseball in his yard. Last week I had an idea. He was running down the hill.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 797, |
|
"end": 804, |
|
"text": "Table 4", |
|
"ref_id": "TABREF2" |
|
}, |
|
{ |
|
"start": 1387, |
|
"end": 1394, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Annotation Scheme", |
|
"sec_num": "6.3.1" |
|
}, |
|
{

"text": "Table 3: Example interpolations generated by our system, given the first and last sentences of each narrative as input. Example 1: \"Ed was playing baseball in his yard. He was running down the hill. His ball was coming towards him. It was very scary! Ed was scared.\" Example 2: \"Last week I had an idea. I was so nervous that I decided to make a presentation. I soon found it hard to come up with new ideas. I didn't think it would be so hard. But then, an idea came to me and I was back on track.\" Example 3: \"Ben has always wanted to learn how to play the piano. His parent bought him one. Ben enrolls in a piano class with a local tutor. Ben practiced every day. He gets better with every lesson.\" Example 4: \"Tim was always on his bike during the summer. He had a lot of fun. One day he decided to cut his bike down. He hit a rock and fell off the bike and hit a tree. He broke his arm.\"",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Human Evaluation Results",

"sec_num": "6.3.2"

},
|
{ |
|
"text": "As our models are essentially language models, we evaluated their per-sentence negative log-likelihood and per-word perplexity scores 11 , which can be viewed as an indirect measure of how well a system works as a generative model of narrative text. For the SLDS and LDS models these scores are approximations, an upper bound (the negative of the ELBO) to the actual values. For the other two models the scores are exact. A good model should assign low perplexity scores to its test set. In Table 5 SLDS achieves the lowest scores, implying that it is able to model the data distribution well. In Table 6 we also calculate the perplexity scores for the semisupervised SLDS models to assess the effectiveness of semi-supervised training. Surprisingly, the models with less supervision scored better in terms of perplexity. One possibility for this might be the use of the soft Gumbel-Softmax in the semi-supervised models. The soft Gumbel-Softmax variant does not commit to using a single transition matrix at each time step (instead linearly combining them, weighted by the Softmax weights). This fact may permit the model greater flexibility in fitting the training data. While this leads to better scores in metrics such as perplexity or BLEU, it does leads to transitions that are worse in capturing the properties they should be capturing, as we shall see in the next section. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 491, |
|
"end": 498, |
|
"text": "Table 5", |
|
"ref_id": "TABREF4" |
|
}, |
|
{ |
|
"start": 597, |
|
"end": 604, |
|
"text": "Table 6", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Language Modeling Perplexity Score", |
|
"sec_num": "6.4" |
|
}, |
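
{

"text": "For reference, per-word perplexity here is the exponential of the average per-word negative log-likelihood (for SLDS and LDS, the negative ELBO stands in for the exact NLL, giving an upper bound). A toy computation with made-up numbers:\n\nimport math\n\nsentence_nlls = [21.4, 18.9, 25.3, 19.7, 22.0]      # hypothetical per-sentence NLLs (in nats)\nword_counts = [8, 7, 10, 7, 9]                      # number of words in each sentence\n\nper_sentence_nll = sum(sentence_nlls) / len(sentence_nlls)\nperplexity = math.exp(sum(sentence_nlls) / sum(word_counts))\nprint(round(per_sentence_nll, 2), round(perplexity, 2))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Language Modeling Perplexity Score",

"sec_num": "6.4"

},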
|
{ |
|
"text": "One matter of interest is whether or not the transitions are capturing what they are supposed to capture, appropriate sentiment. Since we used the sentiment tagger Vader for training tags, we again utilize it to evaluate whether using transitions of a certain sentiment actually leads the model to produce outputs with the given sentiment. To perform this evaluation, we give as input to our models (and the S2S baseline) the sentiment tags for a sentence and allow it to generate a sentence conditioned on these sentiment tags. We then tag the generated sentences with Vader and see if the sentiment tags match the originals. We calculate the F1 score across all sentiment tags and report the macro average. In Table 7 we see that having labels is incredibly important for meaningful transitions. There is a large drop in F1 as the amount of labels given to the model is decreased. The SLDS model that is trained with 100% of the labels performs a little better than even S2S, despite not having direct access to the sentiment labels (SLDS only uses the sentiment labels to decide which transition to use while the S2S model uses attention directly on the sentiment labels).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 712, |
|
"end": 719, |
|
"text": "Table 7", |
|
"ref_id": "TABREF6" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Evaluation of Transition Dynamics", |
|
"sec_num": "6.5" |
|
}, |
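
{

"text": "A sketch of this check using vaderSentiment and scikit-learn (the generated sentences below are placeholders rather than actual model outputs; the \u00b10.05 thresholds are the usual Vader convention):\n\nfrom vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\nfrom sklearn.metrics import f1_score\n\nanalyzer = SentimentIntensityAnalyzer()\n\ndef vader_tag(sentence):\n    score = analyzer.polarity_scores(sentence)['compound']\n    return 'positive' if score >= 0.05 else 'negative' if score <= -0.05 else 'neutral'\n\nrequested = ['positive', 'negative', 'neutral', 'positive']   # sentiment tags fed to the model\ngenerated = ['Ben gets better with every lesson.', 'He broke his arm.',\n             'Tom went to the store.', 'Tom was happy to be surfing the internet again.']\n\npredicted = [vader_tag(s) for s in generated]\nprint(f1_score(requested, predicted, average='macro', labels=['positive', 'negative', 'neutral']))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Evaluation of Transition Dynamics",

"sec_num": "6.5"

},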
|
{

"text": "Table 7 (System, Macro F1): S2S 95.8; SLDS-1% 50.2 \u00b1 1.1; SLDS-10% 51.4 \u00b1 1.1; SLDS-25% 58.7 \u00b1 0.4; SLDS-50% 74.6 \u00b1 0.1; SLDS 96.1 \u00b1 0.0.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Evaluation of Transition Dynamics",

"sec_num": "6.5"

},
|
{ |
|
"text": "Story/narrative generation has a rich history in the field of AI. Many early systems were based on structured formalisms for describing common narrative structures (Lakoff, 1972; Thorndyke, 1977; Meehan, 1977) , many being inspired by the initial work of (Propp, 1928) . There has been a swath of recent work that has looked to add some semblance of a 'narrative scaffold' back into generation methods Martin et al., 2018; Yao et al., 2019; Xu et al., 2018) . Many of these methods work as conditional LMs (conditioned directly on the scaffold). This line of work may be combined with our formalization as well, by conditioning the generation on the switching state as well, as done in the model of Barber (2006) . Recent work by Tambwekar et al. (2019) has similar goals to ours in permitting more controlability in generation systems, developing a RL-based system that allows users to specify an end goal for a story (by specifying the event class that is desired to appear at the end). Their work differs from ours in that it does not deal with text directly, modeling only the sequences of events in the narrative. It may be possible to utilize this model as the scaffolding component in our model (utilizing their RL policy for the scaffold planner, rather than the simple Markovian distribution used here).", |
|
"cite_spans": [ |
|
{ |
|
"start": 164, |
|
"end": 178, |
|
"text": "(Lakoff, 1972;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 179, |
|
"end": 195, |
|
"text": "Thorndyke, 1977;", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 196, |
|
"end": 209, |
|
"text": "Meehan, 1977)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 255, |
|
"end": 268, |
|
"text": "(Propp, 1928)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 402, |
|
"end": 422, |
|
"text": "Martin et al., 2018;", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 423, |
|
"end": 440, |
|
"text": "Yao et al., 2019;", |
|
"ref_id": "BIBREF42" |
|
}, |
|
{ |
|
"start": 441, |
|
"end": 457, |
|
"text": "Xu et al., 2018)", |
|
"ref_id": "BIBREF41" |
|
}, |
|
{ |
|
"start": 699, |
|
"end": 712, |
|
"text": "Barber (2006)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 730, |
|
"end": 753, |
|
"text": "Tambwekar et al. (2019)", |
|
"ref_id": "BIBREF35" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "In this paper, we formulated the problem of narrative generation as a switching dynamical system. We showed how this formulation captures notions important in narrative generation, such as narrative dynamics and scaffolds. We developed an approximate Gibbs sampling algorithm for the model that permits the system to generate interpolations conditioned on arbitrary parts of the narrative, and evaluated these interpolations using both human and automatic evaluations. Though in this work we used sentiment tags for our scaffolds/switching variables, future work may look at utilizing different kinds of information to guide the generation of narratives. Utilizing the main predicate of a sentence as a scaffold would be a logical next step, and may prove more informative than the sentiment trajectory. A scaffold such as this can take on many more possible values than a sentiment tag, and as such, it may prove difficult to assign a set of dynamics to each value. Another avenue for future work would deal with this possible problem. One potential solution could be to associate each switching variable value with a (learned) vector in a probability simplex, and use this vector to combine a small set of \"primitive\" dynamics matrices in order to get that value's associated set of dynamics. Table 8 : F1 scores for ROUGE-1, 2, and L and METEOR (M) (default mode score) for randomly sampled 500 stories from the test set. #Sents(s) column represents the \"fill in\" sentence(s) that the models generated using Gibbs sampling. Our SLDS models pick the best of 50 samples, the baselines models pick the best of 1000 samples of some Z i given everything else as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 1295, |
|
"end": 1302, |
|
"text": "Table 8", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future Work", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "P (Z i |Z i\u22121 , Z i+1 , S i , S i+1 , X i , X i\u22121 ) \u221d P (Z i+1 , S i+1 |Z i\u22121 , Z i+1 , S i , S i+1 , X i , X i\u22121 , Z i ) * P (Z i |Z i\u22121 , S i , X i , X i\u22121 ) \u2248 P (Z i+1 , S i+1 |Z i\u22121 , S i , S i+1 , X i , X i\u22121 , Z i ) * Q(Z i |Z i\u22121 , S i , X i , X i\u22121 ) = P (Z i+1 |S i+1 , Z i\u22121 , S i , S i+1 , X i , X i\u22121 , Z i ) * P (S i+1 |S i )Q(Z i |Z i\u22121 , S i , X i , X i\u22121 ) \u221d P (Z i+1 |S i+1 , Z i\u22121 , S i , S i+1 , X i , X i\u22121 , Z i ) * Q(Z i |Z i\u22121 , S i , X i , X i\u22121 ) = P (Z i+1 |S i+1 , Z i )Q(Z i |Z i\u22121 , S i , X i , X i\u22121 ) = N Z i+1 (A S i+1 Z i , \u03a3 S i+1 )N Z i (f \u00b5 (\u2022), f \u03c3 (\u2022))", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future Work", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "The rest can be derived by taking the PDFs of the two Gaussian densities above, getting rid of constants that don't depend on Z i , multiplying them together, and completing the square to obtain the numerator of a Gaussian over Z i (such that Z i appears nowhere else in the equation). This numerator can then be multiplied by the normalizing constant (that does not depend on Z i ) to obtain exactly a Gaussian pdf with the mean and variance as given below:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future Work", |
|
"sec_num": "8" |
|
}, |
|
{

"text": "N_{Z_{i+1}}(A_{S_{i+1}} Z_i, \u03a3_{S_{i+1}}) N_{Z_i}(f_\u00b5(\u2022), f_\u03c3(\u2022)) \u221d N_{Z_i}(\u00b5*, \u03a3*), where \u03a3* = (A_{S_{i+1}}^T \u03a3_{S_{i+1}}^{-1} A_{S_{i+1}} + f_\u03c3(\u2022)^{-1})^{-1} and \u00b5* = \u03a3*^T (Z_{i+1}^T \u03a3_{S_{i+1}}^{-1} A_{S_{i+1}} + f_\u00b5(\u2022)^T f_\u03c3(\u2022)^{-1})^T",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Conclusion and Future Work",

"sec_num": "8"

},
|
{ |
|
"text": "We evaluate the quality of sentences of 20 generated narratives are not considered coherent by Turkers. We find that, in a broader context, 16 stories out of 20 are not good enough in terms of connecting the ending with the previous sentences. Also, in 14 out of 20 stories, mild off-topic sentences are introduced, which are aligned with the main topic of the story but not along with local coherency (i.e. the previous and the next sentences). When considering a narrower context, or sentence level, we confirm that only 9 out of 60 generated sentences are ungrammatical, so they fail to deliver their meaning.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Manual Error Analysis of Generations", |
|
"sec_num": "10" |
|
}, |
|
{ |
|
"text": "Initializing Z : We first initialize the Z variables in the sampler. This is done as follows: The Z variables are initialized in increasing order. If sentence X i is provided as input, then we sample from the approximate posterior q Z in order to initialize Z i . If X i is missing, then we sample using the dynamics distribution, P (Z i |Z i\u22121 , S i ). Since we initialize in increasing order, we are guaranteed to have Z i\u22121 .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Initializing the Gibbs Sampler", |
|
"sec_num": "11" |
|
}, |
|
{ |
|
"text": "Initializing X : We next initialize the missing text. Initializing the missing text X i is simply done by greedy decoding from the language model conditioned on Z i , and previous sentences.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Initializing the Gibbs Sampler", |
|
"sec_num": "11" |
|
}, |
|
{ |
|
"text": "Below in Table 8 we provide the results for the automatic experiments with gold labels fed to our method. We find a sizable increase in performance by providing the gold labels, and thus use the automatic labels as noisy proxies in order to establish a lower bound on performance.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 9, |
|
"end": 16, |
|
"text": "Table 8", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Automatic Evaluation Results with Gold Labels", |
|
"sec_num": "12" |
|
}, |
|
{ |
|
"text": "In our case, we take each sentence in the narrative to be a different timestep. Different levels of granularity for a timestep may be more befitting for other domains.4 Note that a bias term may also be added here (we do this in our implementation). We leave the bias off here for clarity5 Other ways to formulate this transformation are also possible(Barber, 2006;Linderman et al., 2016). The Markov assumption is a common one and we use it here for simplicity.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We use the soft variant of Gumbel-Softmax. Rather than forcing a hard choice for Si, we directly use the Gumbel-Softmax output and combine the transition matrices via a convex combination", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
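As a rough illustration of this footnote (not the authors' code; tensor shapes and names are assumptions), the following PyTorch sketch forms the effective transition matrix as a convex combination of the per-state matrices, weighted by a soft Gumbel-Softmax sample.

```python
import torch
import torch.nn.functional as F

def soft_transition(state_logits, transition_matrices, tau=1.0):
    """Combine K transition matrices with soft Gumbel-Softmax weights.

    state_logits        : shape (K,), unnormalized scores for the switching state S_i
    transition_matrices : shape (K, d, d), the matrices A_1..A_K
    """
    w = F.gumbel_softmax(state_logits, tau=tau, hard=False)   # soft one-hot over the K states
    return torch.einsum('k,kij->ij', w, transition_matrices)  # sum_k w_k * A_k

# Illustrative usage: 3 switching states, 4-dimensional latent
A_eff = soft_transition(torch.randn(3), torch.randn(3, 4, 4), tau=0.5)
```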
|
{ |
|
"text": "See Supplemental for derivation 8 One possibility is take advantage of 'orderless' pretrained models through sampling(Wang and Cho, 2019). Approaches such as the one recently proposed inDonahue et al. (2020) may also be useful for this purpose.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "To confirm that these tags are noisier, we repeat the experiments inTable 2, but feed gold tags to our models. We find that the gold tags lead to a sizable increase in performance for our model. See Appendix for more detail.10 Note that missing sentences, X, are used only for computing these noisy tags.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Note that since S2S appends the eos token only at the end, its per-sentence NLL is slightly lower than that of LM.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "With the d-separation properties of the graph (and substituting in our variational posterior approximation Q), we can write the conditional distribution", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Appendix 9 Gibbs Sampling Derivation", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "On state estimation in switching environments", |
|
"authors": [ |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Ackerson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Fu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1970, |
|
"venue": "IEEE Transactions on Automatic Control", |
|
"volume": "15", |
|
"issue": "1", |
|
"pages": "10--17", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "G Ackerson and K Fu. 1970. On state estimation in switching environments. IEEE Transactions on Au- tomatic Control, 15(1):10-17.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "METEOR: An automatic metric for MT evaluation with improved correlation with human judgments", |
|
"authors": [ |
|
{ |
|
"first": "Satanjeev", |
|
"middle": [], |
|
"last": "Banerjee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alon", |
|
"middle": [], |
|
"last": "Lavie", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of the ACL Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "65--72", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Satanjeev Banerjee and Alon Lavie. 2005. METEOR: An automatic metric for MT evaluation with im- proved correlation with human judgments. In Pro- ceedings of the ACL Workshop on Intrinsic and Ex- trinsic Evaluation Measures for Machine Transla- tion and/or Summarization, pages 65-72, Ann Ar- bor, Michigan. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Expectation correction for smoothed inference in switching linear dynamical systems", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Barber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Journal of Machine Learning Research", |
|
"volume": "7", |
|
"issue": "", |
|
"pages": "2515--2540", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David Barber. 2006. Expectation correction for smoothed inference in switching linear dynamical systems. Journal of Machine Learning Research, 7(Nov):2515-2540.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "State estimation for discrete systems with switching parameters", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [ |
|
"B" |
|
], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Athans", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1978, |
|
"venue": "IEEE Transactions on Aerospace and Electronic Systems, AES", |
|
"volume": "14", |
|
"issue": "3", |
|
"pages": "418--425", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/TAES.1978.308603" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "C. B. Chang and M. Athans. 1978. State estimation for discrete systems with switching parameters. IEEE Transactions on Aerospace and Electronic Systems, AES-14(3):418-425.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Story comprehension for predicting what happens next", |
|
"authors": [ |
|
{ |
|
"first": "Snigdha", |
|
"middle": [], |
|
"last": "Chaturvedi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Haoruo", |
|
"middle": [], |
|
"last": "Peng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Roth", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1603--1614", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D17-1168" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Snigdha Chaturvedi, Haoruo Peng, and Dan Roth. 2017. Story comprehension for predicting what hap- pens next. In Proceedings of the 2017 Conference on Empirical Methods in Natural Language Process- ing, pages 1603-1614, Copenhagen, Denmark. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Enabling language models to fill in the blanks", |
|
"authors": [ |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Donahue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mina", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Percy", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2492--2501", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.225" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chris Donahue, Mina Lee, and Percy Liang. 2020. En- abling language models to fill in the blanks. In Pro- ceedings of the 58th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 2492- 2501, Online. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Hierarchical neural story generation", |
|
"authors": [ |
|
{ |
|
"first": "Angela", |
|
"middle": [], |
|
"last": "Fan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yann", |
|
"middle": [], |
|
"last": "Dauphin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Angela Fan, Mike Lewis, and Yann Dauphin. Hierar- chical neural story generation. In Proceedings of the 56th Annual Meeting of the Association for Compu- tational Linguistics (Volume 1: Long Papers).", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Strategies for structuring story generation", |
|
"authors": [ |
|
{ |
|
"first": "Angela", |
|
"middle": [], |
|
"last": "Fan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yann", |
|
"middle": [], |
|
"last": "Dauphin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2650--2660", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Angela Fan, Mike Lewis, and Yann Dauphin. 2019. Strategies for structuring story generation. In Pro- ceedings of the 57th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 2650- 2660.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "A disentangled recognition and nonlinear dynamics model for unsupervised learning", |
|
"authors": [ |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Fraccaro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Simon", |
|
"middle": [], |
|
"last": "Kamronn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ulrich", |
|
"middle": [], |
|
"last": "Paquet", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ole", |
|
"middle": [], |
|
"last": "Winther", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marco Fraccaro, Simon Kamronn, Ulrich Paquet, and Ole Winther. 2017. A disentangled recognition and nonlinear dynamics model for unsupervised learn- ing. In NeurIPS 2017.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Sequential neural models with stochastic layers", |
|
"authors": [ |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Fraccaro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "S\u00f8ren Kaae", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ulrich", |
|
"middle": [], |
|
"last": "S\u00f8nderby", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ole", |
|
"middle": [], |
|
"last": "Paquet", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Winther", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "NeurIPS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marco Fraccaro, S\u00f8ren Kaae S\u00f8nderby, Ulrich Paquet, and Ole Winther. 2016. Sequential neural models with stochastic layers. In NeurIPS 2016.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "2017. spaCy 2: Natural language understanding with Bloom embeddings, convolutional neural networks and incremental parsing", |
|
"authors": [ |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Honnibal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ines", |
|
"middle": [], |
|
"last": "Montani", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew Honnibal and Ines Montani. 2017. spaCy 2: Natural language understanding with Bloom embed- dings, convolutional neural networks and incremen- tal parsing. To appear.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Vader: A parsimonious rule-based model for sentiment analysis of social media text", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Clayton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Hutto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Gilbert", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "AAAI conference on weblogs and social media", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Clayton J Hutto and Eric Gilbert. 2014. Vader: A parsi- monious rule-based model for sentiment analysis of social media text. In AAAI conference on weblogs and social media.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Categorical reparameterization with gumbel-softmax", |
|
"authors": [ |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Jang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shixiang", |
|
"middle": [], |
|
"last": "Gu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ben", |
|
"middle": [], |
|
"last": "Poole", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "ICLR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eric Jang, Shixiang Gu, and Ben Poole. 2017. Cate- gorical reparameterization with gumbel-softmax. In ICLR.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Deep variational bayes filters: Unsupervised learning of state space models from raw data", |
|
"authors": [ |
|
{ |
|
"first": "Maximilian", |
|
"middle": [], |
|
"last": "Karl", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maximilian", |
|
"middle": [], |
|
"last": "S\u00f6lch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Justin", |
|
"middle": [], |
|
"last": "Bayer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Patrick", |
|
"middle": [], |
|
"last": "Van Der", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Smagt", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Maximilian Karl, Maximilian S\u00f6lch, Justin Bayer, and Patrick van der Smagt. 2017. Deep variational bayes filters: Unsupervised learning of state space models from raw data. ICLR.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Autoencoding variational bayes", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Diederik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Max", |
|
"middle": [], |
|
"last": "Kingma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Welling", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1312.6114" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diederik P Kingma and Max Welling. 2013. Auto- encoding variational bayes. arXiv preprint arXiv:1312.6114.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Semi-supervised learning with deep generative models", |
|
"authors": [ |
|
{ |
|
"first": "Shakir", |
|
"middle": [], |
|
"last": "Durk P Kingma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danilo", |
|
"middle": [], |
|
"last": "Mohamed", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Max", |
|
"middle": [], |
|
"last": "Jimenez Rezende", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Welling", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "NeurIPS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Durk P Kingma, Shakir Mohamed, Danilo Jimenez Rezende, and Max Welling. 2014. Semi-supervised learning with deep generative models. In NeurIPS 2014.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Deep kalman filters. CoRR", |
|
"authors": [ |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Rahul", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Uri", |
|
"middle": [], |
|
"last": "Krishnan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Shalit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Sontag", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rahul G. Krishnan, Uri Shalit, and David Sontag. 2015. Deep kalman filters. CoRR.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Structured inference networks for nonlinear state space models", |
|
"authors": [ |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Rahul", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Uri", |
|
"middle": [], |
|
"last": "Krishnan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Shalit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Sontag", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rahul G. Krishnan, Uri Shalit, and David Sontag. 2017. Structured inference networks for nonlinear state space models. In AAAI 2017.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Structural complexity in fairy tales", |
|
"authors": [ |
|
{ |
|
"first": "George", |
|
"middle": [], |
|
"last": "Lakoff", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1972, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "George Lakoff. 1972. Structural complexity in fairy tales.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Story-telling as planning and learning", |
|
"authors": [ |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Lebowitz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1985, |
|
"venue": "Poetics", |
|
"volume": "14", |
|
"issue": "6", |
|
"pages": "483--502", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michael Lebowitz. 1985. Story-telling as planning and learning. Poetics, 14(6):483-502.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Rouge: A package for automatic evaluation of summaries", |
|
"authors": [ |
|
{ |
|
"first": "Chin-Yew", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chin-Yew Lin. 2004. Rouge: A package for automatic evaluation of summaries. In ACL 2004.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Narrative modeling with memory chains and semantic supervision", |
|
"authors": [ |
|
{ |
|
"first": "Fei", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Trevor", |
|
"middle": [], |
|
"last": "Cohn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Timothy", |
|
"middle": [], |
|
"last": "Baldwin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "278--284", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P18-2045" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Fei Liu, Trevor Cohn, and Timothy Baldwin. 2018. Narrative modeling with memory chains and seman- tic supervision. In Proceedings of the 56th An- nual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pages 278- 284, Melbourne, Australia. Association for Compu- tational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Event representations for automated story generation with deep neural nets", |
|
"authors": [ |
|
{ |
|
"first": "Lara", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Martin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Prithviraj", |
|
"middle": [], |
|
"last": "Ammanabrolu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "William", |
|
"middle": [], |
|
"last": "Hancock", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shruti", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brent", |
|
"middle": [], |
|
"last": "Harrison", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [ |
|
"O" |
|
], |
|
"last": "Riedl", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lara J. Martin, Prithviraj Ammanabrolu, William Han- cock, Shruti Singh, Brent Harrison, and Mark O. Riedl. 2018. Event representations for automated story generation with deep neural nets. AAAI.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Tale-spin, an interactive program that writes stories", |
|
"authors": [ |
|
{ |
|
"first": "James", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Meehan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1977, |
|
"venue": "IJCAI", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "91--98", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "James R. Meehan. 1977. Tale-spin, an interactive pro- gram that writes stories. In IJCAI, pages 91-98.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"authors": [ |
|
{ |
|
"first": "Nasrin", |
|
"middle": [], |
|
"last": "Mostafazadeh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nathanael", |
|
"middle": [], |
|
"last": "Chambers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaodong", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Devi", |
|
"middle": [], |
|
"last": "Parikh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dhruv", |
|
"middle": [], |
|
"last": "Batra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lucy", |
|
"middle": [], |
|
"last": "Vanderwende", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pushmeet", |
|
"middle": [], |
|
"last": "Kohli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [ |
|
"F" |
|
], |
|
"last": "Allen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "839--849", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N16-1098" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nasrin Mostafazadeh, Nathanael Chambers, Xiaodong He, Devi Parikh, Dhruv Batra, Lucy Vanderwende, Pushmeet Kohli, and James F. Allen. 2016. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Computa- tional Linguistics: Human Language Technologies, pages 839-849, San Diego, California. Association for Computational Linguistics. [link].", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Switching kalman filters", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Kevin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Murphy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kevin P Murphy. 1998. Switching kalman filters.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "A dictionary of narratology", |
|
"authors": [ |
|
{ |
|
"first": "Gerald", |
|
"middle": [], |
|
"last": "Prince", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gerald Prince. 2003. A dictionary of narratology.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Morphology of the Folktale", |
|
"authors": [ |
|
{ |
|
"first": "Vladimir", |
|
"middle": [], |
|
"last": "Propp", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1928, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vladimir Propp. 1928. Morphology of the Folktale.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "The emotional arcs of stories are dominated by six basic shapes", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Andrew", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lewis", |
|
"middle": [], |
|
"last": "Reagan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dilan", |
|
"middle": [], |
|
"last": "Mitchell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Kiley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Christopher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [ |
|
"Sheridan" |
|
], |
|
"last": "Danforth", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Dodds", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "EPJ Data Science", |
|
"volume": "5", |
|
"issue": "1", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andrew J Reagan, Lewis Mitchell, Dilan Kiley, Christopher M Danforth, and Peter Sheridan Dodds. 2016. The emotional arcs of stories are dominated by six basic shapes. EPJ Data Science, 5(1):31.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Story planning as exploratory creativity: Techniques for expanding the narrative search space", |
|
"authors": [ |
|
{ |
|
"first": "O", |
|
"middle": [], |
|
"last": "Mark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [ |
|
"Michael" |
|
], |
|
"last": "Riedl", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Young", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "New Generation Computing", |
|
"volume": "24", |
|
"issue": "3", |
|
"pages": "303--323", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1007/BF03037337" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mark O. Riedl and R. Michael Young. 2006. Story planning as exploratory creativity: Techniques for expanding the narrative search space. New Genera- tion Computing, 24(3):303-323.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Narrative planning: Balancing plot and character", |
|
"authors": [ |
|
{ |
|
"first": "O", |
|
"middle": [], |
|
"last": "Mark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robert", |
|
"middle": [ |
|
"Michael" |
|
], |
|
"last": "Riedl", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Young", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "J. Artif. Intell. Res", |
|
"volume": "39", |
|
"issue": "", |
|
"pages": "217--268", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1613/jair.2989" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mark O. Riedl and Robert Michael Young. 2010a. Nar- rative planning: Balancing plot and character. J. Ar- tif. Intell. Res., 39:217-268.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Narrative planning: Balancing plot and character", |
|
"authors": [ |
|
{ |
|
"first": "O", |
|
"middle": [], |
|
"last": "Mark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robert", |
|
"middle": [ |
|
"Michael" |
|
], |
|
"last": "Riedl", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Young", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "J. Artif. Intell. Res", |
|
"volume": "39", |
|
"issue": "", |
|
"pages": "217--268", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mark O. Riedl and Robert Michael Young. 2010b. Nar- rative planning: Balancing plot and character. J. Ar- tif. Intell. Res., 39:217-268.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Creative help: a story writing assistant", |
|
"authors": [ |
|
{ |
|
"first": "Melissa", |
|
"middle": [], |
|
"last": "Roemmele", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew S", |
|
"middle": [], |
|
"last": "Gordon", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "International Conference on Interactive Digital Storytelling", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "81--92", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Melissa Roemmele and Andrew S Gordon. 2015. Cre- ative help: a story writing assistant. In Interna- tional Conference on Interactive Digital Storytelling, pages 81-92. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Controllable neural story generation via reinforcement learning. IJCAI", |
|
"authors": [ |
|
{ |
|
"first": "Pradyumna", |
|
"middle": [], |
|
"last": "Tambwekar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Murtaza", |
|
"middle": [], |
|
"last": "Dhuliawala", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Animesh", |
|
"middle": [], |
|
"last": "Mehta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Lara", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brent", |
|
"middle": [], |
|
"last": "Martin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [ |
|
"O" |
|
], |
|
"last": "Harrison", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Riedl", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pradyumna Tambwekar, Murtaza Dhuliawala, Ani- mesh Mehta, Lara J Martin, Brent Harrison, and Mark O Riedl. 2019. Controllable neural story gen- eration via reinforcement learning. IJCAI.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Cognitive structures in comprehension and memory of narrative discourse", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Perry W Thorndyke", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1977, |
|
"venue": "Cognitive psychology", |
|
"volume": "9", |
|
"issue": "1", |
|
"pages": "77--110", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Perry W Thorndyke. 1977. Cognitive structures in comprehension and memory of narrative discourse. Cognitive psychology, 9(1):77-110.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Minstrel: A Computer Model of Creativity and Storytelling", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Scott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Turner", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1993, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Scott R. Turner. 1993. Minstrel: A Computer Model of Creativity and Storytelling. Ph.D. thesis.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "BERT has a mouth, and it must speak: BERT as a Markov random field language model", |
|
"authors": [ |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Workshop on Methods for Optimizing and Evaluating Neural Language Generation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "30--36", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W19-2304" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alex Wang and Kyunghyun Cho. 2019. BERT has a mouth, and it must speak: BERT as a Markov random field language model. In Proceedings of the Workshop on Methods for Optimizing and Eval- uating Neural Language Generation, pages 30-36, Minneapolis, Minnesota. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "Learning neural templates for text generation", |
|
"authors": [ |
|
{ |
|
"first": "Sam", |
|
"middle": [], |
|
"last": "Wiseman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Stuart", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander M", |
|
"middle": [], |
|
"last": "Shieber", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Rush", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3174--3187", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D18-1356" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sam Wiseman, Stuart M Shieber, and Alexander M Rush. 2018. Learning neural templates for text gen- eration. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 3174-3187, Brussels, Belgium. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "A skeleton-based model for promoting coherence among sentences in narrative story generation", |
|
"authors": [ |
|
{ |
|
"first": "Jingjing", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yi", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qi", |
|
"middle": [], |
|
"last": "Zeng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xuancheng", |
|
"middle": [], |
|
"last": "Ren", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaoyan", |
|
"middle": [], |
|
"last": "Cai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xu", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4306--4315", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D18-1462" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jingjing Xu, Yi Zhang, Qi Zeng, Xuancheng Ren, Xi- aoyan Cai, and Xu Sun. 2018. A skeleton-based model for promoting coherence among sentences in narrative story generation. In Proceedings of the 2018 Conference on Empirical Methods in Natu- ral Language Processing, pages 4306-4315, Brus- sels, Belgium. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF42": { |
|
"ref_id": "b42", |
|
"title": "Planand-write: Towards better automatic storytelling", |
|
"authors": [ |
|
{ |
|
"first": "Lili", |
|
"middle": [], |
|
"last": "Yao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nanyun", |
|
"middle": [], |
|
"last": "Peng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ralph", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Weischedel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Knight", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dongyan", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rui", |
|
"middle": [], |
|
"last": "Yan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lili Yao, Nanyun Peng, Ralph M. Weischedel, Kevin Knight, Dongyan Zhao, and Rui Yan. 2019. Plan- and-write: Towards better automatic storytelling. AAAI, abs/1811.05701.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"TABREF1": { |
|
"content": "<table><tr><td colspan=\"3\">deviation. We see similar results when compared</td></tr><tr><td colspan=\"2\">against the S2S model.</td><td/></tr><tr><td colspan=\"3\">System Sent Coh. (0-3) Best Story</td></tr><tr><td>LM</td><td>1.68 \u00b1 1.01</td><td>35.0%</td></tr><tr><td>SLDS</td><td>1.89 \u00b1 0.96</td><td>51.3%</td></tr><tr><td>S2S</td><td>1.67 \u00b1 1.00</td><td>35.1%</td></tr><tr><td>SLDS</td><td>1.87 \u00b1 0.97</td><td>51.9%</td></tr></table>", |
|
"text": "Sample interpolations from Gibbs sampling. Grayed out lines are provided as input and bold sentences are generated by SLDS.", |
|
"type_str": "table", |
|
"num": null, |
|
"html": null |
|
}, |
|
"TABREF2": { |
|
"content": "<table/>", |
|
"text": "Human evaluation scores for filled-in narrative generation. Humans judged sentence coherence and chose which model filled in the most coherent narrative overall (13.7% and 13% tie for LM and S2S).", |
|
"type_str": "table", |
|
"num": null, |
|
"html": null |
|
}, |
|
"TABREF4": { |
|
"content": "<table><tr><td>System</td><td>NLL</td><td>PPL</td></tr><tr><td>SLDS-1%</td><td colspan=\"2\">\u2264177.60 25.19</td></tr><tr><td colspan=\"3\">SLDS-10% \u2264178.81 25.77</td></tr><tr><td colspan=\"3\">SLDS-25% \u2264181.11 26.87</td></tr><tr><td colspan=\"3\">SLDS-50% \u2264185.07 28.88</td></tr><tr><td>SLDS</td><td colspan=\"2\">\u2264182.17 27.39</td></tr></table>", |
|
"text": "NLL and PPL scores on the test set. Lower is better for both the metrics. Variance in NLL calculation is in the order of 10 \u22123 .", |
|
"type_str": "table", |
|
"num": null, |
|
"html": null |
|
}, |
|
"TABREF5": { |
|
"content": "<table/>", |
|
"text": "Approximate NLL and PPL scores for SLDS and semi-supervised SLDS on the test set.", |
|
"type_str": "table", |
|
"num": null, |
|
"html": null |
|
}, |
|
"TABREF6": { |
|
"content": "<table><tr><td>: Macro F1 scores on sentiment classification</td></tr><tr><td>task. Results for SLDS and SLDS-X% are averaged</td></tr><tr><td>over 5 runs.</td></tr></table>", |
|
"text": "", |
|
"type_str": "table", |
|
"num": null, |
|
"html": null |
|
} |
|
} |
|
} |
|
} |