|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T10:38:56.533381Z" |
|
}, |
|
"title": "ESTIME: Estimation of Summary-to-Text Inconsistency by Mismatched Embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Oleg", |
|
"middle": [], |
|
"last": "Vasilyev", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Primer Technologies Inc. San Francisco", |
|
"location": { |
|
"country": "California" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Bohannon", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Primer Technologies Inc. San Francisco", |
|
"location": { |
|
"country": "California" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "We propose a new reference-free summary quality evaluation measure, with emphasis on the faithfulness. The measure is based on finding and counting all probable potential inconsistencies of the summary with respect to the source document. The proposed ESTIME, Estimator of Summary-to-Text Inconsistency by Mismatched Embeddings, correlates with expert scores in summary-level SummEval dataset stronger than other common evaluation measures not only in Consistency but also in Fluency. We also introduce a method of generating subtle factual errors in human summaries. We show that ESTIME is more sensitive to subtle errors than other common evaluation measures.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "We propose a new reference-free summary quality evaluation measure, with emphasis on the faithfulness. The measure is based on finding and counting all probable potential inconsistencies of the summary with respect to the source document. The proposed ESTIME, Estimator of Summary-to-Text Inconsistency by Mismatched Embeddings, correlates with expert scores in summary-level SummEval dataset stronger than other common evaluation measures not only in Consistency but also in Fluency. We also introduce a method of generating subtle factual errors in human summaries. We show that ESTIME is more sensitive to subtle errors than other common evaluation measures.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Summarization must preserve the factual consistency of the summary with the text. Human annotation of factual consistency can be accompanied with detailed classification of factual errors, thus giving a hope that the annotation scores are reasonably objective (Kryscinski et al., 2020; Huang et al., 2020; Vasilyev et al., 2020b; Gabriel et al., 2020; Maynez et al., 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 260, |
|
"end": 285, |
|
"text": "(Kryscinski et al., 2020;", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 286, |
|
"end": 305, |
|
"text": "Huang et al., 2020;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 306, |
|
"end": 329, |
|
"text": "Vasilyev et al., 2020b;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 330, |
|
"end": 351, |
|
"text": "Gabriel et al., 2020;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 352, |
|
"end": 372, |
|
"text": "Maynez et al., 2020)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Factual consistency of a summary is one of several summary qualities; for the purpose of human annotation these qualities can be specified in different ways (Xenouleas et al., 2019; Kryscinski et al., 2020; Fan et al., 2018; Vasilyev et al., 2020b; Fabbri et al., 2020) . Summarization models nowadays create satisfactorily fluent, coherent and informative summaries, but the factual consistency suffers from hallucinations, entity swaps and other errors. Some factual errors are easily noticeable; other factual errors could be hardly noticeable even for annotators (Lux et al., 2020; Vasilyev et al., 2020b) -which is arguably even worse.", |
|
"cite_spans": [ |
|
{ |
|
"start": 157, |
|
"end": 181, |
|
"text": "(Xenouleas et al., 2019;", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 182, |
|
"end": 206, |
|
"text": "Kryscinski et al., 2020;", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 207, |
|
"end": 224, |
|
"text": "Fan et al., 2018;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 225, |
|
"end": 248, |
|
"text": "Vasilyev et al., 2020b;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 249, |
|
"end": 269, |
|
"text": "Fabbri et al., 2020)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 567, |
|
"end": 585, |
|
"text": "(Lux et al., 2020;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 586, |
|
"end": 609, |
|
"text": "Vasilyev et al., 2020b)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Existing summary evaluation measures are based on several approaches, which may be sensitive to some qualities more than to others. A questionanswering based evaluation estimates how helpful is the summary in answering questions about the source text (Xenouleas et al., 2019; Eyal et al., 2019; Scialom et al., 2019; Deutsch et al., 2020; Durmus et al., 2020; . A text reconstruction approach estimates how helpful is the summary in guessing parts of the source text (Vasilyev et al., 2020a,b; Egan et al., 2021) . Evaluation measures that use some kind of text similarity can estimate how similar is the summary to special human-written reference summaries Zhao et al., 2019; Lin, 2004) , or, more realistically, how similar is the summary to the source text Louis and Nenkova, 2009) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 251, |
|
"end": 275, |
|
"text": "(Xenouleas et al., 2019;", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 276, |
|
"end": 294, |
|
"text": "Eyal et al., 2019;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 295, |
|
"end": 316, |
|
"text": "Scialom et al., 2019;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 317, |
|
"end": 338, |
|
"text": "Deutsch et al., 2020;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 339, |
|
"end": 359, |
|
"text": "Durmus et al., 2020;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 467, |
|
"end": 493, |
|
"text": "(Vasilyev et al., 2020a,b;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 494, |
|
"end": 512, |
|
"text": "Egan et al., 2021)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 658, |
|
"end": 676, |
|
"text": "Zhao et al., 2019;", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 677, |
|
"end": 687, |
|
"text": "Lin, 2004)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 760, |
|
"end": 784, |
|
"text": "Louis and Nenkova, 2009)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In order to assess how well an evaluation measure works for factual consistency, it is necessary either to have a dataset of human-annotated imperfect machine-generated summaries (Bhandari et al., 2020; Fabbri et al., 2020) , or to have a dataset of artificially introduced factual errors in originally factually correct human-written summaries (Kryscinski et al., 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 179, |
|
"end": 202, |
|
"text": "(Bhandari et al., 2020;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 203, |
|
"end": 223, |
|
"text": "Fabbri et al., 2020)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 345, |
|
"end": 370, |
|
"text": "(Kryscinski et al., 2020)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this paper we focus on presenting a new evaluation measure with emphasis on factual consistency. Our contribution:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "1. We introduce ESTIME: Estimator of Summary-to-Text Inconsistency by Mismatched Embeddings 1 . Using humanannotated machine-generated summaries of SummEval (Fabbri et al., 2020) , we compare ESTIME with other evaluation measures. 2. We introduce a natural method of generating subtle factual errors. We use it here to compare the performance of ESTIME with other measures on human-written summaries with generated subtle errors.", |
|
"cite_spans": [ |
|
{ |
|
"start": 157, |
|
"end": 178, |
|
"text": "(Fabbri et al., 2020)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The motivations for our estimator: 1. Any location in a summary has a context that loosely corresponds to a context in one or more locations in the text. 2. In the most similar context, the summary would normally use the same word that was used in the text. 3. Summary generation models produce very few new (not from the text) words per summary. 4. Transformer-made token embeddings are highly contextual (Ethayarajh, 2019) . In order to estimate the consistency of a summary with the text, we attempt to count all the summary tokens that could be potentially related to a factual error. To this end, we check embeddings of all the tokens of the summary that have one or more occurrences in the text. For each embedding we find its match: the most similar embedding in the text. If the corresponding tokens are not the same, we add up such mismatch into our score of inconsistency. Our goal is not an error correction or precise location of errors, but a score estimating the summary consistency quality. The algorithm is simple:", |
|
"cite_spans": [ |
|
{ |
|
"start": 406, |
|
"end": 424, |
|
"text": "(Ethayarajh, 2019)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "2 Methods", |
|
"sec_num": "95" |
|
}, |
|
{ |
|
"text": "1. Obtain embeddings for all tokens in the text. In the summary, obtain embeddings only for the tokens that occur at least once in the text.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "2 Methods", |
|
"sec_num": "95" |
|
}, |
|
{ |
|
"text": "To obtain an embedding of a token, mask the token, run a token-prediction transformer model on the context surrounding the token, and take the embedding of the token from a hidden layer. 2. For each of the obtained embeddings of the summary tokens, find the most similar embedding in the text. If the corresponding tokens do not coincide, count this as a potential inconsistency. The total number of such inconsistencies is our score, ESTIME. We measure 'similarity' of embeddings by their scalar product. Thus, ESTIME score is the number N a of 'alarms':", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "2 Methods", |
|
"sec_num": "95" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "N a = i:t i \u2208T H( max \u03b2:t \u03b2 =t i (e i e \u03b2 ) \u2212 s(i))", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "2 Methods", |
|
"sec_num": "95" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "s(i) \u2261 max \u03b1:t \u03b1 =t i (e i e \u03b1 )", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "2 Methods", |
|
"sec_num": "95" |
|
}, |
|
{ |
|
"text": "Here H is Heaviside function; the summary is a sequence of tokens t i , each having embedding e i ; the text T is a sequence of tokens t \u03b1 , having embeddings e \u03b1 . The summation in Equation 1 is over all the summary tokens t i that exist in the text T . The count N a gets added +1 whenever the best match to e i from the embeddings of unequal tokens e \u03b2 exceeds the best match from the embeddings of occurrences of the same token t \u03b1 = t i in the text. The tokens are obtained by the tokenizer corresponding to the token-prediction transformer model. Notice that we do not verify the summary tokens that do not occur in the text. Such tokens still can influence the context used for embeddings of other tokens. The algorithm is asymmetric with respect to the summary and the text: it is supposed to estimate summary-to-text inconsistency.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "2 Methods", |
|
"sec_num": "95" |
|
}, |
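A minimal sketch of the alarm count in Equations 1 and 2, assuming the masked-token embeddings of the summary and of the text have already been collected as described in this section; the function name and the array layout are illustrative rather than the released implementation.

```python
import numpy as np

def estime_alarms(summary_tokens, summary_emb, text_tokens, text_emb):
    """Count ESTIME 'alarms' (Equations 1-2): summary tokens whose most
    similar text embedding (by plain scalar product) belongs to a token
    different from the summary token itself.

    summary_tokens: list of token ids of the summary
    summary_emb:    (n_summary, dim) array of masked-token embeddings
    text_tokens:    list of token ids of the source text
    text_emb:       (n_text, dim) array of masked-token embeddings
    """
    text_tokens = np.asarray(text_tokens)
    alarms = 0
    for token, emb in zip(summary_tokens, summary_emb):
        same = text_tokens == token
        if not same.any():          # only summary tokens that occur in the text are checked
            continue
        sims = text_emb @ emb       # unnormalized scalar products
        s_i = sims[same].max()      # best match among occurrences of the same token (Eq. 2)
        best_other = sims[~same].max() if (~same).any() else -np.inf
        if best_other > s_i:        # Heaviside step of Eq. 1
            alarms += 1
    return alarms                    # the ESTIME score N_a
```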
|
{ |
|
"text": "This approach is different from matching embeddings for sake of measuring similarity (e.g. similarity between a summary and a reference summary in BERTScore , and from using a model trained to replace wrong tokens with correct ones (Cao et al., 2020; Kryscinski et al., 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 232, |
|
"end": 250, |
|
"text": "(Cao et al., 2020;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 251, |
|
"end": 275, |
|
"text": "Kryscinski et al., 2020)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "2 Methods", |
|
"sec_num": "95" |
|
}, |
|
{ |
|
"text": "The embeddings are taken using the pretrained BERT model (Devlin et al., 2019) bert-largeuncased-whole-word-masking of Transformers library (Wolf et al., 2020) . While there is no crucial difference with other varieties of BERT, ALBERT and RoBERTa, this model showed a better overall performance, and we used it for evaluations in the next sections. For the sake of faster processing, we do not run the model separately for each token, but at a single run obtain embeddings for all tokens separated by the distance of 8 tokens. This means that the context for each masked token is a little muddied by masking of a few other tokens, but the distance of 8 tokens is large enough for the effect to be negligible. The results of the next sections are obtained with input size of 450 tokens (close to max BERT input length). Finally, when input window does not touch the beginning or end of the text, we do not mask the tokens too close to the edge of the window: no masking within the margin of 50 tokens at the edges of the input window. The algorithm is simple, but for convenience we provide the code 2 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 57, |
|
"end": 78, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 140, |
|
"end": 159, |
|
"text": "(Wolf et al., 2020)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "2 Methods", |
|
"sec_num": "95" |
|
}, |
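A sketch of how the masked-token embeddings could be collected with the Transformers library under the defaults described above (masks spaced 8 tokens apart, embeddings read from a chosen hidden layer). It handles a single input window and omits the sliding window and the 50-token edge margin, so it illustrates the procedure rather than reproducing the released code.

```python
import torch
from transformers import AutoModel, AutoTokenizer

MODEL_NAME = "bert-large-uncased-whole-word-masking"  # model used in the paper
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModel.from_pretrained(MODEL_NAME, output_hidden_states=True).eval()

def masked_token_embeddings(text, layer=21, stride=8, max_len=450):
    """Return (token_ids, embeddings): each embedding is read from `layer`
    while that token is replaced by [MASK]. Tokens are masked in groups
    spaced `stride` apart, so one window needs only `stride` forward passes."""
    ids = tokenizer(text, truncation=True, max_length=max_len,
                    return_tensors="pt")["input_ids"][0]
    emb = torch.zeros(len(ids), model.config.hidden_size)
    with torch.no_grad():
        for offset in range(stride):
            positions = list(range(offset + 1, len(ids) - 1, stride))  # skip [CLS]/[SEP]
            if not positions:
                continue
            masked = ids.clone()
            masked[positions] = tokenizer.mask_token_id
            hidden = model(masked.unsqueeze(0)).hidden_states[layer][0]
            emb[positions] = hidden[positions]
    return ids[1:-1].tolist(), emb[1:-1].numpy()
```

Running this on the text and on the summary provides the inputs for the alarm count sketched after Equation 2.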
|
{ |
|
"text": "In the next sections we present results for the versions ESTIME-12 and ESTIME-24, corresponding to the embeddings from the middle (12th layer) and from the top (24th layer) of the large BERT; as explained later we also consider ESTIME-21. 3 Performance on human-annotated machine-generated summaries", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "2 Methods", |
|
"sec_num": "95" |
|
}, |
|
{ |
|
"text": "We used SummEval dataset 3 (Fabbri et al., 2020) for comparing ESTIME with a few well known or promising evaluation measures. The part of Sum-mEval dataset that we use consists of 100 texts, each text is accompanied by 16 summaries generated by 16 different models, making altogether 1600 text-summary pairs. Each text-summary pair is annotated (on scale 1 to 5) by 3 experts for 4 qualities: consistency, relevance, coherence and fluency. We took average of the expert scores for each quality of a text-summary pair. Each text is also accompanied by 11 human-written reference summaries, for the measures that need them. (In latest version of (Fabbri et al., 2020) a 17th model -Pegasus dynamic mix -is added to the annotations.)", |
|
"cite_spans": [ |
|
{ |
|
"start": 27, |
|
"end": 48, |
|
"text": "(Fabbri et al., 2020)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Correlations with expert scores", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "We calculated scores of ESTIME and other measures for all the 1600 summaries, and presented their correlations with the average expert scores in Table 1 . The measures in Table 1 are split into the group of reference-free measures (top) and the measures requiring human-written references (bot-tom). All the measures are based on certain principles rather than on finetuning on some humanannotated datasets. Here BLANC-help (Vasilyev et al., 2020a) is calculated in two versions 4 , which differ by the underlying models: BLU -bert-largeuncased, and AXXL -albert-xxlarge-v2. ESTIME and Jensen-Shannon (Louis and Nenkova, 2009) values are negated. SummaQA (Scialom et al., 2019 ) is represented by SummaQA-P (prob) and SummaQA-F1 (F1 score) 5 . SUPERT is calculated as single-doc with 20 reference sentences 'top20' 6 (using bert-large-nli-stsb-meantokens). BLEU (Papineni et al., 2002) is calculated with NLTK. BERTScore (by default 7 using roberta-large) is represented by F1, precision (P) and recall (R). For ROUGE (Lin, 2004) the ROUGE-L is calculated as rougeLsum 8 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 424, |
|
"end": 448, |
|
"text": "(Vasilyev et al., 2020a)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 601, |
|
"end": 626, |
|
"text": "(Louis and Nenkova, 2009)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 655, |
|
"end": 676, |
|
"text": "(Scialom et al., 2019", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 862, |
|
"end": 885, |
|
"text": "(Papineni et al., 2002)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 1018, |
|
"end": 1029, |
|
"text": "(Lin, 2004)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 145, |
|
"end": 152, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
}, |
|
{ |
|
"start": 171, |
|
"end": 178, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Correlations with expert scores", |
|
"sec_num": "3.1" |
|
}, |
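For reference, the summary-level correlations could be computed as sketched below; this assumes scipy >= 1.7 (for the tau-c variant) and arrays of 1600 scores, one per text-summary pair, and is a generic sketch rather than the exact evaluation script.

```python
from scipy.stats import kendalltau, spearmanr

def summary_level_correlation(measure_scores, expert_scores):
    """Correlate one measure's scores with the averaged expert scores over
    the 1600 SummEval text-summary pairs (Kendall tau-c and Spearman rho)."""
    tau_c, tau_p = kendalltau(measure_scores, expert_scores, variant="c")
    rho, rho_p = spearmanr(measure_scores, expert_scores)
    return {"kendall_tau_c": tau_c, "spearman_rho": rho, "p_values": (tau_p, rho_p)}
```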
|
{ |
|
"text": "By design ESTIME should perform well for consistency, and indeed it beats other measures in the table. Being a one-sided summary-to-text estimator of inconsistencies, ESTIME should not and does not perform well for relevance. ESTIME performs better than other measures for fluency, and reasonably well for coherence (ESTIME-21 is better for coherence than the rest of the reference-free metrics). Interestingly, a comparison of ESTIME-12 vs ESTIME-24 shows that the middle of the transformer knows better than the top about all the summary qualities except the fluency. In Appendix A we show and discuss a curious pattern of dependency of correlations on the embeddings layer. Correlations with all qualities peak around the layer 21, then sharply drop by the top layer 24. This is the reason we added ESTIME-21 to the table. While ESTIME-21 is the best choice, each of the three shown ESTIME versions is better than the rest of the measures for consistency and fluency. In Table 2 we show correlations on system level, meaning that the scores (of automated measures and of human experts) are averaged over the 100 texts, so that each array of scores has length only 16 rather than 1600 (Fabbri et al., 2020) . The purpose of this would be a comparison of the summarization models. The results are shown for consistency only; for other qualities some measures have p-value higher than 0.05. The ranking of the measures changes with averaging over the texts (Table 2 vs Table 1 ). We may speculate that some measures may be more sensitive to the model generation style which can lead to less errors or more errors on average; other measures may be more sensitive to specific factual errors in each summary. If it is true, we would have to be cautious about the measures that do well on the system level and do not do well on the summary level.", |
|
"cite_spans": [ |
|
{ |
|
"start": 1187, |
|
"end": 1208, |
|
"text": "(Fabbri et al., 2020)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 974, |
|
"end": 981, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
}, |
|
{ |
|
"start": 1457, |
|
"end": 1477, |
|
"text": "(Table 2 vs Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Correlations with expert scores", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "While we must be cautious about picking evaluation measure version most fitting human scores , using ESTIME-21 is probably justified by simultaneous maximum at level 21 for all four summary qualities, as shown in Figures 1 and 2 in Appendix A. For our definition of ESTIME we preferred N a of Equation 1 rather than the alternative definition N w of Equation 3 in Appendix B. N w is counting all the text tokens that managed to 'win', i.e. to be closer to a summary token than any text occurrence of the summary token. We are concerned that if the summary token is bad (inconsistent with its context), the number of the 'winners' is large and might be fairly arbitrary.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 213, |
|
"end": 228, |
|
"text": "Figures 1 and 2", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "We defined ESTIME in Equations 1 and 2 by using simple scalar product of embeddings. In Appendix C we show that using normalized embeddings only makes the correlations worse, by almost fully erasing the 'Layer-21 maximum'.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "In Appendix D we give an example of switching to a simpler underlying model: bert-base-uncased. This slightly weakens the correlations, and makes the dependency on the layer id less sharp.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "In Appendix E we give an example of excluding part of speech tokens from consideration by ESTIME. This means that the summation in Equation 1 will use only tokens t i of some parts of speech, and that the max will similarly restrict the tokens t \u03b2 . Despite high frequency of determiners in texts, the omission of the determiners from ESTIME makes almost no difference.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "As explained in Section 2, in obtaining embeddings we are using a somewhat spoiled context, because we mask many tokens in a single input (albeit requiring the masks to be reasonably separated). In Appendix F we show that our separation requirements are indeed reasonable, and making them twice more strict barely change the correlations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "Machine-generated summaries, even by abstractive summarization models, generally follow the source text by frequently reproducing large spans from it. Human summaries are more varied in describing the source text, and it is interesting how useful can be ESTIME for evaluating them. Fundamentally, we are asking how flexible are the embeddings in understanding the context. In order to answer this question, we made random selection of 2000 text-summary pairs from CNN/Daily Mail dataset (Hermann et al., 2015) . For each human-written summary we then added the same summary modified by generated factual errors. We thus made 4000 text-summary pairs. We assigned the 'golden' scores as 1 to each clean summary, and 0 to each summary with errors. Our 'subtle errors' generation method is simple, heuristic-free and easily reproducible. In order to generate an error, we randomly select a wholeword token in the summary, mask and predict it by an LM model (we used bert-base-cased). We then select the top predicted candidate that is not equal to the real token, and substitute it for the real token. The resulting subtle errors are similar to real machine-generated mishaps and hallucinations, with the fluency preserved.", |
|
"cite_spans": [ |
|
{ |
|
"start": 487, |
|
"end": 509, |
|
"text": "(Hermann et al., 2015)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Performance on human summaries with generated subtle errors", |
|
"sec_num": "4" |
|
}, |
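A sketch of the subtle-error generation step using the fill-mask pipeline of the Transformers library with bert-base-cased, as described above; the whitespace-based word splitting and the test for a 'different' candidate are simplifications for illustration.

```python
import random
from transformers import pipeline

fill_mask = pipeline("fill-mask", model="bert-base-cased")

def add_subtle_error(summary, rng=random):
    """Replace one randomly chosen word of the summary with the top
    masked-LM prediction that differs from the original word."""
    words = summary.split()
    i = rng.randrange(len(words))
    original = words[i].strip(".,!?\"'").lower()
    masked = " ".join(words[:i] + [fill_mask.tokenizer.mask_token] + words[i + 1:])
    for candidate in fill_mask(masked, top_k=5):
        predicted = candidate["token_str"].strip()
        if predicted.lower() != original:  # skip the candidate equal to the real word
            words[i] = predicted
            return " ".join(words)
    return summary  # no differing candidate among the top predictions (rare)
```

Applying this three times to each duplicated summary reproduces the 'score=0' set used in the next paragraph.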
|
{ |
|
"text": "The evaluation task is now more difficult: the summaries are human-written, and the errors are subtle. Without labeling of the generated errors, we cannot be confident of always having real factual errors: large part of the generated errors are indeed truly factual errors, but the rest disturb coherence or fluency, or make synonyms. For purposes of a preliminary simple evaluation here, and to ensure high probability of having true errors, we generated 3 random errors in each 'score=0' summary. Table 3 shows that ESTIME is more sensitive to the generated errors than other measures. Only reference-free measures could be applied in this situation. All p-values in the table are less than 10 \u22123 , except 0.023 for BLANC-AXXL, 0.002 for Jensen-Shannon and 0.001 for SummaQA-P.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 499, |
|
"end": 506, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Performance on human summaries with generated subtle errors", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "In Table 3 the correlation of ESTIME-21 with generated errors turns out to be lower than the correlation of ESTIME-24. If we guessed correctly in Apendix A about the reasons for the drop of the correlations between the layers 21 and 24 in Figures 1 and 2 , then the relatively high value of ESTIME-24 indicates that it may have additionally benefited from an information relevant to predicting tokens, even when the generated token replacements are not factual errors. In the near future we plan to follow up these evaluations on a large fully labeled dataset of 'subtle errors'. Table 3 : Correlation \u03c1 (Spearman) and \u03c4 (Kendall Tauc) of quality estimators with the presence of generated subtle errors in human summary. The dataset of 4000 text-summary pairs was created by random pick of 2000 test-summary pairs from CNN / Daily Mail dataset, duplicating these 2000 pairs, and by generating subtle errors in the 2000 duplicated summaries.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 10, |
|
"text": "Table 3", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 239, |
|
"end": 255, |
|
"text": "Figures 1 and 2", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 581, |
|
"end": 588, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Performance on human summaries with generated subtle errors", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We introduced ESTIME: estimator of summary-totext inconsistency by mismatched embeddings, -a measure of summary quality with emphasis on measuring factual inconsistency between the summary and the text. The fact that this simple measure correlates with human-labeled consistency and fluency much better than more complex measures tells us about the current state of summary evaluation, and about the power of contextual embeddings. We also introduced a method for generating subtle errors; the method has a potential for creating consistent and realistic benchmark datasets for factual consistency. In the near future we intend to release such fully labeled flexible dataset.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "It is natural to expect that embeddings from top layer would be good in characterizing context for a token. In Figures 1 and 2 we show correlations of SummEval expert scores with ESTIME versions that are defined by a model layer from which the embeddings are taken. (The model is bert-largeuncased-whole-word-masking.) Immediate observation about the dependency of the correlation value on the model layer is that after reaching maximum around layer 21, the correlation value quickly drops at higher layers. At low levels the correlation value increases fast by layer 5 (for coherence and relevance or 7 (for consistency and fluency) and then grows much slower, sometimes going flat or even dropping down. We have no guess why the dependency of the correlations on the layer Id is so strong immediately after the layer #2 and why it is weak further in the wide range of the middle layers. However, we can speculate about the sharp drop after the 'layer #21 peak'. It may be that below the layer #21 peak, the BERT model keeps a lot of generic contextual information for two reasons: it is trained for two tasks (next sentence prediction and masked token prediction), and each node has to be useful for all or for the most of the nodes above. But after Figure 2 : Spearman correlation between SummEval experts scores and ESTIME using embeddings taken from different layers of the model. the peak, the last few layers at positions close to each text token are strongly influenced by the token prediction task.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 111, |
|
"end": 126, |
|
"text": "Figures 1 and 2", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 1252, |
|
"end": 1260, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "A Dependency on layers", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In the plots shown in this Appendix, as well as in all other plots through the paper, the correlations p-values are below 0.05 (mostly far below). Unlike the summary level correlations, the system level correlations have not much data. This is why, keeping only the correlations with p-values below 0.05, we can show in Figure 3 only the consistency quality, and even for the consistency we have less range of layers. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 320, |
|
"end": 328, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "A Dependency on layers", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "N w = i:t i \u2208T \u03b2:t \u03b2 =t i H((e i e \u03b2 ) \u2212 s(i)) (3)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A Dependency on layers", |
|
"sec_num": null |
|
}, |
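For comparison, a sketch of the alternative count N_w of Equation 3, written as a variant of the alarm-count sketch from Section 2; the names are again illustrative.

```python
import numpy as np

def estime_winners(summary_tokens, summary_emb, text_tokens, text_emb):
    """Count N_w (Equation 3): for each summary token, count every text
    embedding of a different token that beats the best same-token match."""
    text_tokens = np.asarray(text_tokens)
    winners = 0
    for token, emb in zip(summary_tokens, summary_emb):
        same = text_tokens == token
        if not same.any():
            continue
        sims = text_emb @ emb
        s_i = sims[same].max()                     # s(i) of Equation 2
        winners += int((sims[~same] > s_i).sum())  # all 'winning' mismatches, not just one
    return winners
```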
|
{ |
|
"text": "In Figures 4 and 5 we see how N w differs from ESTIME (N a ). ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 18, |
|
"text": "Figures 4 and 5", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "A Dependency on layers", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We used unnormalized embeddings for ESTIME. From Figures 6 and 7 it is clear that normalizing embeddings does not improve ESTIME. Curiously, the effect of the normalization on the correlations is in destroying the peak around the layer 21. This could mean that the lengths of the embeddings carry all the information necessary for creating the peak at the layer 21.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 49, |
|
"end": 64, |
|
"text": "Figures 6 and 7", |
|
"ref_id": "FIGREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "C Normalization of embeddings", |
|
"sec_num": null |
|
}, |
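The normalized-embedding variant discussed here amounts to scaling each embedding to unit length before taking the scalar products, turning them into cosine similarities; a one-line preprocessing sketch (numpy assumed):

```python
import numpy as np

def normalize_rows(emb):
    """Scale each embedding to unit length; feeding the normalized arrays to
    the alarm count turns the scalar products into cosine similarities."""
    return emb / np.linalg.norm(emb, axis=1, keepdims=True)
```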
|
{ |
|
"text": "As default and through the paper ESTIME uses the model bert-large-uncased-whole-word-masking. In Figures 8 and 9 we show an example of a comparison with another model -bert-base-uncased. Unlike the large model, the bert-base-uncased has 0-12 range of its layers, and in the plots here we rescaled them by x2, interpolating in between for odd layer Ids. This allows to compare the trends of the correlations along the relative depth of the transformer. We observe the familiar quick rise at low depth, a drop at high levels close to the output, and a slow growth or plateau in between, -but all these features are less sharp for the bert-baseuncased. It is puzzling that a larger transformer, with twice longer 'distance' for backpropagation to travel from the top to the bottom, has more distinct features of quick rise, plateau, peak and drop, with exact locations of the end of the quick rise and of the peak. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 97, |
|
"end": 112, |
|
"text": "Figures 8 and 9", |
|
"ref_id": "FIGREF7" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "D Comparison with base BERT", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Throughout the paper we used all text tokens for ESTIME. In Figures 10 and 11 we show an example of excluding from consideration a part of speech: determiners. Determiners occur very frequently in the text, but exclusion of them does not make much difference in the resulting correlations with human scores, especially for the quality we are most interested in: consistency. Figure 10 : Kandall Tau-c correlation between Sum-mEval experts scores and ESTIME by embeddings from different layers of the model. Thick lines: all tokens are used, as is done throughout the paper. Thin lines: tokens of determiners (part of speech) are not used. Figure 11 : Spearman correlation between SummEval experts scores and EESTIME by embeddings from different layers of the model. Thick lines: all tokens are used, as is done throughout the paper. Thin lines: tokens of determiners (part of speech) are not used.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 60, |
|
"end": 77, |
|
"text": "Figures 10 and 11", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 375, |
|
"end": 384, |
|
"text": "Figure 10", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 639, |
|
"end": 648, |
|
"text": "Figure 11", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "E Example of excluding a part of speech", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In Section 2 we explained that for faster processing we take embeddings not one at a time, but as much as fit into an input window, as long as the masking is done with 8 tokens separation, and within the margin 50 tokens from the input edges (unless input edge touches the edge of the text). In Figures 12 and 13 we compare our default parameters with a twice more sparse version: 16 tokens separation, and 100 tokens margin. The sparser version should be better, but slower to run. From the figures it is clear that the sparser version has almost the same correlations; our default sparsity is good enough. Figure 12 : Kandall Tau-c correlation between Sum-mEval experts scores and ESTIME by embeddings from different layers of the model. Thick lines: sparsity of the masking is defined by the distance 8 and the margin 50 (see Section 2), as used through the paper. Thin lines: Distance 8, margin 100. Figure 13 : Spearman correlation between SummEval experts scores and EESTIME by embeddings from different layers of the model. Thick lines: sparsity of the masking is defined by the distance 8 and the margin 50 (see Section 2), as used through the paper. Thin lines: Distance 8, margin 100.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 295, |
|
"end": 313, |
|
"text": "Figures 12 and 13", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 609, |
|
"end": 618, |
|
"text": "Figure 12", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 905, |
|
"end": 914, |
|
"text": "Figure 13", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "F Parameters for sparse masking", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://github.com/PrimerAI/blanc/tree/master/estime", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://github.com/PrimerAI/blanc/tree/master/estime", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://github.com/Yale-LILY/SummEval", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://github.com/PrimerAI/blanc#blanc-on-summevaldataset 5 https://github.com/recitalAI/summa-qa 6 https://github.com/yg211/acl20-ref-free-eval 7 https://github.com/Tiiiger/bert_score 8 https://github.com/google-research/googleresearch/tree/master/rouge", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We thank Nidhi Vyas and anonymous reviewers for review of the paper and valuable feedback.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Reevaluating evaluation in text summarization", |
|
"authors": [ |
|
{ |
|
"first": "Manik", |
|
"middle": [], |
|
"last": "Bhandari", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pranav", |
|
"middle": [], |
|
"last": "Narayan Gour", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Atabak", |
|
"middle": [], |
|
"last": "Ashfaq", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pengfei", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Graham", |
|
"middle": [], |
|
"last": "Neubig", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "9347--9359", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Manik Bhandari, Pranav Narayan Gour, Atabak Ash- faq, Pengfei Liu, and Graham Neubig. 2020. Re- evaluating evaluation in text summarization. In Pro- ceedings of the 2020 Conference on Empirical Meth- ods in Natural Language Processing, pages 9347- 9359. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Factual error correction for abstractive summarization models", |
|
"authors": [ |
|
{ |
|
"first": "Meng", |
|
"middle": [], |
|
"last": "Cao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yue", |
|
"middle": [], |
|
"last": "Dong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiapeng", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jackie Chi Kit", |
|
"middle": [], |
|
"last": "Cheung", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "6251--6258", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Meng Cao, Yue Dong, Jiapeng Wu, and Jackie Chi Kit Cheung. 2020. Factual error correction for abstrac- tive summarization models. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing, pages 6251-6258. Associa- tion for Computational Linguistics (2020).", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Towards question-answering as an automatic metric for evaluating the content quality of a summary", |
|
"authors": [ |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Deutsch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tania", |
|
"middle": [], |
|
"last": "Bedrax-Weiss", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Roth", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2010.00490" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniel Deutsch, Tania Bedrax-Weiss, and Dan Roth. 2020. Towards question-answering as an automatic metric for evaluating the content quality of a sum- mary. arXiv, arXiv:2010.00490.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "4171--4186", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1423" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "FEQA: A question answering evaluation framework for faithfulness assessment in abstractive summarization", |
|
"authors": [ |
|
{ |
|
"first": "Esin", |
|
"middle": [], |
|
"last": "Durmus", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "He", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mona", |
|
"middle": [], |
|
"last": "Diab", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5055--5070", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Esin Durmus, He He, and Mona Diab. 2020. FEQA: A question answering evaluation framework for faith- fulness assessment in abstractive summarization. In Proceedings of the 58th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 5055- 5070. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Play the Shannon game with language models: A human-free approach to summary evaluation", |
|
"authors": [ |
|
{ |
|
"first": "Nicholas", |
|
"middle": [], |
|
"last": "Egan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oleg", |
|
"middle": [], |
|
"last": "Vasilyev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Bohannon", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2103.10918" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nicholas Egan, Oleg Vasilyev, and John Bohannon. 2021. Play the Shannon game with language mod- els: A human-free approach to summary evaluation. arXiv, arXiv:2103.10918.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "How contextual are contextualized word representations? Comparing the geometry of BERT, ELMo, and GPT-2 embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Kawin", |
|
"middle": [], |
|
"last": "Ethayarajh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "55--65", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kawin Ethayarajh. 2019. How contextual are contex- tualized word representations? Comparing the ge- ometry of BERT, ELMo, and GPT-2 embeddings. In Proceedings of the 2019 Conference on Empiri- cal Methods in Natural Language Processing and the 9th International Joint Conference on Natu- ral Language Processing, pages 55-65. Association for Computational Linguistics (Hong Kong, China, 2019).", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Question answering as an automatic evaluation metric for news article summarization", |
|
"authors": [ |
|
{ |
|
"first": "Matan", |
|
"middle": [], |
|
"last": "Eyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tal", |
|
"middle": [], |
|
"last": "Baumel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Elhadad", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "3938--3948", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matan Eyal, Tal Baumel, and Michael Elhadad. 2019. Question answering as an automatic evaluation met- ric for news article summarization. In Proceedings of the 2019 Conference of the North American Chap- ter of the Association for Computational Linguistics: Human Language Technologies, Volume 1, pages 3938-3948. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "SummEval: Reevaluating summarization evaluation. arXiv", |
|
"authors": [ |
|
{ |
|
"first": "Alexander", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Fabbri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wojciech", |
|
"middle": [], |
|
"last": "Kry\u015bci\u0144ski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bryan", |
|
"middle": [], |
|
"last": "Mccann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Caiming", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dragomir", |
|
"middle": [], |
|
"last": "Radev", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2007.12626v4" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexander R. Fabbri, Wojciech Kry\u015bci\u0144ski, Bryan McCann, Caiming Xiong, Richard Socher, and Dragomir Radev. 2020. SummEval: Re- evaluating summarization evaluation. arXiv, arXiv:2007.12626v4.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Robust neural abstractive summarization systems and evaluation against adversarial information. arXiv", |
|
"authors": [ |
|
{ |
|
"first": "Lisa", |
|
"middle": [], |
|
"last": "Fan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dong", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lu", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1810.06065" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lisa Fan, Dong Yu, and Lu Wang. 2018. Ro- bust neural abstractive summarization systems and evaluation against adversarial information. arXiv, arXiv:1810.06065.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Go figure! A meta evaluation of factuality in summarization. arXiv", |
|
"authors": [ |
|
{ |
|
"first": "Saadia", |
|
"middle": [], |
|
"last": "Gabriel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Asli", |
|
"middle": [], |
|
"last": "Celikyilmaz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rahul", |
|
"middle": [], |
|
"last": "Jha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yejin", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianfeng", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2010.12834" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Saadia Gabriel, Asli Celikyilmaz, Rahul Jha, Yejin Choi, and Jianfeng Gao. 2020. Go figure! A meta evaluation of factuality in summarization. arXiv, arXiv:2010.12834.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "SU-PERT: Towards new frontiers in unsupervised evaluation metrics for multi-document summarization", |
|
"authors": [ |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Steffen", |
|
"middle": [], |
|
"last": "Eger", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1347--1354", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yang Gao, Wei Zhao, and Steffen Eger. 2020. SU- PERT: Towards new frontiers in unsupervised evalu- ation metrics for multi-document summarization. In Proceedings of the 58th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 1347- 1354. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Teaching machines to read and comprehend", |
|
"authors": [ |
|
{ |
|
"first": "Karl", |
|
"middle": [], |
|
"last": "Moritz Hermann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tom\u00e1\u0161", |
|
"middle": [], |
|
"last": "Ko\u010disk\u00fd", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edward", |
|
"middle": [], |
|
"last": "Grefenstette", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lasse", |
|
"middle": [], |
|
"last": "Espeholt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Will", |
|
"middle": [], |
|
"last": "Kay", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mustafa", |
|
"middle": [], |
|
"last": "Suleyman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Phil", |
|
"middle": [], |
|
"last": "Blunsom", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "28", |
|
"issue": "", |
|
"pages": "1693--1701", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Karl Moritz Hermann, Tom\u00e1\u0161 Ko\u010disk\u00fd, Edward Grefen- stette, Lasse Espeholt, Will Kay, Mustafa Suleyman, and Phil Blunsom. 2015. Teaching machines to read and comprehend. In Advances in Neural Informa- tion Processing Systems 28, pages 1693-1701. Cur- ran Associates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "What have we achieved on text summarization?", |
|
"authors": [ |
|
{ |
|
"first": "Dandan", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Leyang", |
|
"middle": [], |
|
"last": "Cui", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sen", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guangsheng", |
|
"middle": [], |
|
"last": "Bao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kun", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jun", |
|
"middle": [], |
|
"last": "Xie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yue", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "446--469", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dandan Huang, Leyang Cui, Sen Yang, Guangsheng Bao, Kun Wang, Jun Xie, and Yue Zhang. 2020. What have we achieved on text summarization? In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing, pages 446-469. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Evaluating the factual consistency of abstractive text summarization", |
|
"authors": [ |
|
{ |
|
"first": "Wojciech", |
|
"middle": [], |
|
"last": "Kryscinski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bryan", |
|
"middle": [], |
|
"last": "Mccann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Caiming", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "9332--9346", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wojciech Kryscinski, Bryan McCann, Caiming Xiong, and Richard Socher. 2020. Evaluating the factual consistency of abstractive text summarization. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing, pages 9332-9346. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "ROUGE: A package for automatic evaluation of summaries", |
|
"authors": [ |
|
{ |
|
"first": "Chin-Yew", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of Workshop on Text Summarization Branches Out", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "74--81", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chin-Yew Lin. 2004. ROUGE: A package for au- tomatic evaluation of summaries. In Proceedings of Workshop on Text Summarization Branches Out, pages 74-81. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Automatically evaluating content selection in summarization without human models", |
|
"authors": [ |
|
{ |
|
"first": "Annie", |
|
"middle": [], |
|
"last": "Louis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ani", |
|
"middle": [], |
|
"last": "Nenkova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the 2009 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "306--314", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Annie Louis and Ani Nenkova. 2009. Automatically evaluating content selection in summarization with- out human models. In Proceedings of the 2009 Con- ference on Empirical Methods in Natural Language Processing, pages 306-314. Association for Compu- tational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Truth or error? Towards systematic analysis of factual errors in abstractive summaries", |
|
"authors": [ |
|
{ |
|
"first": "Klaus-Michael", |
|
"middle": [], |
|
"last": "Lux", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maya", |
|
"middle": [], |
|
"last": "Sappelli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martha", |
|
"middle": [], |
|
"last": "Larson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the First Workshop on Evaluation and Comparison of NLP Systems (Eval4NLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--10", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Klaus-Michael Lux, Maya Sappelli, and Martha Lar- son. 2020. Truth or error? Towards systematic analysis of factual errors in abstractive summaries. In Proceedings of the First Workshop on Evalua- tion and Comparison of NLP Systems (Eval4NLP), pages 1-10. Association for Computational Linguis- tics (2020).", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "On faithfulness and factuality in abstractive summarization", |
|
"authors": [ |
|
{ |
|
"first": "Joshua", |
|
"middle": [], |
|
"last": "Maynez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shashi", |
|
"middle": [], |
|
"last": "Narayan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bernd", |
|
"middle": [], |
|
"last": "Bohnet", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Mcdonald", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1906--1919", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joshua Maynez, Shashi Narayan, Bernd Bohnet, and Ryan McDonald. 2020. On faithfulness and factu- ality in abstractive summarization. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 1906-1919. Asso- ciation for Computational Linguistics (2020).", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "BLEU: a method for automatic evaluation of machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Kishore", |
|
"middle": [], |
|
"last": "Papineni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Salim", |
|
"middle": [], |
|
"last": "Roukos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Todd", |
|
"middle": [], |
|
"last": "Ward", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei-Jing", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics (ACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "311--318", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2002. BLEU: a method for automatic eval- uation of machine translation. In Proceedings of the 40th Annual Meeting of the Association for Compu- tational Linguistics (ACL), pages 311-318, Philadel- phia. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Answers unite! Unsupervised metrics for reinforced summarization models", |
|
"authors": [ |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Scialom", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sylvain", |
|
"middle": [], |
|
"last": "Lamprier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Piwowarski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jacopo", |
|
"middle": [], |
|
"last": "Staiano", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3246--3256", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas Scialom, Sylvain Lamprier, Benjamin Pi- wowarski, and Jacopo Staiano. 2019. Answers unite! Unsupervised metrics for reinforced summa- rization models. In Proceedings of the 2019 Con- ference on Empirical Methods in Natural Language Processing and the 9th International Joint Confer- ence on Natural Language Processing, pages 3246- 3256, Hong Kong, China. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Is human scoring the best criteria for summary evaluation?", |
|
"authors": [ |
|
{ |
|
"first": "Oleg", |
|
"middle": [], |
|
"last": "Vasilyev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Bohannon", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2184--2191", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2021.findings-acl.192" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Oleg Vasilyev and John Bohannon. 2021. Is human scoring the best criteria for summary evaluation? In Findings of the Association for Computational Lin- guistics: ACL-IJCNLP 2021, pages 2184-2191. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Fill in the BLANC: Human-free quality estimation of document summaries", |
|
"authors": [ |
|
{ |
|
"first": "Oleg", |
|
"middle": [], |
|
"last": "Vasilyev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vedant", |
|
"middle": [], |
|
"last": "Dharnidharka", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Bohannon", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the First Workshop on Evaluation and Comparison of NLP Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "11--20", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.eval4nlp-1.2" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Oleg Vasilyev, Vedant Dharnidharka, and John Bohan- non. 2020a. Fill in the BLANC: Human-free quality estimation of document summaries. In Proceedings of the First Workshop on Evaluation and Compari- son of NLP Systems, pages 11-20. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Sensitivity of BLANC to human-scored qualities of text summaries. arXiv", |
|
"authors": [ |
|
{ |
|
"first": "Oleg", |
|
"middle": [], |
|
"last": "Vasilyev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vedant", |
|
"middle": [], |
|
"last": "Dharnidharka", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nicholas", |
|
"middle": [], |
|
"last": "Egan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Charlene", |
|
"middle": [], |
|
"last": "Chambliss", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Bohannon", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2010.06716" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Oleg Vasilyev, Vedant Dharnidharka, Nicholas Egan, Charlene Chambliss, and John Bohannon. 2020b. Sensitivity of BLANC to human-scored qualities of text summaries. arXiv, arXiv:2010.06716.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Asking and answering questions to evaluate the factual consistency of summaries", |
|
"authors": [ |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages s 5008-5020. Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alex Wang, Kyunghyun Cho, and Mike Lewis. 2020. Asking and answering questions to evaluate the fac- tual consistency of summaries. In Proceedings of the 58th Annual Meeting of the Association for Com- putational Linguistics, pages s 5008-5020. Associa- tion for Computational Linguistics (2020).", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Transformers: State-of-the-art natural language processing", |
|
"authors": [ |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Wolf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lysandre", |
|
"middle": [], |
|
"last": "Debut", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Victor", |
|
"middle": [], |
|
"last": "Sanh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julien", |
|
"middle": [], |
|
"last": "Chaumond", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Clement", |
|
"middle": [], |
|
"last": "Delangue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anthony", |
|
"middle": [], |
|
"last": "Moi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pierric", |
|
"middle": [], |
|
"last": "Cistac", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tim", |
|
"middle": [], |
|
"last": "Rault", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Remi", |
|
"middle": [], |
|
"last": "Louf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Morgan", |
|
"middle": [], |
|
"last": "Funtowicz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joe", |
|
"middle": [], |
|
"last": "Davison", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sam", |
|
"middle": [], |
|
"last": "Shleifer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Patrick", |
|
"middle": [], |
|
"last": "von Platen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Clara", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yacine", |
|
"middle": [], |
|
"last": "Jernite", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julien", |
|
"middle": [], |
|
"last": "Plu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Canwen", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Teven", |
|
"middle": [ |
|
"Le" |
|
], |
|
"last": "Scao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sylvain", |
|
"middle": [], |
|
"last": "Gugger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mariama", |
|
"middle": [], |
|
"last": "Drame", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quentin", |
|
"middle": [], |
|
"last": "Lhoest", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Rush", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "38--45", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pier- ric Cistac, Tim Rault, Remi Louf, Morgan Funtow- icz, Joe Davison, Sam Shleifer, Patrick von Platen, Clara Ma, Yacine Jernite, Julien Plu, Canwen Xu, Teven Le Scao, Sylvain Gugger, Mariama Drame, Quentin Lhoest, and Alexander Rush. 2020. Trans- formers: State-of-the-art natural language process- ing. In Proceedings of the 2020 Conference on Em- pirical Methods in Natural Language Processing: System Demonstrations, pages 38-45. Association for Computational Linguistics (2020).", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "SUM-QE: a BERT-based summary quality estimation model", |
|
"authors": [ |
|
{ |
|
"first": "Stratos", |
|
"middle": [], |
|
"last": "Xenouleas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Prodromos", |
|
"middle": [], |
|
"last": "Malakasiotis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marianna", |
|
"middle": [], |
|
"last": "Apidianaki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ion", |
|
"middle": [], |
|
"last": "Androutsopoulos", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "6005--6011", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stratos Xenouleas, Prodromos Malakasiotis, Marianna Apidianaki, and Ion Androutsopoulos. 2019. SUM- QE: a BERT-based summary quality estimation model. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natu- ral Language Processing, pages 6005-6011, Hong Kong, China. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "BERTScore: Evaluating text generation with BERT. arXiv", |
|
"authors": [ |
|
{ |
|
"first": "Tianyi", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Varsha", |
|
"middle": [], |
|
"last": "Kishore", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Felix", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kilian", |
|
"middle": [ |
|
"Q" |
|
], |
|
"last": "Weinberger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Artzi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1904.09675v3" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tianyi Zhang, Varsha Kishore, Felix Wu, Kilian Q. Weinberger, and Yoav Artzi. 2020. BERTScore: Evaluating text generation with BERT. arXiv, arXiv:1904.09675v3.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "MoverScore: Text generation evaluating with contextualized embeddings and earth mover distance", |
|
"authors": [ |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maxime", |
|
"middle": [], |
|
"last": "Peyrard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fei", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christian", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Meyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Steffen", |
|
"middle": [], |
|
"last": "Eger", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "563--578", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wei Zhao, Maxime Peyrard, Fei Liu, Yang Gao, Chris- tian M. Meyer, and Steffen Eger. 2019. MoverScore: Text generation evaluating with contextualized em- beddings and earth mover distance. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th Interna- tional Joint Conference on Natural Language Pro- cessing, pages 563-578. Association for Computa- tional Linguistics.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF1": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "Kandall Tau-c correlation between Sum-mEval experts scores and ESTIME using embeddings taken from different layers of the model.", |
|
"num": null |
|
}, |
|
"FIGREF2": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "Spearman and Kendall Tau-c correlations -system level -between SummEval experts scores of consistency and ESTIME using embeddings taken from different layers of the model.B Count of winning tokens in textESTIME is defined in Equation 1 and is considered through the paper as a count of 'alarming' summary tokens. It could be alternatively defined as a count of all winner-tokens from the text, as defined in Equation 3.", |
|
"num": null |
|
}, |
|
"FIGREF3": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "Kandall Tau-c correlation between Sum-mEval experts scores and ESTIME by embeddings from different layers of the model. Thick lines: ESTIME (as defined by Equation 1 and considered through the paper). Thin lines: N w as defined by Equation 3.", |
|
"num": null |
|
}, |
|
"FIGREF4": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "Spearman correlation between SummEval experts scores and ESTIME by embeddings from different layers of the model. Thick lines: ESTIME (as defined by Equation 1 and considered through the paper). Thin lines: N w as defined by Equation 3.", |
|
"num": null |
|
}, |
|
"FIGREF5": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "Kandall Tau-c correlation between Sum-mEval experts scores and ESTIME by embeddings from different layers of the model. Thick lines: unnormalized embeddings (as used through the paper). Thin lines: normalized embeddings.", |
|
"num": null |
|
}, |
|
"FIGREF6": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "Spearman correlation between SummEval experts scores and ESTIME by embeddings from different layers of the model. Thick lines: unnormalized embeddings (as used through the paper). Thin lines: normalized embeddings.", |
|
"num": null |
|
}, |
|
"FIGREF7": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "Kandall Tau-c correlation between Sum-mEval experts scores and ESTIME by embeddings from different layers of the model. Thick lines: the model is bert-large-uncased-whole-word-masking. Thin lines: the model is bert-base-uncased.", |
|
"num": null |
|
}, |
|
"FIGREF8": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "Spearman correlation between SummEval experts scores and EESTIME by embeddings from different layers of the model. Thick lines: the model is bert-large-uncased-whole-word-masking. Thin lines: the model is bert-base-uncased.", |
|
"num": null |
|
}, |
|
"TABREF0": { |
|
"num": null, |
|
"content": "<table><tr><td>measure</td><td colspan=\"2\">consistency</td><td colspan=\"2\">relevance</td><td/><td>coherence</td><td colspan=\"2\">fluency</td></tr><tr><td/><td>\u03c1</td><td>\u03c4</td><td>\u03c1</td><td>\u03c4</td><td>\u03c1</td><td>\u03c4</td><td>\u03c1</td><td>\u03c4</td></tr><tr><td>(-)ESTIME-21</td><td colspan=\"8\">0.404 0.200 0.188 0.134 0.300 0.217 0.399 0.232</td></tr><tr><td>(-)ESTIME-24</td><td colspan=\"8\">0.358 0.176 0.117 0.084 0.187 0.134 0.363 0.209</td></tr><tr><td>(-)J-Shannon</td><td colspan=\"8\">0.193 0.095 0.406 0.298 0.289 0.213 0.125 0.072</td></tr><tr><td>SummaQA-F1</td><td colspan=\"3\">0.174 0.085 0.16</td><td colspan=\"4\">0.113 0.089 0.065 0.12</td><td>0.069</td></tr><tr><td>SummaQA-P</td><td colspan=\"8\">0.197 0.097 0.179 0.127 0.112 0.082 0.133 0.076</td></tr><tr><td>SUPERT</td><td colspan=\"8\">0.297 0.147 0.306 0.222 0.236 0.175 0.175 0.101</td></tr><tr><td colspan=\"9\">BERTScore-F1 0.109 0.053 0.371 0.273 0.377 0.277 0.142 0.082</td></tr><tr><td>BERTScore-P</td><td colspan=\"8\">0.055 0.027 0.268 0.196 0.323 0.238 0.126 0.072</td></tr><tr><td>BERTScore-R</td><td colspan=\"7\">0.164 0.081 0.423 0.309 0.345 0.253 0.12</td><td>0.069</td></tr><tr><td>BLEU</td><td colspan=\"8\">0.095 0.047 0.213 0.153 0.176 0.128 0.140 0.080</td></tr><tr><td>ROUGE-L</td><td colspan=\"8\">0.115 0.057 0.241 0.174 0.170 0.124 0.079 0.045</td></tr><tr><td>ROUGE-1</td><td colspan=\"8\">0.137 0.067 0.302 0.220 0.184 0.134 0.080 0.046</td></tr><tr><td>ROUGE-2</td><td colspan=\"8\">0.129 0.063 0.245 0.177 0.146 0.105 0.063 0.036</td></tr><tr><td>ROUGE-3</td><td colspan=\"8\">0.149 0.073 0.251 0.180 0.160 0.116 0.066 0.038</td></tr><tr><td/><td/><td/><td>96</td><td/><td/><td/><td/></tr></table>", |
|
"type_str": "table", |
|
"text": "BLANC-AXXL 0.200 0.098 0.246 0.179 0.127 0.093 0.115 0.066 BLANC-BLU 0.207 0.102 0.217 0.156 0.116 0.085 0.112 0.065 (-)ESTIME-12 0.374 0.184 0.140 0.100 0.238 0.173 0.343 0.198", |
|
"html": null |
|
}, |
|
"TABREF1": { |
|
"num": null, |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"text": "", |
|
"html": null |
|
}, |
|
"TABREF3": { |
|
"num": null, |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"text": "", |
|
"html": null |
|
} |
|
} |
|
} |
|
} |