|
{ |
|
"paper_id": "L18-1016", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T11:39:12.692743Z" |
|
}, |
|
"title": "A Corpus for Modeling Word Importance in Spoken Dialogue Transcripts", |
|
"authors": [ |
|
{ |
|
"first": "Sushant", |
|
"middle": [], |
|
"last": "Kafle", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Matt", |
|
"middle": [], |
|
"last": "Huenerfauth", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Motivated by a project to create a system for people who are deaf or hard-of-hearing that would use automatic speech recognition (ASR) to produce real-time text captions of spoken English during in-person meetings with hearing individuals, we have augmented a transcript of the Switchboard conversational dialogue corpus with an overlay of word-importance annotations, with a numeric score for each word, to indicate its importance to the meaning of each dialogue turn. Further, we demonstrate the utility of this corpus by training an automatic word importance labeling model; our best performing model has an F-score of 0.60 in an ordinal 6-class word-importance classification task with an agreement (concordance correlation coefficient) of 0.839 with the human annotators (agreement score between annotators is 0.89). Finally, we discuss our intended future applications of this resource, particularly for the task of evaluating ASR performance, i.e. creating metrics that predict ASR-output caption text usability for DHH users better than Word Error Rate (WER).", |
|
"pdf_parse": { |
|
"paper_id": "L18-1016", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Motivated by a project to create a system for people who are deaf or hard-of-hearing that would use automatic speech recognition (ASR) to produce real-time text captions of spoken English during in-person meetings with hearing individuals, we have augmented a transcript of the Switchboard conversational dialogue corpus with an overlay of word-importance annotations, with a numeric score for each word, to indicate its importance to the meaning of each dialogue turn. Further, we demonstrate the utility of this corpus by training an automatic word importance labeling model; our best performing model has an F-score of 0.60 in an ordinal 6-class word-importance classification task with an agreement (concordance correlation coefficient) of 0.839 with the human annotators (agreement score between annotators is 0.89). Finally, we discuss our intended future applications of this resource, particularly for the task of evaluating ASR performance, i.e. creating metrics that predict ASR-output caption text usability for DHH users better than Word Error Rate (WER).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "There has been increasing interest among researchers of speech and language technology applications to identify the importance of individual words, for the overall meaning of the text. Depending on the context of how the importance of a word is defined, this task has found use in varieties of applications such as text summarization (Hong and Nenkova, 2014; Yih et al., 2007) , text classification (Sheikh et al., 2016) , or speech synthesis (Mishra et al., 2007) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 334, |
|
"end": 358, |
|
"text": "(Hong and Nenkova, 2014;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 359, |
|
"end": 376, |
|
"text": "Yih et al., 2007)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 399, |
|
"end": 420, |
|
"text": "(Sheikh et al., 2016)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 443, |
|
"end": 464, |
|
"text": "(Mishra et al., 2007)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "Our laboratory is currently designing a system to benefit people who are deaf or hard-of-hearing (DHH) who are engaged in a live meeting with hearing colleagues. In many settings, sign language interpreting or professional captioning (where a human types the speech, displayed as text on a screen for the user), are unavailable, e.g. in impromptu conversations in the workplace. A system that uses automatic speech recognition (ASR) to generate captions in real-time could display this text on mobile devices for DHH users, but text output from ASR systems inevitably contains errors. Thus, we were motivated to understand which words in the text were most important to the overall meaning, to inform our evaluation of ASR accuracy for this task.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "In this paper, we present a word-importance annotation of transcripts of the Switchboard corpus . While our overall goal is to produce measures of ASR accuracy for our caption application; to demonstrate the use of this corpus, in this paper, we present models that predict word-importance in spoken dialogue transcripts.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "ASR researchers generally report the performance of their systems using a metric called Word Error Rate (WER). The metric considers the number of errors in the output of the ASR system, normalized by the number of words human actually said in the audio recording. While WER has been the most commonly used intrinsic measure for the evaluation of ASR, there have been criticisms of WER (Mc-Cowan et al., 2004; Morris et al., 2004) , and several re-searchers have recommended alternative measures to better predict human task-performance in applications that depend on ASR (Garofolo et al., 2000; Mishra et al., 2011; Kafle and Huenerfauth, 2016) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 385, |
|
"end": 408, |
|
"text": "(Mc-Cowan et al., 2004;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 409, |
|
"end": 429, |
|
"text": "Morris et al., 2004)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 571, |
|
"end": 594, |
|
"text": "(Garofolo et al., 2000;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 595, |
|
"end": 615, |
|
"text": "Mishra et al., 2011;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 616, |
|
"end": 644, |
|
"text": "Kafle and Huenerfauth, 2016)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ASR Evaluation", |
|
"sec_num": "1.1." |
|
}, |
|
{ |
|
"text": "Among these newly proposed metrics, a common theme has been: rather than simply counting the number of errors, it would be better to consider the importance of the individual words that are incorrect -suggesting that it would be better to more heavily penalize systems that make errors on words that are important (with the definition of importance based on the specific application or task). This approach of penalizing errors differentially has been shown to be useful in various application settings, e.g. in our research for DHH users, we have found that an evaluation metric designed for predicting the usability of an ASR-generated transcription as a caption text for these users could benefit from word importance information (Kafle and Huenerfauth, 2017) . However, estimating the importance of a word has been challenging for our team thus far, because we have lacked corpora of conversational dialogue with word-importance annotation, for training a word-importance model.", |
|
"cite_spans": [ |
|
{ |
|
"start": 733, |
|
"end": 762, |
|
"text": "(Kafle and Huenerfauth, 2017)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ASR Evaluation", |
|
"sec_num": "1.1." |
|
}, |
|
{ |
|
"text": "Prior research on identifying and scoring important words in a text has largely focused on the task of keyword extraction, which involves identifying a set of descriptive words in a document that serves as a dense summary of the document. Several automatic keyword extraction techniques have been investigated over the years, including unsupervised methods using, e.g. Term Frequency x Inverse Document Frequency (TF-IDF) weighting (HaCohen-Kerner et al., 2005) , word co-occurrence probability estimation (Matsuo and Ishizuka, 2004) -as well as supervised methods that leverage various linguistic features from text to achieve strong predictive performance (Liu et al., 2011; Liu et al., 2004; Hulth, 2003; Sheeba and Vivekanandan, 2012) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 432, |
|
"end": 461, |
|
"text": "(HaCohen-Kerner et al., 2005)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 658, |
|
"end": 676, |
|
"text": "(Liu et al., 2011;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 677, |
|
"end": 694, |
|
"text": "Liu et al., 2004;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 695, |
|
"end": 707, |
|
"text": "Hulth, 2003;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 708, |
|
"end": 738, |
|
"text": "Sheeba and Vivekanandan, 2012)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Importance Estimation", |
|
"sec_num": "1.2." |
|
}, |
|
{ |
|
"text": "While this conceptualization of word importance as a keyword-extraction problem has led to positive results in the field of text summarization (Litvak and Last, 2008; Wan et al., 2007; Hong and Nenkova, 2014) , this approach may not generalize to other applications. For instance, given the sometimes meandering nature of topic transition in spontaneous speech dialogue (Sheeba and Vivekanandan, 2012) , applications that process transcripts of such dialogue may benefit from a model of word importance that is more local, i.e. based on the importance of a word at sentential, utterance, or local dialogue level, rather than at a documentlevel. Furthermore, the dyadic nature of dialogue, with interleaved contributions from multiple speakers, may require special consideration when evaluating word importance. In this paper, we present a corpus with annotation of word importance that could be used to support research into these complex issues.", |
|
"cite_spans": [ |
|
{ |
|
"start": 143, |
|
"end": 166, |
|
"text": "(Litvak and Last, 2008;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 167, |
|
"end": 184, |
|
"text": "Wan et al., 2007;", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 185, |
|
"end": 208, |
|
"text": "Hong and Nenkova, 2014)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 370, |
|
"end": 401, |
|
"text": "(Sheeba and Vivekanandan, 2012)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Importance Estimation", |
|
"sec_num": "1.2." |
|
}, |
|
{ |
|
"text": "In eye-tracking studies of reading behavior, researchers have found that readers rarely glance at every word in a text sequentially: Instead, they sometimes regress (glance back at previous words), re-fixate on a word, or skip words entirely (Rayner, 1998) . This research supports the premise that some words are of higher importance than others, for readers. Analyses of eye-tracking recordings have revealed a relationship between these eye-movement behaviors and various linguistic features, e.g. word length or word predictability. In general, readers' gaze often skips over words that are shorter or more predictable (Rayner et al., 2011) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 242, |
|
"end": 256, |
|
"text": "(Rayner, 1998)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 623, |
|
"end": 644, |
|
"text": "(Rayner et al., 2011)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Defining Word Importance", |
|
"sec_num": "2." |
|
}, |
|
{ |
|
"text": "While eye-tracking suggests some features that may relate to readers' judgments of word importance, at least as expressed through their choice of eye fixations, we needed to develop a specific definition of word importance in order to develop annotation guidelines for our study. Rather than ask annotators to consider specific features, e.g. word length, which may pre-suppose a particular model, we instead took a functional perspective, with our application domain in mind. That is, we define word importance for spontaneous spoken conversation as the degree to which a reader of a transcript of the dialogue would be unable to understand the overall meaning of a conversational utterance (a single turn of dialogue) if that word had been \"dropped\" or omitted from the transcript. This definition underlies our annotation scheme (in 3 1 ) and suits our target application, i.e. evaluating ASR for real-time captioning of meetings.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Defining Word Importance", |
|
"sec_num": "2." |
|
}, |
|
{ |
|
"text": "In addition, for our annotation project, we defined wordimportance as a single-dimensional property, which could be expressed on a continuous scale from 0.0 (not important at all to the meaning of the utterance) to 1.0 (very important). Figure 1 illustrates how numerical importance scores can be assigned to words in a sentence -in fact, this figure displays actual scores assigned by a human annotator working on our project. Of course, asking human annotators to assign specific numerical scores to quantify the importance of a word is not straightforward. In later sections, we discuss how we attempt to overcome the subjective nature of this task, to promote consistency between annotators, as we developed this annotated resource (see Section 3 1 ). Section 4 characterizes the level of agreement between our annotators on this task. Figure 1 : Visualization of importance scores assigned to words in a sentence by a human annotator on our project, with the height and font-size of words indicating their importance score (and redundant color coding: green for highimportance words with score above 0.6, blue for words with score between 0.3 and 0.6, and gray otherwise).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 237, |
|
"end": 245, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 840, |
|
"end": 848, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Defining Word Importance", |
|
"sec_num": "2." |
|
}, |
|
{ |
|
"text": "The Switchboard corpus consists of audio recordings of approximately 260 hours of speech consisting of about 2,400 two-sided telephone conversations among 543 speakers (302 male, 241 female) from across the United States . In January 2003, the Institute for Signal and Information Processing (ISIP) released written transcripts for the entire corpus, which consists of nearly 400,000 conversational turns. The ISIP transcripts include a complete lexicon list and automatic word alignment timing corresponding to the original audio files 1 .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Corpus Annotation", |
|
"sec_num": "3." |
|
}, |
|
{ |
|
"text": "In our project, a pair of annotators have assigned wordimportance scores to these transcripts. As of September 2017, they have annotated over 25,000 tokens, with the overlap of approximately 3,100 tokens. With this paper, we announce the release 2 of these annotations as a set of supplementary files, aligned to the ISIP transcripts. Our annotation work continues, and we aim to annotate all of the Switchboard corpus and with a larger group of annotators.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Corpus Annotation", |
|
"sec_num": "3." |
|
}, |
|
{ |
|
"text": "To reduce the cognitive load on annotators and to promote consistency, we created the following annotation scheme: Range and Constraints. Each word is assigned a numeric score between [0, 1], where 1 indicates a high importance score; the numeric score has the precision of 0.05. Importance scores are not meant to indicate an absolute proportion of the utterance's meaning represented by each word, i.e. the scores do not have to sum to 1. Methodology. Given an utterance (a speaker's single turn in the conversation), the annotator first considers the overall meaning conveyed by the utterance, with the help of the previous conversation history (if available). The annotator then scores each word based on its (direct or indirect) contribution to the utterance's meaning, using the rubric described in the Interpretation and Scoring section below. [0 -0.3) Words that are of least importance -these words can be easily omitted from the text without much consequence.", |
|
"cite_spans": [ |
|
{ |
|
"start": 851, |
|
"end": 859, |
|
"text": "[0 -0.3)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Annotation Scheme", |
|
"sec_num": "3.1." |
|
}, |
|
{ |
|
"text": "[0.3 -0.6) Words that are fairly important -omitting these words will take away some important details from the utterance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Description", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "[0.6 -1] Words that are of high importance -omitting these words will change the message of the utterance quite significantly. Table 1 : Guidance for the annotators to promote consistency and uniformity in the use of numerical scores.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 127, |
|
"end": 134, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Description", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Rating Scheme. To help annotators calibrate their scores, Table 1 provides some recommendations for how to select word-importance scores in various numerical ranges.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 58, |
|
"end": 65, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Description", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Interpretation and Scoring. Annotators should consider how their understanding of the utterance would be affected if this word had been \"dropped,\" i.e. replaced with a blank space (\" \"). Since these are conversations between pairs of speakers, annotators should consider how much the other person in the conversation would have difficulty understanding the speaker's message if that word had been omitted, i.e. if they had not heard that word intelligibly.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Description", |
|
"sec_num": null |
|
}, |
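{

"text": "To make the 'Range and Constraints' rule above concrete, the following is a minimal Python sketch (an illustration, not part of the paper's released tooling) that checks whether a score satisfies the scheme, i.e. lies in [0, 1] and is quantized to steps of 0.05:\n\ndef is_valid_score(score):\n    # Scheme: scores lie in [0, 1] with a precision of 0.05.\n    if not 0.0 <= score <= 1.0:\n        return False\n    # Accept only (near-)multiples of 0.05, tolerating float rounding.\n    return abs(score - round(score / 0.05) * 0.05) < 1e-9\n\nassert is_valid_score(0.35) and not is_valid_score(0.33)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Annotation Scheme",

"sec_num": "3.1."

},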
|
{ |
|
"text": "There were 3,100 tokens in our \"overlap\" set, i.e. the subset of transcripts independently labeled by both annotators. This set was used as the basis for calculating inter-annotator agreement. Since scores were nearly continuous (ranges [0,1] with a precision of 0.05), we computed the Concordance Correlation Coefficient (\u03c1 c ), also known as Lin's concordance correlation coefficient, as our primary metric for measuring the agreement between the annotators. This metric indicates how well a new test or measurement (X) reproduces a gold standard or measure (Y). Considering the annotations from one annotator as a gold standard, we can generalize this measure to compute the agreement between two annotators. Like other correlation coefficients, \u03c1 c also ranges from -1 to 1; 1 being the score of perfect agreement.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Inter-annotator Agreement", |
|
"sec_num": "4." |
|
}, |
|
{ |
|
"text": "Concordance between the two measures can be characterized by the expected value of their squared difference as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Inter-annotator Agreement", |
|
"sec_num": "4." |
|
}, |
|
{ |
|
"text": "E[(Y \u2212 X) 2 ] = (\u00b5 y \u2212 \u00b5 x ) 2 + \u03c3 2 x + \u03c3 2 y \u2212 2\u03c1\u03c3 x \u03c3 y (1)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Inter-annotator Agreement", |
|
"sec_num": "4." |
|
}, |
|
{ |
|
"text": "where, \u03c1 is the correlation coefficient, \u00b5 x and \u00b5 y are the means of the population of the variables X and Y , and \u03c3 x and \u03c3 y are their standard deviation. The expectation score coefficient (between -1 and 1) is calculated as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Inter-annotator Agreement", |
|
"sec_num": "4." |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "\u03c1 c = 2\u03c1S x S y (\u0232 \u2212X) 2 + S 2 x + S 2 y", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "Inter-annotator Agreement", |
|
"sec_num": "4." |
|
}, |
|
{ |
|
"text": "where, \u03c1 c is the correlation coefficient,X and\u0232 are the mean of X and Y , and S x and S y are standard deviations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Inter-annotator Agreement", |
|
"sec_num": "4." |
|
}, |
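{

"text": "As an illustration of Equation (2), the following is a minimal sketch (not the paper's released code) of Lin's concordance correlation coefficient; it assumes two NumPy arrays of paired annotator scores and uses population (biased) standard deviations:\n\nimport numpy as np\n\ndef concordance_correlation(x, y):\n    # Pearson correlation between the two annotators' scores.\n    pearson_r = np.corrcoef(x, y)[0, 1]\n    # Population standard deviations, matching Equation (2).\n    s_x, s_y = x.std(), y.std()\n    return (2 * pearson_r * s_x * s_y) / ((y.mean() - x.mean()) ** 2 + s_x ** 2 + s_y ** 2)\n\n# Example: near-identical annotations give a coefficient close to 1.\na1 = np.array([0.0, 0.3, 0.9, 0.6, 0.05])\na2 = np.array([0.0, 0.35, 0.85, 0.6, 0.1])\nprint(concordance_correlation(a1, a2))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Inter-annotator Agreement",

"sec_num": "4."

},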
|
{ |
|
"text": "We obtained an agreement score (\u03c1 c ) of 0.89 between our annotators, which we interpret as an acceptable level of from (Lample et al., 2016) . The bottom layer represents word-embedding inputs, passed to bi-directional LSTM layers above. Each LSTM takes as input the hidden state from the previous time step and word embeddings from the current step, and outputs a new hidden state. C i concatenates hidden representations from LSTMs (L i and R i ) to represent the word at time i in its context.] agreement, given the subjective nature of the task of quantifying word importance in spoken dialogue transcripts.", |
|
"cite_spans": [ |
|
{ |
|
"start": 120, |
|
"end": 141, |
|
"text": "(Lample et al., 2016)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Inter-annotator Agreement", |
|
"sec_num": "4." |
|
}, |
|
{ |
|
"text": "To demonstrate the use of this corpus, we trained a prediction model, by adopting the neural architecture described in (Lample et al., 2016) consisting of bidirectional LSTM encoders with a sequential Conditional Random Field (CRF) layer on top. Our input word tokens were first mapped to a sequence of pre-trained distributed embeddings (Pennington et al., 2014) and then combined with the learned character-based word representations to get the final word representation. As shown in Figure 2 , the bidirectional LSTM encoders are used to create a context-aware representation of each word. The hidden representations from each LSTM were concatenated to obtain a final representation, conditioned on the whole sentence. The CRF layer uses this representation to look for the most optimal state (Y ) sequence through all the possible state configurations.", |
|
"cite_spans": [ |
|
{ |
|
"start": 119, |
|
"end": 140, |
|
"text": "(Lample et al., 2016)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 338, |
|
"end": 363, |
|
"text": "(Pennington et al., 2014)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 486, |
|
"end": 494, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Automatic Prediction", |
|
"sec_num": "5." |
|
}, |
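{

"text": "The context encoder described above can be sketched as follows (a minimal tf.keras illustration, not the released implementation; the vocabulary size and embedding dimension are placeholder values, and the character-level component and CRF layer are omitted):\n\nimport tensorflow as tf\n\nvocab_size, embed_dim, lstm_units = 20000, 100, 300  # placeholder sizes\ntokens = tf.keras.Input(shape=(None,), dtype=\"int32\")\nemb = tf.keras.layers.Embedding(vocab_size, embed_dim, mask_zero=True)(tokens)\n# Bidirectional LSTM; merge_mode=\"concat\" yields C_i = [L_i; R_i] per token.\ncontext = tf.keras.layers.Bidirectional(\n    tf.keras.layers.LSTM(lstm_units, return_sequences=True),\n    merge_mode=\"concat\")(emb)\nencoder = tf.keras.Model(tokens, context)  # output: (batch, time, 2 * lstm_units)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Automatic Prediction",

"sec_num": "5."

},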
|
{ |
|
"text": "The neural framework was implemented using Tensorflow, and the code is publicly available 3 . The word embeddings were initialized with publicly available pre-trained glove vectors (Pennington et al., 2014) . The embeddings for characters were set to length 100 and were initialized randomly. The LSTM layer size was set to 300 in each direction for word-and 100 for character-level components. Parameters were optimized using the Adam (Kingma and Ba, 2014) optimizer, with the learning rate initialized at 0.001 with a decay rate of 0.9, and sentences were grouped into batches of size 20. We applied a dropout with a probability of 0.5 during training on word embeddings.", |
|
"cite_spans": [ |
|
{ |
|
"start": 181, |
|
"end": 206, |
|
"text": "(Pennington et al., 2014)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 436, |
|
"end": 457, |
|
"text": "(Kingma and Ba, 2014)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Automatic Prediction", |
|
"sec_num": "5." |
|
}, |
|
{ |
|
"text": "We investigated two variations of this model: (i) a bidirectional LSTM model with sequential CRF layer on top (LSTM-CRF) treating the problem as a discrete classification task, (ii) a new bidirectional LSTM model with a sigmoid layer on top (LSTM-SIG) for a continuous prediction. The LSTM-CRF models the prediction task as a classification problem, using a fixed number of non-ordinal class labels. In contrast, the LSTM-SIG model provides a continuous prediction, using a sigmoid nonlinearity to bound the prediction scores between 0 and 1. Using a square loss, we train this model to directly learn to predict the annotation scores, similar to a regression task.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Automatic Prediction", |
|
"sec_num": "5." |
|
}, |
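{

"text": "Continuing the encoder sketch above, the LSTM-SIG variant can be illustrated by adding a per-token sigmoid output trained with a square loss and the Adam settings reported earlier (again a sketch, not the released code; the decay_steps value is an assumption, since only the decay rate is reported):\n\n# Per-token sigmoid head bounds predictions to [0, 1] (LSTM-SIG).\nscores = tf.keras.layers.TimeDistributed(\n    tf.keras.layers.Dense(1, activation=\"sigmoid\"))(context)\nmodel = tf.keras.Model(tokens, scores)\n# Adam with learning rate 0.001 decaying by 0.9 (decay_steps assumed).\noptimizer = tf.keras.optimizers.Adam(\n    learning_rate=tf.keras.optimizers.schedules.ExponentialDecay(\n        0.001, decay_steps=1000, decay_rate=0.9))\nmodel.compile(optimizer=optimizer, loss=\"mse\")  # square loss\n# model.fit(train_tokens, train_scores, batch_size=20, ...)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Automatic Prediction",

"sec_num": "5."

},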
|
{ |
|
"text": "Partitioning our corpus as 80% training, 10% development, and 10% test sets, we evaluated our model using two measures: (i) total root mean square error (RMS) -the deviation of the model predictions from the human-annotations and, (ii) F 1 measure in a classification task -the ability of the model to predict human-annotations categorized into a group of classes. To evaluate performance in terms of classification, we discretized annotation scores into 6 classes: Table 2 summarizes the performance of our models on the test set, presenting average scores for 5 different configurations, to compensate for outlier results due to randomness in model initialization. While the LSTM-CRF had a better (higher) F-score on the classification task, its RMS score was worse (higher) than the LSTM-SIG model, which may be due to the limitation of the model as discussed in Section 5.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 466, |
|
"end": 473, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Evaluation and Discussion", |
|
"sec_num": "5.1." |
|
}, |
|
{ |
|
"text": "Model RMS F 1 (macro)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation and Discussion", |
|
"sec_num": "5.1." |
|
}, |
|
{ |
|
"text": "LSTM-CRF 0.154 0.60 LSTM-SIG 0.120 0.519 Table 2 : Model performance in terms of RMS deviation and macro-averaged F 1 score, with best results in bold font. Figure 3 provide a more detailed view of the classification performance of each model. Since the LSTM-SIG was trained to optimize the accuracy of its continuous predictions, rather than its discrete assignment of instances to classes, it is not surprising to see a \"wider diagonal\" in the confusion matrix in Figure 3(b) , which indicates that the LSTM-SIG model was more likely to misclassify words using ordinally adjacent classes. The figure illustrates that both models were worse at classifying words with importance scores in the middle range [0.3, 0.7).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 41, |
|
"end": 48, |
|
"text": "Table 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 157, |
|
"end": 165, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 466, |
|
"end": 477, |
|
"text": "Figure 3(b)", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Evaluation and Discussion", |
|
"sec_num": "5.1." |
|
}, |
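{

"text": "The two evaluation measures can be sketched as follows (an illustration, not the paper's evaluation code; the class edges after 0.3 assume the bins continue in steps of 0.2, inferred from 'c_1 = [0, 0.1), c_2 = [0.1, 0.3), and so forth' in the caption of Figure 3):\n\nimport numpy as np\n\ndef rms_error(pred, gold):\n    # Root mean square deviation of model predictions from human annotations.\n    return float(np.sqrt(np.mean((np.asarray(pred) - np.asarray(gold)) ** 2)))\n\n# Discretize [0, 1] scores into 6 ordinal classes (edges partly assumed).\nEDGES = [0.1, 0.3, 0.5, 0.7, 0.9]\n\ndef to_class(score):\n    return int(np.digitize(score, EDGES))  # 0 .. 5\n\nprint(rms_error([0.2, 0.8], [0.25, 0.7]), to_class(0.35))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Evaluation and Discussion",

"sec_num": "5.1."

},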
|
{ |
|
"text": "Treating our human-annotations as ground truth, we also computed the concordance correlation coefficient to measure the agreement between the human annotation and each model. The average correlation between the human annotator and the LSTM-CRF model was higher (\u03c1 c = 0.839), as compared to the LSTM-SIG model (\u03c1 c = 0.826).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Confusion matrices in", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We have presented a new collection of annotation of transcripts of the Switchboard conversational speech corpus, produced through human annotation of the importance of individual words to the meaning of each utterance. We have demonstrated the use of this data by training wordimportance prediction models, with the best model achieving an F 1 score of 0.60 and model-human agreement correlation of 0.839. In future work, we will collect additional human annotations for additional sections of the corpus. This research is part of a project on the use of ASR to provide real-time captions of speech for DHH individuals during meetings, and we plan to incorporate these word-importance models into new word-importanceweighted metrics of ASR accuracy, to better predict the usability of ASR-produced captions for these users.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions and Future Work", |
|
"sec_num": "6." |
|
}, |
|
{ |
|
"text": "This material was based on work supported by the National Technical Institute for the Deaf (NTID). We thank Tomomi Takeuchi and Michael Berezny for their contributions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgement", |
|
"sec_num": "7." |
|
}, |
|
{ |
|
"text": "https://www.isip.piconepress.com/projects/switchboard/ 2 http://latlab.ist.rit.edu/lrec2018", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://github.com/SushantKafle/speechtext-wimp-labeler", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "The trec spoken document retrieval track: A success story", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Garofolo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [ |
|
"G" |
|
], |
|
"last": "Auzanne", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Voorhees", |
|
"suffix": "" |
|
}

],
|
"year": 2000, |
|
"venue": "Content-Based Multimedia Information Access", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1--20", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Garofolo, J. S., Auzanne, C. G., and Voorhees, E. M. (2000). The trec spoken document retrieval track: A success story. In Content-Based Multimedia In- formation Access-Volume 1, pages 1-20. LE CEN- TRE DE HAUTES ETUDES INTERNATIONALES D'INFORMATIQUE DOCUMENTAIRE.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "SWITCHBOARD: Telephone speech corpus for research and development", |
|
"authors": [ |
|
{ |
|
"first": "John", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Godfrey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edward", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Holliman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jane", |
|
"middle": [], |
|
"last": "Mcdaniel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1992, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Godfrey, John J and Holliman, Edward C and McDaniel, Jane. (1992). SWITCHBOARD: Telephone speech cor- pus for research and development.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Automatic extraction and learning of keyphrases from scientific articles. Computational linguistics and intelligent text processing", |
|
"authors": [ |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Hacohen-Kerner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Z", |
|
"middle": [], |
|
"last": "Gross", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Masa", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "657--669", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "HaCohen-Kerner, Y., Gross, Z., and Masa, A. (2005). Au- tomatic extraction and learning of keyphrases from sci- entific articles. Computational linguistics and intelligent text processing, pages 657-669.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Improving the estimation of word importance for news multi-document summarization", |
|
"authors": [ |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Hong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Nenkova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "EACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "712--721", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hong, K. and Nenkova, A. (2014). Improving the estima- tion of word importance for news multi-document sum- marization. In EACL, pages 712-721.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Improved automatic keyword extraction given more linguistic knowledge", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Hulth", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Proceedings of the 2003 conference on Empirical methods in natural language processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "216--223", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hulth, A. (2003). Improved automatic keyword extraction given more linguistic knowledge. In Proceedings of the 2003 conference on Empirical methods in natural lan- guage processing, pages 216-223. Association for Com- putational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Effect of speech recognition errors in text understandability for people who are deaf or hard-of-hearing", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Kafle", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Huenerfauth", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 7th Workshop on Speech and Language Processing for Assistive Technologies (SLPAT)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kafle, S. and Huenerfauth, M. (2016). Effect of speech recognition errors in text understandability for people who are deaf or hard-of-hearing. In Proceedings of the 7th Workshop on Speech and Language Processing for Assistive Technologies (SLPAT). Interspeech.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Evaluating the usability of automatically generated captions for people who are deaf or hard of hearing", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Kafle", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Huenerfauth", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 19th International ACM SIGACCESS Conference on Computers and Accessibility", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kafle, S. and Huenerfauth, M. (2017). Evaluating the usability of automatically generated captions for peo- ple who are deaf or hard of hearing. In Proceedings of the 19th International ACM SIGACCESS Conference on Computers and Accessibility. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Adam: A method for stochastic optimization", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Kingma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Ba", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1412.6980" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kingma, D. and Ba, J. (2014). Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Neural architectures for named entity recognition", |
|
"authors": [ |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Lample", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Ballesteros", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Subramanian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Kawakami", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Dyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "NAACL HLT 2016, The 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "260--270", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lample, G., Ballesteros, M., Subramanian, S., Kawakami, K., and Dyer, C. (2016). Neural architectures for named entity recognition. In NAACL HLT 2016, The 2016 Con- ference of the North American Chapter of the Associa- tion for Computational Linguistics: Human Language Technologies, San Diego California, USA, June 12-17, 2016, pages 260-270.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Graph-based keyword extraction for single-document summarization", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Litvak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Last", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of the Workshop on Multi-source Multilingual Information Extraction and Summarization, MMIES '08", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "17--24", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Litvak, M. and Last, M. (2008). Graph-based keyword extraction for single-document summarization. In Pro- ceedings of the Workshop on Multi-source Multilingual Information Extraction and Summarization, MMIES '08, pages 17-24, Stroudsburg, PA, USA. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Text classification by labeling words", |
|
"authors": [ |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "X", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yu", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "AAAI", |
|
"volume": "4", |
|
"issue": "", |
|
"pages": "425--430", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Liu, B., Li, X., Lee, W. S., and Yu, P. S. (2004). Text clas- sification by labeling words. In AAAI, volume 4, pages 425-430.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "A supervised framework for keyword extraction from meeting transcripts", |
|
"authors": [ |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "IEEE Transactions on Audio, Speech, and Language Processing", |
|
"volume": "19", |
|
"issue": "3", |
|
"pages": "538--548", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Liu, F., Liu, F., and Liu, Y. (2011). A supervised frame- work for keyword extraction from meeting transcripts. IEEE Transactions on Audio, Speech, and Language Processing, 19(3):538-548, March.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Keyword extraction from a single document using word co-occurrence statistical information", |
|
"authors": [ |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Matsuo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Ishizuka", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "International Journal on Artificial Intelligence Tools", |
|
"volume": "13", |
|
"issue": "01", |
|
"pages": "157--169", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matsuo, Y. and Ishizuka, M. (2004). Keyword extraction from a single document using word co-occurrence sta- tistical information. International Journal on Artificial Intelligence Tools, 13(01):157-169.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "On the use of information retrieval measures for speech recognition evaluation", |
|
"authors": [ |
|
{ |
|
"first": "I", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Mccowan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Moore", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Dines", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Gatica-Perez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Flynn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Wellner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Bourlard", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "McCowan, I. A., Moore, D., Dines, J., Gatica-Perez, D., Flynn, M., Wellner, P., and Bourlard, H. (2004). On the use of information retrieval measures for speech recog- nition evaluation. Technical report, IDIAP.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Word accentuation prediction using a neural net classifier", |
|
"authors": [ |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Mishra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [ |
|
"T" |
|
], |
|
"last": "Prud'hommeaux", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Van Santen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "SSW", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "246--251", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mishra, T., Prud'hommeaux, E. T., and van Santen, J. P. (2007). Word accentuation prediction using a neural net classifier. In SSW, pages 246-251.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Predicting human perceived accuracy of asr systems", |
|
"authors": [ |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Mishra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Ljolje", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gilbert", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "INTER-SPEECH", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1945--1948", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mishra, T., Ljolje, A., and Gilbert, M. (2011). Predicting human perceived accuracy of asr systems. In INTER- SPEECH, pages 1945-1948.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "From wer and ril to mer and wil: improved evaluation measures for connected speech recognition", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Morris", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Maier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Green", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Eighth International Conference on Spoken Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Morris, A. C., Maier, V., and Green, P. (2004). From wer and ril to mer and wil: improved evaluation measures for connected speech recognition. In Eighth International Conference on Spoken Language Processing.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Glove: Global vectors for word representation", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Pennington", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 2014 conference on empirical methods in natural language processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1532--1543", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pennington, J., Socher, R., and Manning, C. (2014). Glove: Global vectors for word representation. In Pro- ceedings of the 2014 conference on empirical methods in natural language processing (EMNLP), pages 1532- 1543.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Eye movements and word skipping during reading: effects of word length and predictability", |
|
"authors": [ |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Rayner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Slattery", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Drieghe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Liversedge", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Journal of Experimental Psychology: Human Perception and Performance", |
|
"volume": "37", |
|
"issue": "2", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rayner, K., Slattery, T. J., Drieghe, D., and Liversedge, S. P. (2011). Eye movements and word skipping during read- ing: effects of word length and predictability. Journal of Experimental Psychology: Human Perception and Per- formance, 37(2):514.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Eye movements in reading and information processing: 20 years of research", |
|
"authors": [ |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Rayner", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "Psychological bulletin", |
|
"volume": "124", |
|
"issue": "3", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rayner, K. (1998). Eye movements in reading and infor- mation processing: 20 years of research. Psychological bulletin, 124(3):372.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Improved keyword and keyphrase extraction from meeting transcripts", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Sheeba", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Vivekanandan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "International Journal of Computer Applications", |
|
"volume": "", |
|
"issue": "13", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sheeba, J. and Vivekanandan, K. (2012). Improved key- word and keyphrase extraction from meeting transcripts. International Journal of Computer Applications, 52(13).", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Learning word importance with the neural bag-of-words model", |
|
"authors": [ |
|
{ |
|
"first": "I", |
|
"middle": [], |
|
"last": "Sheikh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "I", |
|
"middle": [], |
|
"last": "Illina", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Fohr", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Linares", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sheikh, I., Illina, I., Fohr, D., and Linares, G. (2016). Learning word importance with the neural bag-of-words model. In ACL, Representation Learning for NLP (Repl4NLP) workshop.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Towards an iterative reinforcement approach for simultaneous document summarization and keyword extraction", |
|
"authors": [ |
|
{ |
|
"first": "X", |
|
"middle": [], |
|
"last": "Wan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiao", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "ACL", |
|
"volume": "7", |
|
"issue": "", |
|
"pages": "552--559", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wan, X., Yang, J., and Xiao, J. (2007). Towards an it- erative reinforcement approach for simultaneous docu- ment summarization and keyword extraction. In ACL, volume 7, pages 552-559.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Multi-document summarization by maximizing informative content-words", |
|
"authors": [ |
|
{ |
|
"first": "W", |
|
"middle": [], |
|
"last": "Yih", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Goodman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Vanderwende", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Suzuki", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "IJCAI", |
|
"volume": "7", |
|
"issue": "", |
|
"pages": "1776--1782", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yih, W.-t., Goodman, J., Vanderwende, L., and Suzuki, H. (2007). Multi-document summarization by maximizing informative content-words. In IJCAI, volume 7, pages 1776-1782.", |
|
"links": null |
|
}
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"type_str": "figure", |
|
"num": null, |
|
"text": ": = [General unfolded network structure of our model, adapted", |
|
"uris": null |
|
}, |
|
"FIGREF1": { |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "(a) Normalized confusion matrix for LSTM-CRF (b) Normalized confusion matrix for LSTM-SIG Confusion matrices for each model for classification into 6 classes: c 1 = [0, 0.1), c 2 = [0.1, 0.3), and so forth.", |
|
"uris": null |
|
} |
|
} |
|
} |
|
} |