|
{ |
|
"paper_id": "E17-1030", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T10:52:33.983996Z" |
|
}, |
|
"title": "Sentence Segmentation in Narrative Transcripts from Neuropsychological Tests using Recurrent Convolutional Neural Networks", |
|
"authors": [ |
|
{

"first": "Marcos",

"middle": [

"Vin\u00edcius"

],

"last": "Treviso",

"suffix": "",

"affiliation": {

"laboratory": "",

"institution": "University of S\u00e3o Paulo",

"location": {}

},

"email": ""

},

{

"first": "Christopher",

"middle": [],

"last": "Shulby",

"suffix": "",

"affiliation": {

"laboratory": "",

"institution": "University of S\u00e3o Paulo",

"location": {}

},

"email": "[email protected]"

},

{

"first": "Sandra",

"middle": [

"Maria"

],

"last": "Alu\u00edsio",

"suffix": "",

"affiliation": {

"laboratory": "",

"institution": "University of S\u00e3o Paulo",

"location": {}

},

"email": ""

}
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Automated discourse analysis tools based on Natural Language Processing (NLP) aiming at the diagnosis of languageimpairing dementias generally extract several textual metrics of narrative transcripts. However, the absence of sentence boundary segmentation in the transcripts prevents the direct application of NLP methods which rely on these marks to function properly, such as taggers and parsers. We present the first steps taken towards automatic neuropsychological evaluation based on narrative discourse analysis, presenting a new automatic sentence segmentation method for impaired speech. Our model uses recurrent convolutional neural networks with prosodic, Part of Speech (PoS) features, and word embeddings. It was evaluated intrinsically on impaired, spontaneous speech, as well as, normal, prepared speech, and presents better results for healthy elderly (CTL) (F 1 = 0.74) and Mild Cognitive Impairment (MCI) patients (F 1 = 0.70) than the Conditional Random Fields method (F 1 = 0.55 and 0.53, respectively) used in the same context of our study. The results suggest that our model is robust for impaired speech and can be used in automated discourse analysis tools to differentiate narratives produced by MCI and CTL.", |
|
"pdf_parse": { |
|
"paper_id": "E17-1030", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Automated discourse analysis tools based on Natural Language Processing (NLP) aiming at the diagnosis of languageimpairing dementias generally extract several textual metrics of narrative transcripts. However, the absence of sentence boundary segmentation in the transcripts prevents the direct application of NLP methods which rely on these marks to function properly, such as taggers and parsers. We present the first steps taken towards automatic neuropsychological evaluation based on narrative discourse analysis, presenting a new automatic sentence segmentation method for impaired speech. Our model uses recurrent convolutional neural networks with prosodic, Part of Speech (PoS) features, and word embeddings. It was evaluated intrinsically on impaired, spontaneous speech, as well as, normal, prepared speech, and presents better results for healthy elderly (CTL) (F 1 = 0.74) and Mild Cognitive Impairment (MCI) patients (F 1 = 0.70) than the Conditional Random Fields method (F 1 = 0.55 and 0.53, respectively) used in the same context of our study. The results suggest that our model is robust for impaired speech and can be used in automated discourse analysis tools to differentiate narratives produced by MCI and CTL.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Mild Cognitive Impairment (MCI) has recently received much attention, as it may represent a preclinical state of Alzheimer's disease (AD) . MCI can affect one or multiple cognitive domains (e.g. memory, language, visuospatial skills and the ex-ecutive function); the kind that affects memory, called amnestic MCI, is the most frequent and that which most often converts to AD (Janoutov\u00e1 et al., 2015) . As dementias are chronic progressive diseases, it is important to identify them in the early stages, because early detection yields a greater chance of success for non-pharmacological treatment strategies such as cognitive training, physical activity and socialization (Teixeira et al., 2012) . The definition of MCI diagnostic criteria is conducted mainly by the cognitive symptoms presented by patients in standardized tests and by functional impairments in daily life (McKhann et al., 2011) . Difficulties related with narrative discourse deficits (e.g. repetitions or gaps during the narrative) may lead an elderly individual to look for a specialist. Narrative discourse is the reproduction of an experienced episode (necessarily evoking memory), respecting temporal and causal relations among events. Although MCI is clinically characterized by episodic memory deficits, language impairment may also occur.", |
|
"cite_spans": [ |
|
{ |
|
"start": 133, |
|
"end": 137, |
|
"text": "(AD)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 376, |
|
"end": 400, |
|
"text": "(Janoutov\u00e1 et al., 2015)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 672, |
|
"end": 695, |
|
"text": "(Teixeira et al., 2012)", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 874, |
|
"end": 896, |
|
"text": "(McKhann et al., 2011)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Certain widely used neuropsychological tests require patients to retell or understand a story. This is the case of the logical memory test, where one reproduces a story after listening to it. The higher the number of recalled elements from the narrative, the higher the memory score (Wechsler, 1997; Bayles and Tomoeda, 1991; Morris et al., 2006) . However, the main difficulties in applying these tests are: (i) time required, since it is a manual task; and (ii) the subjectivity of the clinician. Therefore, automatic analysis of discourse production is seen as a promising solution for MCI diagnosis, because its early detection ensures a greater chance of success in addressing potentially reversible factors (Muangpaisan et al., 2012) . Since discourse is a natural form of communication, it favors the observation of the patient's functionality in everyday life. Moreover, it provides data for observing the language-cognitive skills interface, such as executive functions (planning, organizing, updating and monitoring data) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 283, |
|
"end": 299, |
|
"text": "(Wechsler, 1997;", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 300, |
|
"end": 325, |
|
"text": "Bayles and Tomoeda, 1991;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 326, |
|
"end": 346, |
|
"text": "Morris et al., 2006)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 713, |
|
"end": 739, |
|
"text": "(Muangpaisan et al., 2012)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 979, |
|
"end": 1031, |
|
"text": "(planning, organizing, updating and monitoring data)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "With regard to the Wechsler Logical Memory (WLM) test, the original narrative used is short, allowing for the use of Automatic Speech Recognition (ASR) output even without capitalization and sentence segmentation, as shown by Lehr et al. (2012) for English. They based their method on automatic alignment of the original and patient transcripts in order to calculate the number of recalled elements.", |
|
"cite_spans": [ |
|
{ |
|
"start": 226, |
|
"end": 244, |
|
"text": "Lehr et al. (2012)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The evaluation of narrative discourse production from the standpoint of linguistic impairment is an attractive alternative as it allows for linguistic microstructure analysis, including phonetic-phonological, morphosyntactic and semantic-lexical components, as well as semanticpragmatic macrostructures. Automated discourse analysis tools based on Natural Language Processing (NLP) resources and tools aiming at the diagnosis of language-impairing dementias via machine learning methods are already available for the English language (Fraser et al., 2015b; Yancheva et al., 2015; Roark et al., 2011) and also for Brazilian Portuguese (BP) (Alu\u00edsio et al., 2016) . The latter study used a publicly available tool, Coh-Metrix-Dementia 1 , to extract 73 textual metrics of narrative transcripts, comprising several levels of linguistic analysis from word counts to semantics and discourse. However, the absence of sentence boundary segmentation in transcripts prevents the direct application of NLP methods that rely on these marks in order for the tools to function properly. To our knowledge, only one study evaluating automatic sentence segmentation in English transcripts of elderly aphasic exists (Fraser et al., 2015a) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 534, |
|
"end": 556, |
|
"text": "(Fraser et al., 2015b;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 557, |
|
"end": 579, |
|
"text": "Yancheva et al., 2015;", |
|
"ref_id": "BIBREF41" |
|
}, |
|
{ |
|
"start": 580, |
|
"end": 599, |
|
"text": "Roark et al., 2011)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 639, |
|
"end": 661, |
|
"text": "(Alu\u00edsio et al., 2016)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 1199, |
|
"end": 1221, |
|
"text": "(Fraser et al., 2015a)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The purpose of this paper is to present our method, DeepBond, for automatic sentence segmentation of spontaneous speech of healthy elderly (CTL) and MCI patients. Although it was evaluated for BP data, it can be adapted to other languages as well.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The sentence boundary detection task has been treated by many researchers. Liu et al. (2006) investigated the imbalanced data problem, since there are more non-boundary words than not; their 1 http://143.107.183.175:22380/ study was carried out using two speech corpora: conversational telephone and broadcast news, both for English.", |
|
"cite_spans": [ |
|
{ |
|
"start": 75, |
|
"end": 92, |
|
"text": "Liu et al. (2006)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "More recent studies have focused on Conditional Random Field (CRF) and Neural Network models. Wang et al. (2012) and Hasan et al. (2014) use CRF based methods to identify word boundaries in speech corpora datasets, more specifically on English broadcast news data and English conversational speech (lecture recordings), respectively. Khomitsevich et al. (2015) , similar to our work, used a combination of two models, one based on Support Vector Machines to deal with prosodic information, and other based on CRF to deal with lexical information. They combine the two models using a logistic regression classifier. Xu et al. (2014) uses a combination of CRF and a Deep neural network (DNN) to detect sentence boundaries on broadcast news data. Che et al. (2016) uses two different convolutional neural network (CNN), one which moves in only one dimension and another which moves in two. They achieved good results on a TED talks dataset. Tilk and Alum\u00e4e (2015) use a recurrent neural network (RNN) with long short-term memory units to restore punctuation in speech transcripts from broadcast news and conversations.", |
|
"cite_spans": [ |
|
{ |
|
"start": 94, |
|
"end": 112, |
|
"text": "Wang et al. (2012)", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 117, |
|
"end": 136, |
|
"text": "Hasan et al. (2014)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 334, |
|
"end": 360, |
|
"text": "Khomitsevich et al. (2015)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 615, |
|
"end": 631, |
|
"text": "Xu et al. (2014)", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 744, |
|
"end": 761, |
|
"text": "Che et al. (2016)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Although there are proposed methods for sentence segmentation of Portuguese datasets (Silla Jr. and Kaestner, 2004; Batista and Mamede, 2011; L\u00f3pez and Pardo, 2015) , none of them are used for transcriptions produced in a clinical setting for the elderly with dementia and related syndromes. The study most similar to our scenario is (Fraser et al., 2015a) , which proposes a segmentation method for aphasic speech based on lexical, PoS and prosodic features using tools and a generic acoustic model trained for English. Their approach is based on a CRF model, and the best results for this study were obtained for nonspontaneous broadcast news data.", |
|
"cite_spans": [ |
|
{ |
|
"start": 85, |
|
"end": 115, |
|
"text": "(Silla Jr. and Kaestner, 2004;", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 116, |
|
"end": 141, |
|
"text": "Batista and Mamede, 2011;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 142, |
|
"end": 164, |
|
"text": "L\u00f3pez and Pardo, 2015)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 334, |
|
"end": 356, |
|
"text": "(Fraser et al., 2015a)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Our method uses recurrent convolutional neural networks with prosodic, PoS features, and also word embeddings and was evaluated intrinsically on impaired, spontaneous speech and normal, prepared speech. Although DNNs have already been used for this task, our work was the first, to the best of our knowledge, to evaluate them on impaired speech.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "A total of 60 participants from a research project on diagnostic tools for language impaired dementias produced narratives used to evaluate our method. Two datasets were used to train our model (Sections 3.1 and 3.2). As a preprocessing step we have removed capitalization information and in order to simulate high-quality ASR, we left all speech disfluences intact. Demographic information for participants in our study is presented in Table 1 : Demographic information of participants in the Cinderella dataset. The Avg. Education is given in years.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 437, |
|
"end": 444, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "The Cinderella dataset consists of spontaneous speech narratives produced during a test to elicit narrative discourse with visual stimuli, using a book consisting of sequenced pictures based on the Cinderella story. In the test, an individual verbally tells the story to the examiner based on the pictures. The narrative is manually transcribed by a trained annotator who scores the narrative by counting the number of recalled propositions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Cinderella Dataset", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "This dataset consists of 60 narrative texts from BP speakers, 20 controls, 20 with AD, and 20 with MCI, diagnosed at the Medical School of University of S\u00e3o Paulo and also used in Alu\u00edsio et al. (2016) . Counting all patient groups, this dataset has an audio duration of 4h and 11m, an average of 1843/60 = 30.72 sentences per narrative, and sentence averages of 23807/1843 = 12.92 words. AD narratives were only used for training the lexical model.", |
|
"cite_spans": [ |
|
{ |
|
"start": 180, |
|
"end": 201, |
|
"text": "Alu\u00edsio et al. (2016)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Cinderella Dataset", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "This dataset was made available by the LaPS (Signal Processing Laboratory) at the Federal University of Par\u00e1 (Batista, 2013) , and is composed of articles from Brazil's 1988 constitution, in which the speech is prepared and read. Each file has an averages 30 seconds.", |
|
"cite_spans": [ |
|
{ |
|
"start": 109, |
|
"end": 124, |
|
"text": "(Batista, 2013)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Brazilian Constitution Dataset", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "A preprocessing step removed lexical tips which indicate the beginning of the articles, sections and paragraphs. This removal was carried out on both the transcripts and audio. In addition, we separated the new dataset organized by articles, totaling 357 texts. Then, we marked the end of each article and paragraph and inserted punctuation at the end. Titles and chapters have been ignored during this process. We randomly selected 60 texts from this dataset, forcing only the condition that the number of sentences of each text sentence was greater than 12. We refer to the large dataset as Constitution L, and the dataset with the 60 texts as Constitution S.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Brazilian Constitution Dataset", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "The average number of sentences in each text of Constitution L is 2698/357 = 7.56, and the average size of these sentences have 63275/2698 = 23.45 words while Constitution S has on average 1409/60 = 23.48 sentences, and these sentences average 30521/1409 = 21.66 words. The total audio duration of Constitution L is 7h 39m, and Constitution S is 3h 43m.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Brazilian Constitution Dataset", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "The Dog Story dataset is available from the BALE (Battery of Language Assessment in Aging, in English) instrument, described in (Jer\u00f4nimo, 2016) . It is composed of transcriptions from the narrative production test based on the presentation of a set of seven pictures telling a story of a boy who hides a dog that he found on the street (Le Boeuf, 1976) . This battery was chosen because its aim is to allow for its administration to elderly people who are illiterate and/or of low educational level, who represent the majority of the aged sample assisted by the public health system in Brazil.", |
|
"cite_spans": [ |
|
{ |
|
"start": 128, |
|
"end": 144, |
|
"text": "(Jer\u00f4nimo, 2016)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 341, |
|
"end": 353, |
|
"text": "Boeuf, 1976)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Dog Story Dataset", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "This dataset consists of 10 narratives transcripts (6 CTL and 4 MCI), where the average number of sentences and the average size of the sentences are 16.60 and 6.58, respectively. When compared with the Cinderella dataset, the dataset is composed of less sentences and the sentences have fewer words on average.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Dog Story Dataset", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "We divide our lexical features into two groups: PoS features and word embeddings, where every word is represented in a high dimensionality continuous vector.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Lexical features", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "The PoS features where extracted using a BP The word embeddings used in this work have 50 dimensions and were trained by Fonseca et al. (2015) with articles from the BP version of Wikipedia and a large journalistic corpus with articles from the news site G1 3 , totaling 240 million tokens and a vocabulary of 160,270 words. All of these tokens were made lowercase and trained with a neural language model described in (Collobert et al., 2011).", |
|
"cite_spans": [ |
|
{ |
|
"start": 121, |
|
"end": 142, |
|
"text": "Fonseca et al. (2015)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Lexical features", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "We used three prosodic features: F0, intensity and duration which were extracted at the phonetic level using PRAAT (Boersma and others, 2002) from forced alignment output. Alignment was done using using the HTK toolkit (Young et al., 2002) with clean speech corpora and a pronunciation dictionary phonetically transcribed by Petrus (Serrani, 2015) and augmented by our rule-based algorithm to insert multiple pronunciations, rendering a suitable model for ASR. The features were calculated for the first, last, penultimate and antepenultimate vowels of each word and pauses. These vowels were chosen based on knowledge of the BP which typically exhibits stress on the penultimate vowel, with notable patterns observed for final vowel stressing, for example words ending in \"i\" (\"Barueri\") or a nasal consonant (\"Renan\"), and the antepenultimate vowel (usually indicated by a stress diacritic) like \"helic\u00f3ptero\" (\"helicopter\"), \"esp\u00edrito\" (\"spirit\") and \"\u00e1rvore\" (\"tree\"). Also, Portuguese, like most western languages, distinguishes sentence types by rising and falling pitch patterns, giving the listener a clue as to whether the speaker has finished a sentence or not. Pause duration was also calculated since the length of a pause can be indicative of the presence of a punctuation mark (Beckman and Ayers Elam, 1997) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 115, |
|
"end": 141, |
|
"text": "(Boersma and others, 2002)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 219, |
|
"end": 239, |
|
"text": "(Young et al., 2002)", |
|
"ref_id": "BIBREF42" |
|
}, |
|
{ |
|
"start": 1291, |
|
"end": 1321, |
|
"text": "(Beckman and Ayers Elam, 1997)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Prosodic features", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "To automatically extract features from the input and also deal with the problem of long dependencies between words, we propose a model based on recurrent convolutional neural networks (RCNN), which was inspired by the work of Lai et al. (2015) . The architecture of our model can be seen in Figure 1 . First, we show how to prepare the input for the network, then we go through the networks layers and describe the training procedure, finally, we discuss the experimental settings.", |
|
"cite_spans": [ |
|
{ |
|
"start": 226, |
|
"end": 243, |
|
"text": "Lai et al. (2015)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 291, |
|
"end": 299, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Model description", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In our approach, the input to the network is a transcribed narrative which is categorized as CTL (healthy elderly individuals) and MCI (MCI patients). The narratives contain a sequence of words w 1 , w 2 , . . . , w m . Each word is annotated with a label, to indicate whether it precedes a boundary (y = B) or not (y = N B). We do not make a distinction between punctuation marks, so a boundary is defined as a period, exclamation mark, question mark, colon or semicolon. With this approach, we can see this task as a binary classification problem.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Input preparation", |
|
"sec_num": "5.1" |
|
}, |
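
{

"text": "As a minimal illustration of this labeling scheme (a hypothetical Python sketch, not the authors' code; the transcript and function names are our own), a punctuated transcript can be converted into (word, label) pairs as follows:\n\n# Hypothetical sketch: convert a punctuated transcript into (word, label) pairs.\n# A word is labeled 'B' if a boundary mark follows it, and 'NB' otherwise.\nBOUNDARIES = {'.', '!', '?', ':', ';'}\n\ndef label_words(transcript):\n    pairs = []\n    for token in transcript.split():\n        word = token.rstrip('.!?:;')\n        if not word:\n            # the token was only punctuation; mark the previous word as a boundary\n            if pairs:\n                pairs[-1] = (pairs[-1][0], 'B')\n            continue\n        label = 'B' if token[-1] in BOUNDARIES else 'NB'\n        pairs.append((word, label))\n    return pairs\n\nprint(label_words('a cinderela limpava a casa . ela queria ir ao baile'))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Input preparation",

"sec_num": "5.1"

},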
|
{ |
|
"text": "Our input contains transcribed narratives with m words in it. We represent the narrative i as", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Representation", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "X i \u2208 R m\u00d7n , X i = {x 1 , x 2 , . . . , x m\u00d7n },", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Representation", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "where n is the number of features. We represent the boundaries as Y i \u2208 R 2 , Y i = {0, 1}, where 0 stands for N B and 1 denotes B. Our final model consists of a combination of two models. The first model is responsible for treating only lexical information, while the second treats only prosodic information. Both models have the same architecture shown in Figure 1 . This strategy is based on the idea that we can train the lexical model with even more data, since textual information is easily found on the web. In order to obtain the most probable class y for the w j word, a linear combination was created between these two models, where one receives the weighted complement of the other:", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 358, |
|
"end": 366, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Representation", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "\u03b1\u2022P lexical (y | w j )+(1\u2212\u03b1)\u2022P prosodic (y | w j ) (1)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Representation", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Then, the most probable class is the one that maximizes the linear combination from previous equation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Representation", |
|
"sec_num": "5.2" |
|
}, |
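
{

"text": "As a sketch of this combination step (hypothetical code; it assumes each model exposes per-word class probabilities as arrays, which is not specified in this form by the authors; the default alpha = 0.6 mirrors the best value reported in Section 6.1), the prediction reduces to an argmax over the interpolated distributions:\n\nimport numpy as np\n\n# Hypothetical sketch of Equation 1: interpolate the two models' probabilities.\n# p_lex and p_pro are (m, 2) arrays of P(y | w_j) for the classes [NB, B].\ndef combine(p_lex, p_pro, alpha=0.6):\n    p = alpha * p_lex + (1.0 - alpha) * p_pro\n    return p.argmax(axis=1)  # 0 = NB, 1 = B, for each of the m words\n\np_lex = np.array([[0.9, 0.1], [0.3, 0.7]])\np_pro = np.array([[0.8, 0.2], [0.6, 0.4]])\nprint(combine(p_lex, p_pro))  # -> [0 1]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Representation",

"sec_num": "5.2"

},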
|
{ |
|
"text": "The data input for the lexical model is divided into two features: word embeddings with dimensions |e w |, and the PoS tags with dimensions |e t |. Given a word w, the respective embedding e w \u2208 E word is fetched and concatenated with the word's PoS vector e t \u2208 E tag , thus obtaining a new vector size", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Embedding layer", |
|
"sec_num": "5.2.1" |
|
}, |
|
{ |
|
"text": "d = |e w | + |e t |.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Embedding layer", |
|
"sec_num": "5.2.1" |
|
}, |
|
{ |
|
"text": "Out of vocabulary words share a single and randomly generated vector that represents an unknown word.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Embedding layer", |
|
"sec_num": "5.2.1" |
|
}, |
|
{ |
|
"text": "In the prosodic model we directly feed information about pitch, intensity and duration from the first, last, penultimate and ante-penultimate vowels of each word. Moreover, we feed the information about pause duration after each word, where duration of zero seconds denotes no pause. Therefore, for the prosodic model, we have a vector with dimensions d = 4 \u2022 3 + 1 = 13.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Embedding layer", |
|
"sec_num": "5.2.1" |
|
}, |
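
{

"text": "A minimal sketch of how these per-word input vectors could be assembled (hypothetical code; the PoS vector size of 10 is our own illustrative choice, and random values stand in for the real lookup tables and PRAAT measurements):\n\nimport numpy as np\n\n# Lexical model input: concatenate a 50-dim word embedding with a PoS tag vector.\ne_w = np.random.randn(50)  # stands in for a row of E_word\ne_t = np.random.randn(10)  # stands in for a row of E_tag (size is illustrative)\nx_lexical = np.concatenate([e_w, e_t])  # d = |e_w| + |e_t| = 60 here\n\n# Prosodic model input: F0, intensity and duration for the first, last,\n# penultimate and antepenultimate vowels, plus the pause duration after the word.\nvowel_feats = np.random.randn(4, 3)\npause_duration = 0.0  # zero seconds denotes no pause\nx_prosodic = np.append(vowel_feats.ravel(), pause_duration)  # d = 4 * 3 + 1 = 13\nprint(x_lexical.shape, x_prosodic.shape)  # (60,) (13,)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Embedding layer",

"sec_num": "5.2.1"

},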
|
{ |
|
"text": "Once we have a matrix formed by the features of the words in the text, the convolutional layer receives it, which, in turn, is responsible for the automatic extraction of n f new features depending on h c neighboring words (Kim, 2014) . The convolutional layer produces a new feature c j by applying a filter W \u2208 R hc\u2022d to a window of h c words x j\u2212hc+1:j in a sentence with length m:", |
|
"cite_spans": [ |
|
{ |
|
"start": 223, |
|
"end": 234, |
|
"text": "(Kim, 2014)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Convolutional and pooling layer", |
|
"sec_num": "5.2.2" |
|
}, |
|
{ |
|
"text": "c j = f (W x (j\u2212hc+1):j + b), h c \u2264 j \u2264 m (2)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Convolutional and pooling layer", |
|
"sec_num": "5.2.2" |
|
}, |
|
{ |
|
"text": "Where b \u2208 R represents a bias term and f is a non-linear function.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Convolutional and pooling layer", |
|
"sec_num": "5.2.2" |
|
}, |
|
{ |
|
"text": "Our convolutional layer simply moves one dimension vertically, making one step at a time, which gives us m \u2212 h c + 1 generated features. Since we want to classify exactly m elements, we add p = h c /2 zero-padding on both sides of the text. Applying this strategy for each entry x j yields the complete feature map c \u2208 R (m\u2212hc+1)+2\u2022p .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Convolutional and pooling layer", |
|
"sec_num": "5.2.2" |
|
}, |
|
{ |
|
"text": "In addition, we apply a max-pooling operation over time, looking at a region of h m elements to find the most significant features:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Convolutional and pooling layer", |
|
"sec_num": "5.2.2" |
|
}, |
|
{ |
|
"text": "c = max 1\u2264j\u2264m {c (j\u2212hm+1):j }", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Convolutional and pooling layer", |
|
"sec_num": "5.2.2" |
|
}, |
|
{ |
|
"text": "(3)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Convolutional and pooling layer", |
|
"sec_num": "5.2.2" |
|
}, |
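
{

"text": "The convolution and pooling steps above can be sketched directly in numpy (a hypothetical illustration with made-up sizes; ReLU stands in for the unspecified non-linearity f, and stride 1 is assumed for the pooling region):\n\nimport numpy as np\n\nm, d, h_c, n_f, h_m = 8, 13, 3, 4, 2\nX = np.random.randn(m, d)          # one feature vector per word\nW = np.random.randn(n_f, h_c * d)  # n_f filters over h_c-word windows\nb = np.random.randn(n_f)\n\n# Zero-pad with p = h_c // 2 rows on both sides so every word gets a feature.\np = h_c // 2\nXp = np.vstack([np.zeros((p, d)), X, np.zeros((p, d))])\n\n# Equation 2 with f = ReLU: slide the filter one step at a time.\nC = np.array([np.maximum(W @ Xp[j:j + h_c].ravel() + b, 0)\n              for j in range(Xp.shape[0] - h_c + 1)])\n\n# Max-pooling over time with a region of h_m elements (Equation 3).\npooled = np.array([C[j:j + h_m].max(axis=0) for j in range(C.shape[0] - h_m + 1)])\nprint(C.shape, pooled.shape)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Convolutional and pooling layer",

"sec_num": "5.2.2"

},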
|
{ |
|
"text": "The new features extracted are fed into a recurrent bidirectional layer which has n r units. A recurrent layer is able to store historic information by connecting the previous hidden state with the current hidden state at a time t. The values in the hidden and output layers are computed as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Recurrent layer", |
|
"sec_num": "5.2.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "h t = f (W x x t + W h h t\u22121 + b h )", |
|
"eq_num": "(4)" |
|
} |
|
], |
|
"section": "Recurrent layer", |
|
"sec_num": "5.2.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "y t = g(W y h t + b y )", |
|
"eq_num": "(5)" |
|
} |
|
], |
|
"section": "Recurrent layer", |
|
"sec_num": "5.2.3" |
|
}, |
|
{ |
|
"text": "where W x , W h , and W y are the connection weights, b y and b h are bias vectors, and f and g are non-linear functions. Here, we use a special unit known as Long Short-Term Memory (LSTM) (Hochreiter and Schmidhuber, 1997) , which is able to learn over long dependencies between words by a purpose-built memory cell. Figure 2 shows a single LSTM memory cell. The LSTM updates for time steps t are done as described by Jozefowicz et al. (2015) , which is a slight simplification of the one described by Graves and Jailty (2014) , where the memory cell is implemented as follows:", |
|
"cite_spans": [ |
|
{ |
|
"start": 189, |
|
"end": 223, |
|
"text": "(Hochreiter and Schmidhuber, 1997)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 419, |
|
"end": 443, |
|
"text": "Jozefowicz et al. (2015)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 503, |
|
"end": 527, |
|
"text": "Graves and Jailty (2014)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 318, |
|
"end": 326, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Recurrent layer", |
|
"sec_num": "5.2.3" |
|
}, |
|
{ |
|
"text": "i t = \u03c3(W xi x t + W hi h t\u22121 + b i ) f t = \u03c3(W xf x t + W hf h t\u22121 + b f ) o t = \u03c3(W xo x t + W ho h t\u22121 + b o ) g t = tanh(W xc x t + W hc h t\u22121 + b c ) c t = f t c t\u22121 + i t g t h t = o t tanh(c t )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Recurrent layer", |
|
"sec_num": "5.2.3" |
|
}, |
|
{ |
|
"text": "where \u03c3(z) = 1/(1 + e \u2212z ) is the sigmoid function, h t \u2208 R nr is the hidden unit, i t \u2208 R nr is the input gate, f t \u2208 R nr is the forget gate, o t \u2208 R nr is the output gate, g t \u2208 R nr is the input modulation gate, and c t \u2208 R nr is the memory cell unit, which is the summation of the previous memory cell modulated by the forget gate f t , and a function of the current input with previous hidden state modulated by the input gate i t .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Recurrent layer", |
|
"sec_num": "5.2.3" |
|
}, |
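
{

"text": "The memory cell updates above translate almost line by line into code; the following hypothetical numpy sketch uses small random placeholder weights purely to show the data flow:\n\nimport numpy as np\n\nn_f, n_r = 4, 5  # input size and number of recurrent units (illustrative)\nsigma = lambda z: 1.0 / (1.0 + np.exp(-z))\nWx = {g: np.random.randn(n_r, n_f) * 0.1 for g in 'ifog'}\nWh = {g: np.random.randn(n_r, n_r) * 0.1 for g in 'ifog'}\nb = {g: np.zeros(n_r) for g in 'ifog'}\n\ndef lstm_step(x_t, h_prev, c_prev):\n    i = sigma(Wx['i'] @ x_t + Wh['i'] @ h_prev + b['i'])    # input gate\n    f = sigma(Wx['f'] @ x_t + Wh['f'] @ h_prev + b['f'])    # forget gate\n    o = sigma(Wx['o'] @ x_t + Wh['o'] @ h_prev + b['o'])    # output gate\n    g = np.tanh(Wx['g'] @ x_t + Wh['g'] @ h_prev + b['g'])  # input modulation\n    c = f * c_prev + i * g  # gated past plus gated new input\n    h = o * np.tanh(c)\n    return h, c\n\nh, c = lstm_step(np.random.randn(n_f), np.zeros(n_r), np.zeros(n_r))\nprint(h.shape)  # (5,)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Recurrent layer",

"sec_num": "5.2.3"

},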
|
{ |
|
"text": "As in Graves and Jaitly (2014), we used the features by looking at forward states and backward states. This kind of mechanism is known as a bidirectional neural network (BRNN), since it learns weights based on both past and future elements given a timestep t. In order to implement the BRNN, we reversed the sentences as a trick before we fed them to a regular LSTM layer, doubling the number of weights used in the recurrent layer. The output from this layer is the summation of the forward output with backward output:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Recurrent layer", |
|
"sec_num": "5.2.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "y t = \u2190 \u2212 y t + \u2212 \u2192 y t", |
|
"eq_num": "(6)" |
|
} |
|
], |
|
"section": "Recurrent layer", |
|
"sec_num": "5.2.3" |
|
}, |
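
{

"text": "The reversal trick and the summation of Equation 6 can be sketched as follows (hypothetical code; a plain tanh recurrence stands in for the LSTM cell sketched above, and the forward and backward passes get separate weights, which is what doubles the weight count):\n\nimport numpy as np\n\nn_f, n_r = 4, 5\n\ndef make_weights():\n    return np.random.randn(n_r, n_f) * 0.1, np.random.randn(n_r, n_r) * 0.1\n\nfw, bw = make_weights(), make_weights()  # separate forward and backward weights\n\ndef run_rnn(xs, Wx, Wh):\n    h, out = np.zeros(n_r), []\n    for x in xs:\n        h = np.tanh(Wx @ x + Wh @ h)\n        out.append(h)\n    return np.array(out)\n\ndef birnn(xs):\n    forward = run_rnn(xs, *fw)\n    backward = run_rnn(xs[::-1], *bw)[::-1]  # reverse input, re-reverse output\n    return forward + backward                # Equation 6: sum per timestep\n\nxs = np.random.randn(8, n_f)  # m = 8 timesteps of pooled features\nprint(birnn(xs).shape)        # (8, 5)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Recurrent layer",

"sec_num": "5.2.3"

},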
|
{ |
|
"text": "With a bidirectional LSTM layer, we are able to explore the principle that words nearby have a greater influence in classification, while considering that words farther away can also have some impact. This often happens, for example, in the case of question words and conjunctions: por que (\"why\"); qual (\"which\"); quem (\"who\"); quando (\"when\"), etc.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Recurrent layer", |
|
"sec_num": "5.2.3" |
|
}, |
|
{ |
|
"text": "After the BRNN layer, dropout is used to prevent co-adaptation of hidden units during forwardbackpropagation, where we ignore some neurons meaning to reduce the chance of overfitting the model (Srivastava et al., 2014) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 193, |
|
"end": 218, |
|
"text": "(Srivastava et al., 2014)", |
|
"ref_id": "BIBREF34" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Fully connected layer", |
|
"sec_num": "5.2.4" |
|
}, |
|
{ |
|
"text": "The last layer receives the output from the BRNN in each timestep and passes them trough a fully connected layer, where the softmax operation is calculated, giving us the probability of whether or not the word precedes a boundary:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Fully connected layer", |
|
"sec_num": "5.2.4" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "y t = sof tmax(W y t + b)", |
|
"eq_num": "(7)" |
|
} |
|
], |
|
"section": "Fully connected layer", |
|
"sec_num": "5.2.4" |
|
}, |
|
{ |
|
"text": "Where W \u2208 R nr\u00d72 is a matrix of weights, b \u2208 R nr is a bias vector, and softmax is defined as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Fully connected layer", |
|
"sec_num": "5.2.4" |
|
}, |
|
{ |
|
"text": "s j (z) = e z j K k=1 e z k", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Fully connected layer", |
|
"sec_num": "5.2.4" |
|
}, |
|
{ |
|
"text": ", for j = 1, 2, . . . , K (8)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Fully connected layer", |
|
"sec_num": "5.2.4" |
|
}, |
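
{

"text": "A numerically stable form of this softmax (a small hypothetical sketch; subtracting the maximum does not change the result but avoids overflow) is:\n\nimport numpy as np\n\ndef softmax(z):\n    e = np.exp(z - z.max())  # shift by the max for numerical stability\n    return e / e.sum()\n\nprint(softmax(np.array([2.0, 0.5])))  # e.g. the two class scores for one timestep",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Fully connected layer",

"sec_num": "5.2.4"

},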
|
{ |
|
"text": "We define all of the parameters to be trained as \u03b8.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "\u03b8 = E word , E tag , W (c) , b (c) , W (f ) , b (f ) , \u2190 \u2212 W (r) , \u2190 \u2212 b (r) , \u2212 \u2192 W (r) , \u2212 \u2192 b (r)", |
|
"eq_num": "(9)" |
|
} |
|
], |
|
"section": "Training", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "Where E word \u2208 R |V |\u00d7|ew| is the lookup table for the word embeddings, E tag \u2208 R |Vtag|\u00d7|et| is the lookup table for PoS tags, and |V |, |V tag | represents the size of the vocabulary for word embeddings and PoS tags, respectively.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "For the convolutional layer: the weights W (c) \u2208 R n f \u00d7hc\u2022d and the bias vector", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "b (c) \u2208 R n f .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "For the fully connected layer: the weights matrix W (f ) \u2208 R nr\u00d72 and the bias vector b (f ) \u2208 R nr .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "For the BRNN layer we divide the set of parameters from BRNN into two sets. Those from the forward pass and backward pass. Each set contains the weights for an input W (r) x \u2208 R nr\u00d7n f , the weights for previous hidden states W (r) h \u2208 R nr\u00d7nr , and the bias vectors b (r) \u2208 R nr for all gates (i, f, o, g). Additionally, we have the weights for an output in a timestep W (r) y \u2208 R nr\u00d7nr and a bias vector b y \u2208 R nr .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "We define the loss function L as categorical cross-entropy (Murphy, 2012) , shown in the equation below, which aims to minimize the negative log likelihood in relation to the weights. Since we have an unbalanced class problem, we give different weights for each class, where the weight of the minority class (B) is greater than that of the majority (N B).", |
|
"cite_spans": [ |
|
{ |
|
"start": 59, |
|
"end": 73, |
|
"text": "(Murphy, 2012)", |
|
"ref_id": "BIBREF30" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "L(y,\u0177) = \u2212 i y i log(\u0177 i ) cw y i", |
|
"eq_num": "(10)" |
|
} |
|
], |
|
"section": "Training", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "Where y are our real targets,\u0177 are our predictions, and cw are the class weights for = B and = N B, calculated as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "cw = |y| 2 \u2022 |y = |", |
|
"eq_num": "(11)" |
|
} |
|
], |
|
"section": "Training", |
|
"sec_num": "5.3" |
|
}, |
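
{

"text": "The class weighting and the weighted loss of Equations 10 and 11 can be sketched as follows (hypothetical code; the toy label sequence and random predictions are our own):\n\nimport numpy as np\n\ny = np.array([0, 0, 0, 1, 0, 0, 0, 0, 1, 0])  # 1 = B (minority), 0 = NB\n\n# Equation 11: each class weight is |y| / (2 * count of that class).\ncw = {c: len(y) / (2.0 * (y == c).sum()) for c in (0, 1)}\nprint(cw)  # here {0: 0.625, 1: 2.5}: the minority class B gets the larger weight\n\n# Equation 10: cross-entropy where each term is scaled by its class weight.\ny_hat = np.clip(np.random.rand(len(y)), 1e-7, 1 - 1e-7)  # predicted P(B) per word\np_true = np.where(y == 1, y_hat, 1.0 - y_hat)            # probability of the true class\nloss = -np.mean(np.log(p_true) * np.array([cw[c] for c in y]))\nprint(loss)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Training",

"sec_num": "5.3"

},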
|
{ |
|
"text": "We minimize the loss function with respect to all weights \u03b8 \u2192 L by using RMSProp algorithm (Tieleman and Hinton, 2012) with backpropagation to compute the gradients \u2207L. The update step for a timestep t is made by normalizing the gradients by an exponent moving at an average r t :", |
|
"cite_spans": [ |
|
{ |
|
"start": 91, |
|
"end": 118, |
|
"text": "(Tieleman and Hinton, 2012)", |
|
"ref_id": "BIBREF36" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "r t = \u03b3r t\u22121 + (1 \u2212 \u03b3)\u2207L(\u03b8 t ) 2 (12) \u03b8 t+1 = \u03b8 t \u2212 \u03b7 \u2207L(\u03b8 t ) \u221a r t +", |
|
"eq_num": "(13)" |
|
} |
|
], |
|
"section": "Training", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "Where \u03b7 is the learning rate and 0 < \u03b3 < 1 is the forgetting factor.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training", |
|
"sec_num": "5.3" |
|
}, |
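
{

"text": "A sketch of this RMSProp update (hypothetical code; gamma = 0.9 and eta = 0.001 follow Table 2, and the small constant eps in the denominator is the usual stabilizer implied by the equation above):\n\nimport numpy as np\n\ndef rmsprop_step(theta, grad, r, eta=0.001, gamma=0.9, eps=1e-8):\n    # Equation 12: exponential moving average of the squared gradients.\n    r = gamma * r + (1.0 - gamma) * grad ** 2\n    # Equation 13: update normalized by the moving average.\n    theta = theta - eta * grad / (np.sqrt(r) + eps)\n    return theta, r\n\ntheta, r = np.array([1.0, -2.0]), np.zeros(2)\ngrad = np.array([0.5, -0.1])\ntheta, r = rmsprop_step(theta, grad, r)\nprint(theta, r)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Training",

"sec_num": "5.3"

},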
|
{ |
|
"text": "We break the text in tokens delimited by spaces. We do not remove stopwords from the texts, since they can be important features for our domain.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiment settings", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "We ran a 5-fold cross-validation for the group being analyzed (CLT or MCI), which leaves about 10% of the data for testing, the rest for training.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiment settings", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "The weight matrix for tag embeddings E tag was generated randomly from a gaussian distribution scaled by fan in + fan out (Glorot and Bengio, 2010) . Both embeddings matrix E word and E tag were adjusted during training. We follow previous studies on sentence boundary detection to set the network hyper-parameters (Tilk and Alum\u00e4e, 2015; Che et al., 2016) . The values for each parameter are shown in Table 2 Forget factor 0.9 0.9 \u03b7 Learning rate 0.001 0.001 We tried three different learning rate values \u03b7 \u2208 {0.01, 0.003, 0.001} for both lexical and prosodic models, and found that 0.001 yielded best results. We trained our network over 20 epochs using a bucket strategy, which groups training examples in buckets of similar sentence size. Our implementation is based on Theano (Bergstra et al., 2010) , a library that defines, optimizes and evaluates mathematical expressions in an effective way.", |
|
"cite_spans": [ |
|
{ |
|
"start": 122, |
|
"end": 147, |
|
"text": "(Glorot and Bengio, 2010)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 315, |
|
"end": 338, |
|
"text": "(Tilk and Alum\u00e4e, 2015;", |
|
"ref_id": "BIBREF37" |
|
}, |
|
{ |
|
"start": 339, |
|
"end": 356, |
|
"text": "Che et al., 2016)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 781, |
|
"end": 804, |
|
"text": "(Bergstra et al., 2010)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 402, |
|
"end": 409, |
|
"text": "Table 2", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experiment settings", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "We evaluated our method intrinsically and also compared it with the method developed by Fraser et al. (2015a) for all of the datasets. We also performed robustness tests to indicate how well our method responds to both (i) test data that varies from Cinderella training data and (ii) train data that varies from Cinderella testing data.", |
|
"cite_spans": [ |
|
{ |
|
"start": 88, |
|
"end": 109, |
|
"text": "Fraser et al. (2015a)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "If we classified all words as N B, our method would have an accuracy superior to 90%. For this reason, we use the F 1 metric, which is defined as the harmonic mean between precision and recall. And since we are more interested in knowing whether our method correctly identifies the boundaries, we ignore the N Bs and calculate F 1 only for the positive class (B).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "6" |
|
}, |
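
{

"text": "Computed on the boundary class only, the metric reduces to the following (a hypothetical sketch with toy label sequences of our own):\n\n# F1 restricted to the positive (boundary) class B.\ndef f1_boundary(y_true, y_pred):\n    tp = sum(t == 'B' and p == 'B' for t, p in zip(y_true, y_pred))\n    fp = sum(t == 'NB' and p == 'B' for t, p in zip(y_true, y_pred))\n    fn = sum(t == 'B' and p == 'NB' for t, p in zip(y_true, y_pred))\n    precision = tp / (tp + fp) if tp + fp else 0.0\n    recall = tp / (tp + fn) if tp + fn else 0.0\n    if precision + recall == 0.0:\n        return 0.0\n    return 2 * precision * recall / (precision + recall)\n\ny_true = ['NB', 'B', 'NB', 'NB', 'B']\ny_pred = ['NB', 'B', 'B', 'NB', 'NB']\nprint(f1_boundary(y_true, y_pred))  # 0.5",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Evaluation",

"sec_num": "6"

},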
|
{ |
|
"text": "In this subsection, we evaluate the performance of our classifier (RCNN) for the Cinderella and Constitution datasets. Table 3 summarizes the results.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 119, |
|
"end": 126, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "From Table 3 we can see that our approach presents better results for the Constitution dataset than Cinderella. This may be related to the text quality, as the Cinderella transcripts presents many disfluences, characteristic of spontaneous speech. As expected, results for CTL were higher than for MCI, since CTL narratives contain less disfluencies. Another important observation is that our method performs much better than the baseline. Where the baseline represents the results for a classifier that predicts all words as B. The Constitution results show us that traditional machine learning techniques used in NLP can be applied to this scenario, since the differences in the Cinderella data are few. Another reason that supports this statement is that F 1 results from related studies on sentence boundary detection based on well-written texts are between 0.7 and 0.8 for two classes (Wang et al., 2012; Khomitsevich et al., 2015; Tilk and Alum\u00e4e, 2015; Che et al., 2016) . When we compare the Constitution size relation we find out that corpus size is not greatly affected by the results, since the results for Constitution S were slightly better than for Constitution L. We think that, even with less data, our method performs better on Constitution S because of the distribution of sentence quantity in the dataset, where Constitution S has an average of 23.48 sentences per text, while Constitution L has an average of only 7.56 sentences per text. Table 3 : F 1 for boundary class for each feature set on Cinderella and Constitution data using our method.", |
|
"cite_spans": [ |
|
{ |
|
"start": 890, |
|
"end": 909, |
|
"text": "(Wang et al., 2012;", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 910, |
|
"end": 936, |
|
"text": "Khomitsevich et al., 2015;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 937, |
|
"end": 959, |
|
"text": "Tilk and Alum\u00e4e, 2015;", |
|
"ref_id": "BIBREF37" |
|
}, |
|
{ |
|
"start": 960, |
|
"end": 977, |
|
"text": "Che et al., 2016)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 5, |
|
"end": 12, |
|
"text": "Table 3", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1459, |
|
"end": 1466, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "We also evaluated the performance of different feature sets with our datasets. Embeddings have a great impact on both datasets. The PoS information was influential on both datasets, but by a small margin, since it has a small difference when used with embeddings (0.01) on the Cinderella, and (0.03) Constitution data. This tells us that embeddings already bring enough morphosyntactic information. It is evident that the weight of the prosodic features is higher on Constitution, which is based on prepared speech, than in Cinderela. This result is consistent with those found by Kol\u00e1r et al. (2009) and Fraser et al. (2015a) . We also believe that the quality of the audio recordings may have impacted the weight of the prosodic features, since the Constitution dataset was recorded by speech processing experts in a studio and the Cinderella dataset was recorded in a clinical setting. In light of this, we can see that our method performs better when all features are used. Furthermore, the best results were obtained by using \u03b1 = 0.6, from the linear combination in Equation 1, showing that our model lends more weight to the lexical model.", |
|
"cite_spans": [ |
|
{ |
|
"start": 581, |
|
"end": 600, |
|
"text": "Kol\u00e1r et al. (2009)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 605, |
|
"end": 626, |
|
"text": "Fraser et al. (2015a)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "In order to compare our model with related work, we replicated the approach proposed by Fraser et al. (2015a) , which uses a CRF model for sentence segmentation. To explain the choice for a recurrent convolutional model, we split our method in three: (i) Multilayer Perceptron (MLP): we removed the convolutional and the recurrent layer of our model, and added a hidden fully-connected layer with 100 units and sigmoid activation; (ii) CNN: we simply removed the recurrent layer from our model and passed the output from the convolutional to the fully-connected layer; (iii) Recurrent Neural Network (RNN): analogous to the CNN model, we removed the convolutional layer and connected the embedding layer with the recurrent layer. The results for each method are presented in Table 4 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 88, |
|
"end": 109, |
|
"text": "Fraser et al. (2015a)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 775, |
|
"end": 782, |
|
"text": "Table 4", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Comparison of methods", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "Our method achieved the best results in both datasets. We can see that the CRF method, used by Fraser et al. (2015a) , obtained the worst results on Constitution, and was only better than RNN on the Cinderella data. These results were similar to those reported in their paper, which suggests that our replication was faithful. We believe that the RNN performed poorly because it has a large set of weights to be trained, and since we have relatively little data, it failed to achieve good results. This may be related to the fact that LSTM units are very complex and need more data to be able to converge. Looking at the Constitution results, which have about three times more words than the Cinderella data, we can note the difference (\u223c 0.2) with relation to corpus size. MLP and CNN alone were able to achieve better results than CRF and RNN, but MLP results for the MCI subset were not as good as CNN, which indicates that MLP alone is not able to deal with narratives that are potentially impaired. However, for the Constitution data, MLP obtained results very close (\u223c 0.02) to our best method.", |
|
"cite_spans": [ |
|
{ |
|
"start": 95, |
|
"end": 116, |
|
"text": "Fraser et al. (2015a)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparison of methods", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "Our RCNN achieved the best results on both datasets, implying that a union of these models was a good choice in order to deal with impaired speech. We believe that the greatest influence was from the CNN, and the addition of a recurrent layer with LSTM was able to deal with some particular cases, likely over long dependencies similar to the findings in (Tilk and Alum\u00e4e, 2015) , where the CNN was not able to do so due to the fixed filter length in the convolution process, a re- sult which was also noted in (Che et al., 2016) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 355, |
|
"end": 378, |
|
"text": "(Tilk and Alum\u00e4e, 2015)", |
|
"ref_id": "BIBREF37" |
|
}, |
|
{ |
|
"start": 511, |
|
"end": 529, |
|
"text": "(Che et al., 2016)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparison of methods", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "Robustness was evaluated by measuring F 1 on both out-of-genre and in-genre data. The results for each configuration are presented in Table 5 . We evaluated our method by changing the corpus genre: training with the Constitution and testing with the Cinderella dataset. This evaluation shows that our method performed poorly in this scenario, probably because the differences in the lexical clues between these datasets are high, since the Constitution is composed of prepared speech and Cinderella of spontaneous speech. When we maintain the corpus genre but change the story used in the neuropsychological test, our method can still achieve good results, yielding a small difference of 0.08 for CTL and 0.06 for MCI from our best results. We believe that these results are related with the linear combination weight from Equation 1, where the results were obtained by using \u03b1 = 0.8, lending less weight to the prosodic model when compared to our best results (where it has 40% of influence). Since the Dog Story and Cinderella datasets are composed of spontaneous speech, the lexical clues found in this kind of speech helped the method to achieve good performance.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 134, |
|
"end": 141, |
|
"text": "Table 5", |
|
"ref_id": "TABREF7" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Robustness tests", |
|
"sec_num": "6.3" |
|
}, |
|
{ |
|
"text": "We have shown that our model, using a recurrent convolutional neural network, is benefited by word embeddings and can achieve promising results even with a small amount of data. We found that our method is better for cases where speech is planned, since the prosodic features lend more weight to the classification. Our method achieved good results on impaired speech transcripts even with little data, with an F 1 result of 0.74 on CTL patients, which is comparable with the results from other studies using broadcast news and conversational data (Wang et al., 2012; Khomitsevich et al., 2015; Tilk and Alum\u00e4e, 2015; Che et al., 2016) . Moreover, our method achieved good results in robustness tests when we changed the story used in the neuropsychological test.", |
|
"cite_spans": [ |
|
{ |
|
"start": 548, |
|
"end": 567, |
|
"text": "(Wang et al., 2012;", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 568, |
|
"end": 594, |
|
"text": "Khomitsevich et al., 2015;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 595, |
|
"end": 617, |
|
"text": "Tilk and Alum\u00e4e, 2015;", |
|
"ref_id": "BIBREF37" |
|
}, |
|
{ |
|
"start": 618, |
|
"end": 635, |
|
"text": "Che et al., 2016)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions and Future Work", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "As for future work, we plan to evaluate our method on English data for comparison with related work. Also, we plan on using more text data to train the lexical model, as it is independent from the prosodic model and lends more weight in our evaluations. Moreover, we will evaluate our method with the output of an ASR system for BP, as a higher word recognition error rate can greatly affect our results. Lastly, we would like to evaluate our method with datasets with higher quality audio, more robust acoustic models and a manually aligned portion of the database as better audio segmentation would greatly improve the model and the usefulness of prosodic features.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions and Future Work", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "With respect to improvements in the corpus, our dataset consists of spontaneous speech narratives and was annotated only with periods. Since there are initial conjunctions such as \"and\", \"moreover\", and \"however\", we could include commas. This would turn our problem into a ternary problem. This could be done by increasing the number of neurons in the last layer of our architecture.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions and Future Work", |
|
"sec_num": "7" |
|
}, |
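|
{ |
|
"text": "A minimal sketch of this change, assuming a Keras-style head on top of the recurrent layer (illustrative only, not the code used in our experiments; the input width of 60 assumes the 50-dimensional word embeddings concatenated with the 10-dimensional tag embeddings from our hyper-parameter table):\n\nfrom keras.models import Sequential\nfrom keras.layers import LSTM, Dense, TimeDistributed\n\nNUM_CLASSES = 3  # no boundary, comma, period (2 in the current binary setup)\n\nmodel = Sequential()\nmodel.add(LSTM(100, return_sequences=True, input_shape=(None, 60)))\n# Only the size of this last layer changes for the ternary task.\nmodel.add(TimeDistributed(Dense(NUM_CLASSES, activation='softmax')))\nmodel.compile(loss='categorical_crossentropy', optimizer='rmsprop')", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions and Future Work", |
|
"sec_num": "7" |
|
}, |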
|
{ |
|
"text": "nilc.icmc.usp.br/nlpnet/ 3 g1.globo.com/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We thank CNPq for a scholarship granted to the first author.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Evaluating progression of alzheimer's disease by regression and classification methods in a narrative language test in portuguese", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Alu\u00edsio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Cunha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Scarton", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "International Conference on Computational Processing of the Portuguese Language", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "374--384", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "S. Alu\u00edsio, A. Cunha, and C. Scarton. 2016. Evaluat- ing progression of alzheimer's disease by regression and classification methods in a narrative language test in portuguese. International Conference on Computational Processing of the Portuguese Lan- guage, pages 374-384, July.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Recovering Capitalization and Punctuation Marks on Speech Transcriptions", |
|
"authors": [ |
|
{ |
|
"first": "Fernando", |
|
"middle": [], |
|
"last": "Batista", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nuno", |
|
"middle": [], |
|
"last": "Mamede", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Fernando Batista and Nuno Mamede. 2011. Re- covering Capitalization and Punctuation Marks on Speech Transcriptions. Ph.D. thesis, Instituto Supe- rior T\u00e9cnico.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Avan\u00e7os em reconhecimento de fala para portugu\u00eas brasileiro e aplica\u00e7\u00f5es: ditado no libreoffice e unidade de resposta aud\u00edvel com asterisk", |
|
"authors": [ |
|
{ |
|
"first": "Pedro", |
|
"middle": [ |
|
"dos", |
|
"Santos" |
|
], |
|
"last": "Batista", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pedro dos Santos Batista. 2013. Avan\u00e7os em re- conhecimento de fala para portugu\u00eas brasileiro e aplica\u00e7\u00f5es: ditado no libreoffice e unidade de re- sposta aud\u00edvel com asterisk.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "ABCD: Arizona Battery for Communication Disorders of Dementia", |
|
"authors": [ |
|
{ |
|
"first": "Kathryn", |
|
"middle": [], |
|
"last": "Bayles", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Tomoeda", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1991, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kathryn Bayles and C.K. Tomoeda. 1991. ABCD: Ari- zona Battery for Communication Disorders of De- mentia. Tucson, AZ: Canyonlands Publishing.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Guidelines for tobi labelling: The ohio state university research foundation", |
|
"authors": [ |
|
{ |
|
"first": "Mary", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Beckman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gayle", |
|
"middle": [ |
|
"Ayers" |
|
], |
|
"last": "Elam", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mary E. Beckman and Gayle Ayers Elam. 1997. Guidelines for tobi labelling: The ohio state univer- sity research foundation.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Theano: A cpu and gpu math compiler in python", |
|
"authors": [ |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Bergstra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Olivier", |
|
"middle": [], |
|
"last": "Breuleux", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fr\u00e9d\u00e9ric", |
|
"middle": [], |
|
"last": "Bastien", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pascal", |
|
"middle": [], |
|
"last": "Lamblin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Razvan", |
|
"middle": [], |
|
"last": "Pascanu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guillaume", |
|
"middle": [], |
|
"last": "Desjardins", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joseph", |
|
"middle": [], |
|
"last": "Turian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Warde-Farley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proc. 9th Python in Science Conf", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--7", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "James Bergstra, Olivier Breuleux, Fr\u00e9d\u00e9ric Bastien, Pascal Lamblin, Razvan Pascanu, Guillaume Des- jardins, Joseph Turian, David Warde-Farley, and Yoshua Bengio. 2010. Theano: A cpu and gpu math compiler in python. In Proc. 9th Python in Science Conf, pages 1-7.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Praat, a system for doing phonetics by computer", |
|
"authors": [ |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Boersma", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Glot international", |
|
"volume": "5", |
|
"issue": "9", |
|
"pages": "341--345", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Paul Boersma et al. 2002. Praat, a system for do- ing phonetics by computer. Glot international, 5(9/10):341-345.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Punctuation prediction for unsegmented transcript based on word vector", |
|
"authors": [ |
|
{ |
|
"first": "Xiaoyin", |
|
"middle": [], |
|
"last": "Che", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cheng", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Haojin", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christoph", |
|
"middle": [], |
|
"last": "Meinel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC 2016)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiaoyin Che, Cheng Wang, Haojin Yang, and Christoph Meinel. 2016. Punctuation prediction for unsegmented transcript based on word vector. In Proceedings of the Tenth International Confer- ence on Language Resources and Evaluation (LREC 2016), Paris, France. European Language Resources Association (ELRA).", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Natural language processing (almost) from scratch", |
|
"authors": [ |
|
{ |
|
"first": "Ronan", |
|
"middle": [], |
|
"last": "Collobert", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Weston", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L\u00e9on", |
|
"middle": [], |
|
"last": "Bottou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Karlen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Koray", |
|
"middle": [], |
|
"last": "Kavukcuoglu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pavel", |
|
"middle": [], |
|
"last": "Kuksa", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Journal of Machine Learning Research", |
|
"volume": "12", |
|
"issue": "", |
|
"pages": "2493--2537", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ronan Collobert, Jason Weston, L\u00e9on Bottou, Michael Karlen, Koray Kavukcuoglu, and Pavel Kuksa. 2011. Natural language processing (almost) from scratch. Journal of Machine Learning Research, 12(Aug):2493-2537.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Evaluating word embeddings and a revised corpus for part-of-speech tagging in portuguese", |
|
"authors": [ |
|
{ |
|
"first": "Erick", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Fonseca", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jo\u00e3o", |
|
"middle": [ |
|
"Lu\u00eds", |
|
"G" |
|
], |
|
"last": "Rosa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sandra", |
|
"middle": [ |
|
"Maria" |
|
], |
|
"last": "Alu\u00edsio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Journal of the Brazilian Computer Society", |
|
"volume": "21", |
|
"issue": "1", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Erick R. Fonseca, Jo\u00e3o Lu\u00eds G. Rosa, and Sandra Maria Alu\u00edsio. 2015. Evaluating word embeddings and a revised corpus for part-of-speech tagging in por- tuguese. Journal of the Brazilian Computer Society, 21(1):1.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Sentence segmentation of aphasic speech", |
|
"authors": [ |
|
{ |
|
"first": "Kathleen", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Fraser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naama", |
|
"middle": [], |
|
"last": "Ben-David", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Graeme", |
|
"middle": [], |
|
"last": "Hirst", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naida", |
|
"middle": [], |
|
"last": "Graham", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elizabeth", |
|
"middle": [], |
|
"last": "Rochon", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "The 2015 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "862--871", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kathleen C. Fraser, Naama Ben-David, Graeme Hirst, Naida Graham, and Elizabeth Rochon. 2015a. Sen- tence segmentation of aphasic speech. In Proceed- ings of the NAACL HLT 2015, The 2015 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 862-871.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Linguistic features identify alzheimer's disease in narrative speech", |
|
"authors": [ |
|
{ |
|
"first": "Kathleen", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Fraser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jed", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Meltzer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Frank", |
|
"middle": [], |
|
"last": "Rudzicz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Journal of Alzheimer's Disease", |
|
"volume": "49", |
|
"issue": "2", |
|
"pages": "407--422", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kathleen C. Fraser, Jed A. Meltzer, and Frank Rudz- icz. 2015b. Linguistic features identify alzheimer's disease in narrative speech. Journal of Alzheimer's Disease, 49(2):407-422.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Understanding the difficulty of training deep feedforward neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Xavier", |
|
"middle": [], |
|
"last": "Glorot", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "In Aistats", |
|
"volume": "9", |
|
"issue": "", |
|
"pages": "249--256", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xavier Glorot and Yoshua Bengio. 2010. Understand- ing the difficulty of training deep feedforward neural networks. In Aistats, volume 9, pages 249-256.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Towards endto-end speech recognition with recurrent neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Graves", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Navdeep", |
|
"middle": [], |
|
"last": "Jaitly", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "ICML", |
|
"volume": "14", |
|
"issue": "", |
|
"pages": "1764--1772", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alex Graves and Navdeep Jaitly. 2014. Towards end- to-end speech recognition with recurrent neural net- works. In ICML, volume 14, pages 1764-1772.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Multi-pass sentence-end detection of lecture speech", |
|
"authors": [ |
|
{ |
|
"first": "Madina", |
|
"middle": [], |
|
"last": "Hasan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rama", |
|
"middle": [], |
|
"last": "Doddipatla", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Hain", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "INTERSPEECH", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2902--2906", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Madina Hasan, Rama Doddipatla, and Thomas Hain. 2014. Multi-pass sentence-end detection of lecture speech. In INTERSPEECH, pages 2902-2906.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Long short-term memory", |
|
"authors": [ |
|
{ |
|
"first": "Sepp", |
|
"middle": [], |
|
"last": "Hochreiter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J\u00fcrgen", |
|
"middle": [], |
|
"last": "Schmidhuber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Neural computation", |
|
"volume": "9", |
|
"issue": "8", |
|
"pages": "1735--1780", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sepp Hochreiter and J\u00fcrgen Schmidhuber. 1997. Long short-term memory. Neural computation, 9(8):1735-1780.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Is mild cognitive impairment a precursor of alzheimer's disease? short review", |
|
"authors": [ |
|
{ |
|
"first": "Jana", |
|
"middle": [], |
|
"last": "Janoutov\u00e1", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omar", |
|
"middle": [], |
|
"last": "Ser\u1ef3", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ladislav", |
|
"middle": [], |
|
"last": "Hos\u00e1k", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vladim\u00edr", |
|
"middle": [], |
|
"last": "Janout", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Central European journal of public health", |
|
"volume": "23", |
|
"issue": "4", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jana Janoutov\u00e1, Omar Ser\u1ef3, Ladislav Hos\u00e1k, and Vladim\u00edr Janout. 2015. Is mild cognitive impair- ment a precursor of alzheimer's disease? short re- view. Central European journal of public health, 23(4):365.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Produ\u00e7\u00e3o de narrativas orais no envelhecimento sadio, no comprometimento cognitivo leve e na doen\u00e7a de Alzheimer e sua rela\u00e7\u00e3o com construtos cognitivos e escolaridade", |
|
"authors": [ |
|
{ |
|
"first": "Gislaine", |
|
"middle": [ |
|
"Machado" |
|
], |
|
"last": "Jer\u00f4nimo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gislaine Machado Jer\u00f4nimo. 2016. Produ\u00e7\u00e3o de nar- rativas orais no envelhecimento sadio, no compro- metimento cognitivo leve e na doen\u00e7a de Alzheimer e sua rela\u00e7\u00e3o com construtos cognitivos e escolari- dade. Ph.D. thesis.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "An empirical exploration of recurrent network architectures", |
|
"authors": [ |
|
{ |
|
"first": "Rafal", |
|
"middle": [], |
|
"last": "Jozefowicz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wojciech", |
|
"middle": [], |
|
"last": "Zaremba", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Journal of Machine Learning Research", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rafal Jozefowicz, Wojciech Zaremba, and Ilya Sutskever. 2015. An empirical exploration of re- current network architectures. Journal of Machine Learning Research.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Combining prosodic and lexical classifiers for two-pass punctuation detection in a russian asr system", |
|
"authors": [ |
|
{ |
|
"first": "Olga", |
|
"middle": [], |
|
"last": "Khomitsevich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pavel", |
|
"middle": [], |
|
"last": "Chistikov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tatiana", |
|
"middle": [], |
|
"last": "Krivosheeva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Natalia", |
|
"middle": [], |
|
"last": "Epimakhova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Irina", |
|
"middle": [], |
|
"last": "Chernykh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "International Conference on Speech and Computer", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "161--169", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Olga Khomitsevich, Pavel Chistikov, Tatiana Krivosheeva, Natalia Epimakhova, and Irina Chernykh. 2015. Combining prosodic and lexical classifiers for two-pass punctuation detection in a russian asr system. In International Conference on Speech and Computer, pages 161-169. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Convolutional neural networks for sentence classification", |
|
"authors": [ |
|
{ |
|
"first": "Yoon", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1746--1751", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yoon Kim. 2014. Convolutional neural networks for sentence classification. In Proceedings of the 2014 Conference on Empirical Methods in Natural Lan- guage Processing (EMNLP), pages 1746-1751. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Genre effects on automatic sentence segmentation of speech: A comparison of broadcast news and broadcast conversations", |
|
"authors": [ |
|
{ |
|
"first": "J\u00e1chym", |
|
"middle": [], |
|
"last": "Kol\u00e1r", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elizabeth", |
|
"middle": [], |
|
"last": "Shriberg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "IEEE International Conference on Acoustics, Speech and Signal Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4701--4704", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J\u00e1chym Kol\u00e1r, Yang Liu, and Elizabeth Shriberg. 2009. Genre effects on automatic sentence segmentation of speech: A comparison of broadcast news and broadcast conversations. In 2009 IEEE Interna- tional Conference on Acoustics, Speech and Signal Processing, pages 4701-4704. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Recurrent convolutional neural networks for text classification", |
|
"authors": [ |
|
{ |
|
"first": "Siwei", |
|
"middle": [], |
|
"last": "Lai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Liheng", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jun", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the Twenty-Ninth AAAI Conference on Artificial Intelligence, AAAI'15", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2267--2273", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Siwei Lai, Liheng Xu, Kang Liu, and Jun Zhao. 2015. Recurrent convolutional neural networks for text classification. In Proceedings of the Twenty- Ninth AAAI Conference on Artificial Intelligence, AAAI'15, pages 2267-2273. AAAI Press.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Raconte: 55 historiettes en images. L'\u00c9cole", |
|
"authors": [ |
|
{ |
|
"first": "Christine", |
|
"middle": [ |
|
"Le" |
|
], |
|
"last": "Boeuf", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1976, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Christine Le Boeuf. 1976. Raconte: 55 historiettes en images. L'\u00c9cole.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Fully automated neuropsychological assessment for detecting mild cognitive impairment", |
|
"authors": [ |
|
{ |
|
"first": "Maider", |
|
"middle": [], |
|
"last": "Lehr", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emily", |
|
"middle": [ |
|
"Tucker" |
|
], |
|
"last": "Prudhommeaux", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Izhak", |
|
"middle": [], |
|
"last": "Shafran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brian", |
|
"middle": [], |
|
"last": "Roark", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "INTERSPEECH", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1039--1042", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Maider Lehr, Emily Tucker Prudhommeaux, Izhak Shafran, and Brian Roark. 2012. Fully automated neuropsychological assessment for detecting mild cognitive impairment. In INTERSPEECH, pages 1039-1042.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "A study in machine learning from imbalanced data for sentence boundary detection in speech", |
|
"authors": [ |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nitesh", |
|
"middle": [ |
|
"V" |
|
], |
|
"last": "Chawla", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mary", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Harper", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elizabeth", |
|
"middle": [], |
|
"last": "Shriberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andreas", |
|
"middle": [], |
|
"last": "Stolcke", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Computer Speech and Language", |
|
"volume": "20", |
|
"issue": "4", |
|
"pages": "468--494", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yang Liu, Nitesh V. Chawla, Mary P. Harper, Elizabeth Shriberg, and Andreas Stolcke. 2006. A study in machine learning from imbalanced data for sentence boundary detection in speech. Computer Speech and Language, 20(4):468-494.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Experiments on sentence boundary detection in usergenerated web content", |
|
"authors": [ |
|
{ |
|
"first": "Roque", |
|
"middle": [], |
|
"last": "L\u00f3pez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thiago", |
|
"middle": [ |
|
"A", |
|
"S" |
|
], |
|
"last": "Pardo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Computational Linguistics and Intelligent Text Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "227--237", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Roque L\u00f3pez and Thiago A.S. Pardo. 2015. Ex- periments on sentence boundary detection in user- generated web content. In Computational Linguis- tics and Intelligent Text Processing, pages 227-237.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "The diagnosis of dementia due to alzheimer's disease: Recommendations from the national institute on aging-alzheimer's association workgroups on diagnostic guidelines for alzheimer's disease", |
|
"authors": [ |
|
{ |
|
"first": "Guy", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "McKhann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Knopman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Howard", |
|
"middle": [], |
|
"last": "Chertkow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bradley", |
|
"middle": [ |
|
"T" |
|
], |
|
"last": "Hyman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Clifford", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Jack", |
|
"suffix": "Jr." |
|
}, |
|
{ |
|
"first": "Claudia", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Kawas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "William", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Klunk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Walter", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Koroshetz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jennifer", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Manly", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Mayeux", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Alzheimer's & Dementia", |
|
"volume": "7", |
|
"issue": "3", |
|
"pages": "263--269", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Guy M. McKhann, David S. Knopman, Howard Chertkow, Bradley T. Hyman, Clifford R. Jack Jr., Claudia H. Kawas, William E. Klunk, Walter J. Ko- roshetz, Jennifer J. Manly, Richard Mayeux, et al. 2011. The diagnosis of dementia due to alzheimer's disease: Recommendations from the national insti- tute on aging-alzheimer's association workgroups on diagnostic guidelines for alzheimer's disease. Alzheimer's & Dementia, 7(3):263-269.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "The uniform data set (uds): clinical and cognitive variables and descriptive data from alzheimer disease centers", |
|
"authors": [ |
|
{ |
|
"first": "John", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Morris", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sandra", |
|
"middle": [], |
|
"last": "Weintraub", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Helena", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Chui", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Cummings", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Charles", |
|
"middle": [], |
|
"last": "Decarli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Steven", |
|
"middle": [], |
|
"last": "Ferris", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Norman", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Foster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Douglas", |
|
"middle": [], |
|
"last": "Galasko", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Neill", |
|
"middle": [], |
|
"last": "Graff-Radford", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elaine", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Peskind", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Alzheimer Disease & Associated Disorders", |
|
"volume": "20", |
|
"issue": "4", |
|
"pages": "210--216", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John C. Morris, Sandra Weintraub, Helena C. Chui, Jeffrey Cummings, Charles DeCarli, Steven Ferris, Norman L. Foster, Douglas Galasko, Neill Graff- Radford, Elaine R. Peskind, et al. 2006. The uni- form data set (uds): clinical and cognitive variables and descriptive data from alzheimer disease cen- ters. Alzheimer Disease & Associated Disorders, 20(4):210-216.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Prevalence of potentially reversible conditions in dementia and mild cognitive impairment in a geriatric clinic", |
|
"authors": [ |
|
{ |
|
"first": "Weerasak", |
|
"middle": [], |
|
"last": "Muangpaisan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chonachan", |
|
"middle": [], |
|
"last": "Petcharat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Varalak", |
|
"middle": [], |
|
"last": "Srinonprasert", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Geriatrics & Gerontology International", |
|
"volume": "12", |
|
"issue": "1", |
|
"pages": "59--64", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Weerasak Muangpaisan, Chonachan Petcharat, and Varalak Srinonprasert. 2012. Prevalence of po- tentially reversible conditions in dementia and mild cognitive impairment in a geriatric clinic. Geriatrics & Gerontology International, 12(1):59-64.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Machine learning: a probabilistic perspective", |
|
"authors": [ |
|
{ |
|
"first": "Kevin", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Murphy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kevin P. Murphy. 2012. Machine learning: a proba- bilistic perspective. MIT press.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Spoken language derived measures for detecting mild cognitive impairment. Audio, Speech, and Language Processing", |
|
"authors": [ |
|
{ |
|
"first": "Brian", |
|
"middle": [], |
|
"last": "Roark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Margaret", |
|
"middle": [], |
|
"last": "Mitchell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John-Paul", |
|
"middle": [], |
|
"last": "Hosom", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristy", |
|
"middle": [], |
|
"last": "Hollingshead", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Kaye", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "IEEE Transactions on", |
|
"volume": "19", |
|
"issue": "7", |
|
"pages": "2081--2090", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Brian Roark, Margaret Mitchell, John-Paul Hosom, Kristy Hollingshead, and Jeffrey Kaye. 2011. Spoken language derived measures for detect- ing mild cognitive impairment. Audio, Speech, and Language Processing, IEEE Transactions on, 19(7):2081-2090.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Ambiente web de suporte\u00e0 transcri\u00e7\u00e3o fon\u00e9tica autom\u00e1tica de lemas em verbetes de dicion\u00e1rios do portugu\u00eas do brasil", |
|
"authors": [ |
|
{ |
|
"first": "Vanessa Marquiaf\u00e1vel", |
|
"middle": [], |
|
"last": "Serrani", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vanessa Marquiaf\u00e1vel Serrani. 2015. Ambiente web de suporte\u00e0 transcri\u00e7\u00e3o fon\u00e9tica autom\u00e1tica de lemas em verbetes de dicion\u00e1rios do portugu\u00eas do brasil.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "An analysis of sentence boundary detection systems for english and portuguese documents", |
|
"authors": [ |
|
{ |
|
"first": "Carlos", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Silla", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Celso", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Kaestner", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "International Conference on Intelligent Text Processing and Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "135--141", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Carlos N. Silla Jr. and Celso A.A. Kaestner. 2004. An analysis of sentence boundary detection systems for english and portuguese documents. In International Conference on Intelligent Text Processing and Com- putational Linguistics, pages 135-141. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Dropout: a simple way to prevent neural networks from overfitting", |
|
"authors": [ |
|
{ |
|
"first": "Nitish", |
|
"middle": [], |
|
"last": "Srivastava", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Hinton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Krizhevsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruslan", |
|
"middle": [], |
|
"last": "Salakhutdinov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Journal of Machine Learning Research", |
|
"volume": "15", |
|
"issue": "1", |
|
"pages": "1929--1958", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nitish Srivastava, Geoffrey E. Hinton, Alex Krizhevsky, Ilya Sutskever, and Ruslan Salakhutdi- nov. 2014. Dropout: a simple way to prevent neural networks from overfitting. Journal of Machine Learning Research, 15(1):1929-1958.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Non-pharmacological interventions on cognitive functions in older people with mild cognitive impairment (mci). Archives of gerontology and geriatrics", |
|
"authors": [ |
|
{ |
|
"first": "Camila", |
|
"middle": [ |
|
"Vieira", |
|
"Ligo" |
|
], |
|
"last": "Teixeira", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lilian", |
|
"middle": [ |
|
"Teresa", |
|
"Bucken" |
|
], |
|
"last": "Gobbi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danilla", |
|
"middle": [ |
|
"Icassatti" |
|
], |
|
"last": "Corazza", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Florindo", |
|
"middle": [], |
|
"last": "Stella", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jos\u00e9", |
|
"middle": [ |
|
"Luiz", |
|
"Riani" |
|
], |
|
"last": "Costa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebasti\u00e3o", |
|
"middle": [], |
|
"last": "Gobbi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "", |
|
"volume": "54", |
|
"issue": "", |
|
"pages": "175--180", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Camila Vieira Ligo Teixeira, Lilian Teresa Bucken Gobbi, Danilla Icassatti Corazza, Florindo Stella, Jos\u00e9 Luiz Riani Costa, and Sebasti\u00e3o Gobbi. 2012. Non-pharmacological interventions on cognitive functions in older people with mild cognitive impair- ment (mci). Archives of gerontology and geriatrics, 54(1):175-180.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Lecture 6.5-rmsprop: Divide the gradient by a running average of its recent magnitude", |
|
"authors": [ |
|
{ |
|
"first": "Tijmen", |
|
"middle": [], |
|
"last": "Tieleman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [], |
|
"last": "Hinton", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "COURSERA: Neural Networks for Machine Learning", |
|
"volume": "4", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tijmen Tieleman and Geoffrey Hinton. 2012. Lecture 6.5-rmsprop: Divide the gradient by a running av- erage of its recent magnitude. COURSERA: Neural Networks for Machine Learning, 4(2).", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Lstm for punctuation restoration in speech transcripts", |
|
"authors": [ |
|
{ |
|
"first": "Ottokar", |
|
"middle": [], |
|
"last": "Tilk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tanel", |
|
"middle": [], |
|
"last": "Alum\u00e4e", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "INTER-SPEECH", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ottokar Tilk and Tanel Alum\u00e4e. 2015. Lstm for punc- tuation restoration in speech transcripts. In INTER- SPEECH.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "Dynamic conditional random fields for joint sentence boundary and punctuation prediction", |
|
"authors": [ |
|
{ |
|
"first": "Xuancong", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hwee Tou", |
|
"middle": [], |
|
"last": "Ng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Khe Chai", |
|
"middle": [], |
|
"last": "Sim", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "INTERSPEECH", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xuancong Wang, Hwee Tou Ng, and Khe Chai Sim. 2012. Dynamic conditional random fields for joint sentence boundary and punctuation prediction. In INTERSPEECH.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "Wechsler memory scale (WMS-III)", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Wechsler", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David Wechsler. 1997. Wechsler memory scale (WMS- III). Psychological Corporation.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "A deep neural network approach for sentence boundary detection in broadcast news", |
|
"authors": [ |
|
{ |
|
"first": "Chenglin", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lei", |
|
"middle": [], |
|
"last": "Xie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guangpu", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiong", |
|
"middle": [], |
|
"last": "Xiao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Engsiong", |
|
"middle": [], |
|
"last": "Chng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Haizhou", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "INTERSPEECH", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2887--2891", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chenglin Xu, Lei Xie, Guangpu Huang, Xiong Xiao, Engsiong Chng, and Haizhou Li. 2014. A deep neural network approach for sentence boundary de- tection in broadcast news. In INTERSPEECH, pages 2887-2891.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "Using linguistic features longitudinally to predict clinical scores for alzheimer's disease and related dementias", |
|
"authors": [ |
|
{ |
|
"first": "Maria", |
|
"middle": [], |
|
"last": "Yancheva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kathleen", |
|
"middle": [], |
|
"last": "Fraser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Frank", |
|
"middle": [], |
|
"last": "Rudzicz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "6th Workshop on Speech and Language Processing for Assistive Technologies (SLPAT)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Maria Yancheva, Kathleen Fraser, and Frank Rudz- icz. 2015. Using linguistic features longitudi- nally to predict clinical scores for alzheimer's dis- ease and related dementias. In 6th Workshop on Speech and Language Processing for Assistive Tech- nologies (SLPAT), page 134.", |
|
"links": null |
|
}, |
|
"BIBREF42": { |
|
"ref_id": "b42", |
|
"title": "The htk book", |
|
"authors": [ |
|
{ |
|
"first": "Steve", |
|
"middle": [], |
|
"last": "Young", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gunnar", |
|
"middle": [], |
|
"last": "Evermann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Gales", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Hain", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Kershaw", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xunying", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gareth", |
|
"middle": [], |
|
"last": "Moore", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julian", |
|
"middle": [], |
|
"last": "Odell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dave", |
|
"middle": [], |
|
"last": "Ollason", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Povey", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Cambridge university engineering department", |
|
"volume": "3", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Steve Young, Gunnar Evermann, Mark Gales, Thomas Hain, Dan Kershaw, Xunying Liu, Gareth Moore, Julian Odell, Dave Ollason, Dan Povey, et al. 2002. The htk book. Cambridge university engineering department, 3:175.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "Architecture of the RCNN for both lexical and prosodic model. morphosyntatic tagger called nlpnet 2 trained on a revised version of the Mac-Morpho corpus(Fonseca et al., 2015), which contains a set of 25 tags.", |
|
"num": null |
|
}, |
|
"FIGREF1": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "Diagram of a LSTM memory cell.", |
|
"num": null |
|
}, |
|
"TABREF0": { |
|
"type_str": "table", |
|
"num": null, |
|
"content": "<table><tr><td colspan=\"3\">A third dataset was used in robustness</td></tr><tr><td>tests (Section 3.3).</td><td/><td/></tr><tr><td>Info</td><td>CTL MCI</td><td>AD</td></tr><tr><td>Avg. Age</td><td>74.8 73.3</td><td>78.2</td></tr><tr><td>Avg. Education</td><td>11.4 10.8</td><td>8.6</td></tr><tr><td colspan=\"3\">No. of Male/Female 4/16 6/14 10/10</td></tr></table>", |
|
"html": null, |
|
"text": "" |
|
}, |
|
"TABREF1": { |
|
"type_str": "table", |
|
"num": null, |
|
"content": "<table><tr><td colspan=\"2\">Var. Parameter</td><td colspan=\"2\">Lexical Prosodic</td></tr><tr><td colspan=\"2\">|e w | Word emb. size</td><td>50</td><td>-</td></tr><tr><td colspan=\"2\">|e t | Tag emb. size</td><td>10</td><td>-</td></tr><tr><td>n f</td><td>Conv. filters</td><td>100</td><td>8</td></tr><tr><td>h c</td><td>Filter length</td><td>7</td><td>5</td></tr><tr><td>h m</td><td>Max-pool size</td><td>3</td><td>3</td></tr><tr><td>n r</td><td>Recurrent units</td><td>100</td><td>100</td></tr><tr><td>\u03b3</td><td/><td/><td/></tr></table>", |
|
"html": null, |
|
"text": "." |
|
}, |
|
"TABREF2": { |
|
"type_str": "table", |
|
"num": null, |
|
"content": "<table/>", |
|
"html": null, |
|
"text": "RCNN Hyper-parameters." |
|
}, |
|
"TABREF5": { |
|
"type_str": "table", |
|
"num": null, |
|
"content": "<table/>", |
|
"html": null, |
|
"text": "Best F 1 results for each method." |
|
}, |
|
"TABREF7": { |
|
"type_str": "table", |
|
"num": null, |
|
"content": "<table/>", |
|
"html": null, |
|
"text": "Results for robustness tests" |
|
} |
|
} |
|
} |
|
} |