|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T14:06:08.321963Z" |
|
}, |
|
"title": "Robust Prediction of Punctuation and Truecasing for Medical ASR", |
|
"authors": [ |
|
{ |
|
"first": "Monica", |
|
"middle": [], |
|
"last": "Sunkara", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Amazon AWS AI", |
|
"location": { |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Srikanth", |
|
"middle": [], |
|
"last": "Ronanki", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Amazon AWS AI", |
|
"location": { |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Kalpit", |
|
"middle": [], |
|
"last": "Dixit", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Amazon AWS AI", |
|
"location": { |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Sravan", |
|
"middle": [], |
|
"last": "Bodapati", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Amazon AWS AI", |
|
"location": { |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Katrin", |
|
"middle": [], |
|
"last": "Kirchhoff", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Amazon AWS AI", |
|
"location": { |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Automatic speech recognition (ASR) systems in the medical domain that focus on transcribing clinical dictations and doctor-patient conversations often pose many challenges due to the complexity of the domain. ASR output typically undergoes automatic punctuation to enable users to speak naturally, without having to vocalise awkward and explicit punctuation commands, such as \"period\", \"add comma\" or \"exclamation point\", while truecasing enhances user readability and improves the performance of downstream NLP tasks. This paper proposes a conditional joint modeling framework for prediction of punctuation and truecasing using pretrained masked language models such as BERT, BioBERT and RoBERTa. We also present techniques for domain and task specific adaptation by finetuning masked language models with medical domain data. Finally, we improve the robustness of the model against common errors made in ASR by performing data augmentation. Experiments performed on dictation and conversational style corpora show that our proposed model achieves \u223c5% absolute improvement on ground truth text and \u223c10% improvement on ASR outputs over baseline models under F1 metric.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Automatic speech recognition (ASR) systems in the medical domain that focus on transcribing clinical dictations and doctor-patient conversations often pose many challenges due to the complexity of the domain. ASR output typically undergoes automatic punctuation to enable users to speak naturally, without having to vocalise awkward and explicit punctuation commands, such as \"period\", \"add comma\" or \"exclamation point\", while truecasing enhances user readability and improves the performance of downstream NLP tasks. This paper proposes a conditional joint modeling framework for prediction of punctuation and truecasing using pretrained masked language models such as BERT, BioBERT and RoBERTa. We also present techniques for domain and task specific adaptation by finetuning masked language models with medical domain data. Finally, we improve the robustness of the model against common errors made in ASR by performing data augmentation. Experiments performed on dictation and conversational style corpora show that our proposed model achieves \u223c5% absolute improvement on ground truth text and \u223c10% improvement on ASR outputs over baseline models under F1 metric.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Medical ASR systems automatically transcribe medical speech found in a variety of use cases like physician-dictated notes , telemedicine and even doctor-patient conversations (Chiu et al., 2017) , without any human intervention. These systems ease the burden of long hours of administrative work and also promote better engagement with patients. However, the generated ASR outputs are typically devoid of punctuation and truecasing thereby making it difficult to comprehend. Furthermore, their recovery improves the accuracy of subsequent natural language understanding algorithms (Peitz et al., 2011a; Makhoul et al., 2005) to identify information such as patient diagnosis, treatments, dosages, symptoms and signs. Typically, clinicians explicitly dictate the punctuation commands like \"period\", \"add comma\" etc., and a postprocessing component takes care of punctuation restoration. This process is usually error-prone as the clinicians may struggle with appropriate punctuation insertion during dictation. Moreover, doctor-patient conversations lack explicit vocalization of punctuation marks motivating the need for automatic prediction of punctuation and truecasing. In this work, we aim to solve the problem of automatic punctuation and truecasing restoration to medical ASR system text outputs.", |
|
"cite_spans": [ |
|
{ |
|
"start": 175, |
|
"end": 194, |
|
"text": "(Chiu et al., 2017)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 581, |
|
"end": 602, |
|
"text": "(Peitz et al., 2011a;", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 603, |
|
"end": 624, |
|
"text": "Makhoul et al., 2005)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Most recent approaches to punctuation and truecasing restoration problem rely on deep learning (Nguyen et al., 2019a; . Although it is a well explored problem in the literature, most of these improvements do not directly translate to great real world performance in all settings. For example, unlike general text, it is a harder problem to solve when applied to the medical domain for various reasons and we illustrate each of them:", |
|
"cite_spans": [ |
|
{ |
|
"start": 95, |
|
"end": 117, |
|
"text": "(Nguyen et al., 2019a;", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Large vocabulary: ASR systems in the medical domain have a large set of domain-specific vocabulary and several abbreviations. Owing to the domain specific data set and the open vocabulary in LVCSR (large-vocabulary continuous speech recognition) outputs, we often run into OOV (out of vocabulary) or rare word problems. Furthermore, a large vocabulary set leads to data sparsity issues. We address both these problems by using subword models. Subwords have been shown to work well in open-vocabulary speech recognition and several NLP tasks (Sennrich et al., 2015; Bodapati et al., 2019) . We compare word and subword models across different architectures and show that subword models consistently outperform the former.", |
|
"cite_spans": [ |
|
{ |
|
"start": 543, |
|
"end": 566, |
|
"text": "(Sennrich et al., 2015;", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 567, |
|
"end": 589, |
|
"text": "Bodapati et al., 2019)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Data scarcity: Data scarcity is one of the major bottlenecks in supervised learning. When it comes to the medical domain, obtaining data is not as straight-forward as some of the other domains where abundance of text is available. On the other hand, obtaining large amounts of data is a tedious and costly process; procuring and maintaining it could be a challenge owing to the strict privacy laws. We overcome the data scarcity problem, by using pretrained masked language models like BERT (Devlin et al., 2018) and its successors (Liu et al., 2019; Yang et al., 2019) which have successfully been shown to produce stateof-the-art results when finetuned for several downstream tasks like question answering and language inference. We approach the prediction task as a sequence labeling problem and jointly learn punctuation and truecasing. We show that finetuning a pretrained model with a very small medical dataset (\u223c500k words) has \u223c5% absolute performance improvement in terms of F1 compared to a model trained from scratch. We further boost the performance by first finetuning the masked language model to the medical speech domain and then to the downstream task.", |
|
"cite_spans": [ |
|
{ |
|
"start": 493, |
|
"end": 514, |
|
"text": "(Devlin et al., 2018)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 534, |
|
"end": 552, |
|
"text": "(Liu et al., 2019;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 553, |
|
"end": 571, |
|
"text": "Yang et al., 2019)", |
|
"ref_id": "BIBREF35" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 ASR Robustness: Models trained on ground truth data are not exposed to typical errors in speech recognition and perform poorly when evaluated on ASR outputs. Our objective is to make the punctuation prediction and truecasing more robust to speech recognition errors and establish a mechanism to test the performance of the model quantitatively. To address this issue, we propose a data augmentation based approach using n-best lists from ASR.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The contributions of this work are:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 A general post-processing framework for conditional joint labeling of punctuation and truecasing for medical ASR (clinical dictation and conversations).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 An analysis comparing different embeddings that are suitable for the medical domain. An in-depth analysis of the effectiveness of using pretrained masked language models like BERT and its successors to address the data scarcity problem.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Techniques for effective domain and task adaptation using Masked Language Model (MLM) finetuning of BERT on medical domain data to boost the downstream task performance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Method for enhancing robustness of the models via data augmentation with n-best lists (from ASR output) to the ground truth during training to improve performance on ASR hypothesis at inference time.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The rest of this paper is organized as follows. Section 2 presents related work on punctuation and truecasing restoration. Section 3 introduces the model architecture used in this paper and describes various techniques for improving accuracy and robustness. The experimental evaluation and results are discussed in Section 4 and finally, Section 5 presents the conclusions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Several researchers have proposed a number of methodologies such as the use of probabilistic machine learning models, neural network models, and the acoustic fusion approaches for punctuation prediction. We review related work in these areas below.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "In earlier efforts, punctuation prediction has been approached by using finite state or hidden Markov models (Gotoh and Renals, 2000; Christensen et al., 2001a) . Several other approaches addressed it as a language modeling problem by predicting the most probable sequence of words with punctuation marks inserted (Stolcke et al., 1998; Beeferman et al., 1998; Gravano et al., 2009) . Some others used conditional random fields (CRFs) (Lu and Ng, 2010; Ueffing et al., 2013) and maximum entropy using n-grams (Huang and Zweig, 2002) . The rise of stronger machine learning techniques such as deep and/or recurrent neural networks replaced these conventional models.", |
|
"cite_spans": [ |
|
{ |
|
"start": 109, |
|
"end": 133, |
|
"text": "(Gotoh and Renals, 2000;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 134, |
|
"end": 160, |
|
"text": "Christensen et al., 2001a)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 314, |
|
"end": 336, |
|
"text": "(Stolcke et al., 1998;", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 337, |
|
"end": 360, |
|
"text": "Beeferman et al., 1998;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 361, |
|
"end": 382, |
|
"text": "Gravano et al., 2009)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 435, |
|
"end": 452, |
|
"text": "(Lu and Ng, 2010;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 453, |
|
"end": 474, |
|
"text": "Ueffing et al., 2013)", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 509, |
|
"end": 532, |
|
"text": "(Huang and Zweig, 2002)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Earlier methods", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Some methods used only acoustic information such as speech rate, intonation, pause duration etc., (Christensen et al., 2001b; Levy et al., 2012) . While pauses influence in the prediction of Comma, intonation helps in disambiguation between punctuation marks like period and exclamation. Although this seemed to work, the most effective approach is to combine acoustic information with lexical information at word level using force-aligned duration (Klejch et al., 2017) . In this work, we only considered lexical input and a pretrained lexical encoder for prediction of punctuation and truecasing. The use of pretrained acoustic encoder and fusion with lexical outputs are possible extensions in future work.", |
|
"cite_spans": [ |
|
{ |
|
"start": 98, |
|
"end": 125, |
|
"text": "(Christensen et al., 2001b;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 126, |
|
"end": 144, |
|
"text": "Levy et al., 2012)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 449, |
|
"end": 470, |
|
"text": "(Klejch et al., 2017)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Using acoustic information", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Neural approaches for punctuation and truecasing can be classified into two broad categories: sequence labeling based models and MT-based seq2seq models. These approaches have proven to be quite effective in capturing the contextual information and achieved huge success. While some approaches considered only punctuation prediction, some others jointly modeled punctuation and truecasing.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Neural approaches", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "One set of approaches treated punctuation as a machine translation problem and used phrase based statistical machine translation systems to output punctuated and true cased text (Peitz et al., 2011b; Cho et al., 2012; Driesen et al., 2014) . Inspired by recent end-to-end approaches, (Yi and Tao, 2019) proposed the use of self-attention based transformer model to predict punctuation marks as output sequence for given word sequences. Most recently, (Nguyen et al., 2019b) proposed joint modeling of punctuation and truecasing by generating words with punctuation marks as part of the decoding. Although seq2seq based approaches have shown a strong performance, they are intensive, demanding and are not suitable for production deployment at large scale.", |
|
"cite_spans": [ |
|
{ |
|
"start": 178, |
|
"end": 199, |
|
"text": "(Peitz et al., 2011b;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 200, |
|
"end": 217, |
|
"text": "Cho et al., 2012;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 218, |
|
"end": 239, |
|
"text": "Driesen et al., 2014)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 284, |
|
"end": 302, |
|
"text": "(Yi and Tao, 2019)", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 451, |
|
"end": 473, |
|
"text": "(Nguyen et al., 2019b)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Neural approaches", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "For sequence labeling problem, each word in the input is tagged with a punctuation. If there is no punctuation associated with a word, a blank label is used and is often referred as \"no punc\". (Cho et al., 2015) used a combination of neural networks and CRFs for joint prediction of punctuation and disfluencies. With growing popularity in deep recurrent neural networks, LSTMs and BLSTMs with attention mechanism were introduced for punctuation restoration Alum\u00e4e, 2015, 2016) . Later, (Pahuja et al., 2017) proposed joint training of punc-tuation and truecasing using BLSTM models. This work addressed joint learning as two correlated tasks, and predicted punctuation and truecasing as two independent outputs. Our proposed approach is similar to this work, but we rather condition truecasing prediction on punctuation output; this is discussed in detail in Section 3.", |
|
"cite_spans": [ |
|
{ |
|
"start": 193, |
|
"end": 211, |
|
"text": "(Cho et al., 2015)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 458, |
|
"end": 477, |
|
"text": "Alum\u00e4e, 2015, 2016)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 487, |
|
"end": 508, |
|
"text": "(Pahuja et al., 2017)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Neural approaches", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "Punctuation and casing restoration for speech/ASR outputs in the medical domain has not been explored extensively. Recently, proposed a sequence labeling model using bi-directional RNNs with an attention mechanism and late fusion for punctuation restoration to clinical dictation. To our knowledge, there has not been any work on medical conversations, and we aim to bridge the gap here with latest advances in NLP with large-scale pretrained language models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Neural approaches", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "We propose a postprocessing framework for conditional and joint learning of punctuation and truecasing prediction. Consider an input utterance", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Modeling : Conditional Joint labeling of Punctuation + Casing", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "x 1:T = {x 1 , x 2 , .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Modeling : Conditional Joint labeling of Punctuation + Casing", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": ".., x T }, of length T and consisting of words x i . The first step in our modeling process involves punctuation prediction as a sequence tagging task. Once the model predicts a probability distribution over punctuation, this along with the input utterance is fed in as input for predicting the case of a word x i . We consider the punctuation to be independent of casing and a conditional dependence of the truecase of a word on punctuation given the learned input representations. Our plausible reasoning follows from this example sentence -\"She took dance classes. She had no natural grace or sense of rhythm.\". The word after the period is capitalized, which implies that punctuation information can help in better prediction of casing. A pair of punctuation and truecasing is assigned per word:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Modeling : Conditional Joint labeling of Punctuation + Casing", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "Pr(p 1:T , c 1:T |x 1:T ) = Pr(p 1:T |x 1:T )Pr(c 1:T |p 1:T , x 1:T )", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Modeling : Conditional Joint labeling of Punctuation + Casing", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "where c i \u2208 C, a fixed set of casing labels {Lower Case, Upper Case, All Caps, Mixed Case}, and p i \u2208 P , a fixed set of punctuation labels {Comma, Period, Question Mark, No Punct}.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Modeling : Conditional Joint labeling of Punctuation + Casing", |
|
"sec_num": "3" |
|
}, |
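
{

"text": "To make the label scheme concrete, here is a minimal sketch (our illustration, not code from the paper) of deriving per-word (punctuation, truecasing) label pairs from formatted text; the helper make_labels and its exact rules are assumptions:\n\ndef make_labels(text):\n    # Map each formatted token to (lowercased word, punctuation label, casing label).\n    labels = []\n    for token in text.split():\n        word = token.rstrip('.,?')\n        punct = {'.': 'Period', ',': 'Comma', '?': 'Question Mark'}.get(token[-1], 'No Punct')\n        if len(word) > 1 and word.isupper():\n            case = 'All Caps'\n        elif word[:1].isupper():\n            case = 'Upper Case'\n        elif word.islower():\n            case = 'Lower Case'\n        else:\n            case = 'Mixed Case'\n        labels.append((word.lower(), punct, case))\n    return labels\n\n# make_labels('She took dance classes.') -> [('she', 'No Punct', 'Upper Case'),\n#   ('took', 'No Punct', 'Lower Case'), ('dance', 'No Punct', 'Lower Case'),\n#   ('classes', 'Period', 'Lower Case')]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Modeling : Conditional Joint labeling of Punctuation + Casing",

"sec_num": "3"

},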
|
{ |
|
"text": "We propose to use a pretrained model like BERT, trained on a large text corpus, as a lexical encoder for learning an effective representation of the input utterance. Figure 1 illustrates our proposed model architecture.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 166, |
|
"end": 174, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Pretrained lexical encoder", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Subword embeddings Given a sequence of input vectors (x 1 , x 2 , ..., x T ), where x i represents a word w i , we extract the subword embeddings (s 1 , s 2 , ..., s n ) using a wordpiece tokenizer (Schuster and Nakajima, 2012). Using subwords is especially effective in medical domain, as it contains more compound words with common subwords. For example consider the six words {hypotension, hypertension, hypoactive, hyperactive, active, tension } with four common subwords {hyper, hypo, active, tension}. In Section 4.2, we provide a comparative analysis of word and subword models across different architectures on medical data. BERT encoder We provide subword embeddings (s 1 , s 2 , ..., s n ) as input to the BERT encoder, which outputs a sequence of hidden states: H = (h1, ..., h n ) at its final layer. The pretrained BERT base encoder consists of 12 transformer encoder selfattention layers. For this task, we truncate the BERT encoder and fine-tune only the first six layers to reduce the model complexity. Although a deep encoder might enable us to learn a long memory context dependent representation of the input utterance, the performance gain is very minimal compared to the increased latency 1 . For punctuation, we input the last layer representations of truncated BERT encoder h 1 , h 2 , ..., h n to a linear layer with softmax activation to 1 We experimentally found that 12-layer BERT base model gives \u223c1% improvement over 6-layer BERT base model whereas the inference and training times were double for the former.", |
|
"cite_spans": [ |
|
{ |
|
"start": 1363, |
|
"end": 1364, |
|
"text": "1", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pretrained lexical encoder", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "classify over the punctuation labels generating (p 1 , p 2 , ..., p n ) as outputs. For casing, we concatenate the softmax probabilities of punctuation output with BERT encoder's outputs and feed to a linear layer with softmax activation generating case labels (c 1 , c 2 , ..., c n ) for the sequence. The softmax output for punctuation (p i ) and truecasing (\u0109 i ) is as follows:p", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pretrained lexical encoder", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "i = sof tmax(W k h i + b k )", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "Pretrained lexical encoder", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "c i = sof tmax(W l (p i \u2295 h i ) + b l )", |
|
"eq_num": "(3)" |
|
} |
|
], |
|
"section": "Pretrained lexical encoder", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "where W k , b k denote weights and bias of punctuation linear output layer and W l , b l denote weights and bias of truecasing linear output layer. Joint learning objective: We model our learning objective to maximize the joint probability Pr(p 1:T , c 1:T |x 1:T ). The model is finetuned endto-end to minimize the cross-entropy loss between the assigned distribution and the training data. The parameters of BERT encoder are shared across punctuation and casing prediction tasks and are jointly trained. We compute the losses (L p , L c ) for each task using cross entropy loss function. The final loss L to be optimized is a weighted average of the task-specific loses:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pretrained lexical encoder", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "L = \u03b1L p + L c", |
|
"eq_num": "(4)" |
|
} |
|
], |
|
"section": "Pretrained lexical encoder", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "where \u03b1 is a fixed weight optimized for best predictions across both the tasks. In our experiments, we explored \u03b1 values in the range of (0.2-2) and found 0.6 to be the optimal value.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pretrained lexical encoder", |
|
"sec_num": "3.1" |
|
}, |
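
{

"text": "A minimal PyTorch sketch of the conditional joint architecture above (Eqs. 1-4); it assumes the Hugging Face transformers BertModel, and the class name, layer truncation detail and head sizes are our assumptions rather than the authors' released code:\n\nimport torch\nimport torch.nn as nn\nfrom transformers import BertModel\n\nclass PunctCaseTagger(nn.Module):\n    def __init__(self, n_punct=4, n_case=4, alpha=0.6):\n        super().__init__()\n        self.encoder = BertModel.from_pretrained('bert-base-uncased')\n        # Keep only the first six self-attention layers, as in Section 3.1.\n        self.encoder.encoder.layer = self.encoder.encoder.layer[:6]\n        h = self.encoder.config.hidden_size\n        self.punct_head = nn.Linear(h, n_punct)          # Eq. (2)\n        self.case_head = nn.Linear(h + n_punct, n_case)  # Eq. (3): conditioned on punctuation\n        self.alpha = alpha                               # weight in Eq. (4)\n\n    def forward(self, input_ids, attention_mask, punct_labels=None, case_labels=None):\n        h = self.encoder(input_ids, attention_mask=attention_mask).last_hidden_state\n        punct_logits = self.punct_head(h)\n        # The truecasing head sees both the hidden states and the punctuation distribution.\n        punct_probs = punct_logits.softmax(dim=-1)\n        case_logits = self.case_head(torch.cat([punct_probs, h], dim=-1))\n        if punct_labels is None:\n            return punct_logits, case_logits\n        ce = nn.CrossEntropyLoss()\n        loss_p = ce(punct_logits.transpose(1, 2), punct_labels)\n        loss_c = ce(case_logits.transpose(1, 2), case_labels)\n        return self.alpha * loss_p + loss_c              # Eq. (4)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Pretrained lexical encoder",

"sec_num": "3.1"

},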
|
{ |
|
"text": "Model with Medical domain data BERT and its successors have shown great performance on downstream NLP tasks. But just like any other model, these Language Models are biased by their training data. In particular, they are typically trained on data that is easily available in large quantities on the internet e.g. Wikipedia, Common-Crawl etc. Our domain, Medical ASR Text, is not \"common\" and is very under-represented in the training data for these Language Models. One way to correct this situation is to perform a few steps of unsupervised Masked Language Model finetuning on the BERT models before performing cross-entropy training using the labeled task data (Han and Eisenstein, 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 663, |
|
"end": 689, |
|
"text": "(Han and Eisenstein, 2019)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Finetuning using Masked Language", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "We finetune the pretrained BERT model for MLM (Masked LM) objective on medical domain data. 15% of input tokens are masked randomly before feeding into the BERT model as proposed by (Devlin et al., 2018) . The main goal is to adapt and learn better representations of speech data. The domain adapted model can be further finetuned with an additional layer to a downstream task like punctuation and casing prediction. Domain+Task adaptation Building on the previous technique, we attempt to finetune the pretrained model for task adaptation in combination with domain adaptation. In this technique, instead of randomly masking 15% of the input tokens, we do selective masking i.e. 50% of the masked tokens would be random and the other 50% would be punctuation marks ([\".\", \",\", \"?\"] in our case). Therefore, the finetuned model would not only adapt to speech domain, but would also effectively learn the placement of punctuation marks in a text based on the context.", |
|
"cite_spans": [ |
|
{ |
|
"start": 182, |
|
"end": 203, |
|
"text": "(Devlin et al., 2018)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Domain adaptation", |
|
"sec_num": null |
|
}, |
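
{

"text": "A minimal sketch of this selective masking (our illustration; the sampling details are assumptions consistent with the description above):\n\nimport random\n\ndef mask_for_mlm(token_ids, punct_ids, mask_id, rate=0.15, punct_frac=0.5):\n    # Choose ~15% of positions to mask; up to half of them are punctuation\n    # tokens ('.', ',', '?') and the rest are random positions.\n    n = max(1, int(rate * len(token_ids)))\n    punct_pos = [i for i, t in enumerate(token_ids) if t in punct_ids]\n    random.shuffle(punct_pos)\n    chosen = set(punct_pos[:int(punct_frac * n)])\n    others = [i for i in range(len(token_ids)) if i not in chosen]\n    chosen.update(random.sample(others, n - len(chosen)))\n    # -100 marks positions ignored by the usual cross-entropy MLM loss.\n    labels = [t if i in chosen else -100 for i, t in enumerate(token_ids)]\n    masked = [mask_id if i in chosen else t for i, t in enumerate(token_ids)]\n    return masked, labels",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Domain adaptation",

"sec_num": null

},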
|
{ |
|
"text": "Models trained on ground truth text inputs may not perform well when tested with ASR output, especially when the system introduces grammatical errors. To make models more robust against ASR errors, we perform data augmentation with ASR outputs for training. For punctuation restoration, we use edit distance measure to align ASR hypothesis with ground truth punctuated text. Before computing alignment, we strip all punctuation from ground truth and lowercase the text. This helps us find the best alignment between ASR hy-pothesis and ground truth text. Once the alignment is found, we restore the punctuation from each word in ground truth text to hypothesis. If there are words that are punctuated in ground truth but got deleted in ASR hypothesis, we restore the punctuation to previous word. For truecasing, we try to match the reference word with hypothesis word from aligned sequences with a window size of 5, two words to the left and two words to the right of current word and restore truecasing only in the cases where reference word is found. We performed experiments with data augmentation using 1-best hypothesis and n-best lists as additional training data and the results are reported in Section 4.4.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Robustness to ASR errors", |
|
"sec_num": "3.3" |
|
}, |
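
{

"text": "A minimal sketch of the punctuation restoration step described above, using the standard-library difflib as a stand-in for the edit-distance alignment (function and variable names are our assumptions):\n\nimport difflib\n\ndef restore_punct(ref_tokens, ref_punct, hyp_tokens):\n    # Align the stripped, lowercased reference with the ASR hypothesis and\n    # copy each aligned word's punctuation label across.\n    out = ['No Punct'] * len(hyp_tokens)\n    sm = difflib.SequenceMatcher(a=ref_tokens, b=hyp_tokens, autojunk=False)\n    for op, i1, i2, j1, j2 in sm.get_opcodes():\n        if op == 'equal':\n            for k in range(i2 - i1):\n                out[j1 + k] = ref_punct[i1 + k]\n        elif op == 'delete' and j1 > 0:\n            # Reference words deleted by the ASR: attach their last\n            # punctuation mark to the previous hypothesis word.\n            marks = [p for p in ref_punct[i1:i2] if p != 'No Punct']\n            if marks and out[j1 - 1] == 'No Punct':\n                out[j1 - 1] = marks[-1]\n    return out",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Robustness to ASR errors",

"sec_num": "3.3"

},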
|
{ |
|
"text": "4 Experiments and results", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Robustness to ASR errors", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "We evaluate our proposed framework and models on a subset of two internal medical datasets: dictation and conversational. The dictation corpus contains 3.7M words and the conversational corpus contains 51M words. The medical data comes with special tags masking personal identifiable and patient health information. We also use a general domain Wikipedia dataset for comparative analysis with Medical domain data. This data is a subset of the publicly available release of Wiki dataset (Sproat and Jaitly, 2016) . The corpus contains 35M words and relatively shorter sentences ranging from 8 to 200 words in length. 90% of the data from each corpus is used for training, 5% for fine-tuning and remaining 5% is held-out for testing.", |
|
"cite_spans": [ |
|
{ |
|
"start": 486, |
|
"end": 511, |
|
"text": "(Sproat and Jaitly, 2016)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "For robustness experiments presented in Section 4.4, we used data from the dictation corpus consisting of 2265 text files and corresponding audio files with an average duration of \u223c15 minutes. The total length of the corpus is 550 hours. For augmentation with ground-truth transcription, we transcribed audio files using a speech recognition system. Restoration of punctuation and truecasing to transcribed text can be erroneous as the word error rate(WER) goes up. We therefore discarded the transcribed text of those audio files whose WER is more than 25%. We sorted the remaining transcriptions based on WER to make further splits: hypothesis from top 50 files with best WER is set as test data, and the next 50 files were chosen as development and rest of the transcribed text was used for training. The partition was done this way to minimize the number of errors that may occur during restoration.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Preprocessing long-speech transcriptions Conversational style speech has long-speech transcripts, in which the context is spread across multiple segments. we use an overlapped chunking and merging component to pre and post process the data. We use a sliding window approach (Nguyen et al., 2019a) to split long ASR outputs into chunks of 200 words each with an overlapping window of 50 words each to the left and right. The overlap helps in preserving the context for all the words after splitting and ensures accurate prediction of punctuation and case corresponding to each word.", |
|
"cite_spans": [ |
|
{ |
|
"start": 274, |
|
"end": 296, |
|
"text": "(Nguyen et al., 2019a)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "4.1" |
|
}, |
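
{

"text": "A minimal sketch of the overlapped chunking and merging described above (names and the exact kept regions are our assumptions; predict maps a list of words to one label per word):\n\ndef chunk_and_merge(words, predict, size=200, overlap=50):\n    # Slide 200-word windows with 50 words of overlap on each side and\n    # keep each window's predictions only for its non-overlapping core,\n    # so every word is labeled with context available on both sides.\n    merged, start = [], 0\n    while True:\n        piece = words[start:start + size]\n        labels = predict(piece)\n        lo = 0 if start == 0 else overlap\n        hi = len(piece) if start + size >= len(words) else size - overlap\n        merged.extend(labels[lo:hi])\n        if start + size >= len(words):\n            return merged\n        start += size - 2 * overlap",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Data",

"sec_num": "4.1"

},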
|
{ |
|
"text": "For a fair comparison with BERT, we evaluate various recurrent and non-recurrent architectures with both word and subword embeddings. The two recurrent models include a 3 layer uni-directional LSTM (3-LSTM) and a 3 layer Bi-directional LSTM (3-BLSTM). One of the non recurrent encoders, implements a CNN-Highway architecture based on the work proposed by (Kim et al., 2016) ,", |
|
"cite_spans": [ |
|
{ |
|
"start": 355, |
|
"end": 373, |
|
"text": "(Kim et al., 2016)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Large Vocabulary: Word vs Subword models", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "whereas the other one implements a transformer encoder based model (Vaswani et al., 2017) . We train all four models on medical data from dictation and conversation corpus with weights initialized randomly. The vocabulary for word models is derived by considering all the unique words from training corpus, with additional tokens for unknown and padding. This yielded a vocabulary size of 30k for dictation and 64k for conversational corpus. Subwords are extracted using a wordpiece model (Schuster and Nakajima, 2012) and its inventory is less than half that of word model for conversation. Tables 1 and 2 summarize our results on dictation and conversation datasets respectively. We observe that subword models consistently performed same or better than word models. On punctuation task, for Full stop and Comma, we notice an absolute \u223c1-2% improvement respectively on dictation set. Similarly, on the conversation dataset, we notice an absolute \u223c1-2% improvement on Full stop, Comma and Question Mark. For the casing task, we notice that word and subword models performed equally well except in dictation dataset where we see an absolute \u223c3% improvement for Upper Case. Table 4 : Comparison of F1 scores for punctuation and truecasing using BERT and BLSTM when trained on Wiki data and Medical conversation data (FT-BERT: Finetuned BERT for domain adapation, PM-BERT: Finetuned BERT by punctuation masking for domain and task adapation).", |
|
"cite_spans": [ |
|
{ |
|
"start": 67, |
|
"end": 89, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 489, |
|
"end": 518, |
|
"text": "(Schuster and Nakajima, 2012)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1173, |
|
"end": 1180, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Large Vocabulary: Word vs Subword models", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "We hypothesize that medical vocabulary contains a large set of compound words, which a subword based model works effectively over word model. Upon examining few utterances, we noticed that subword models can learn effective representations of these compound medical words by tokenizing them into subwords. On the other hand, word models often run into rare word or OOV issues.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Large Vocabulary: Word vs Subword models", |
|
"sec_num": "4.2" |
|
}, |
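
{

"text": "To illustrate, here is a minimal sketch using the Hugging Face BertTokenizer as a stand-in wordpiece model (the exact pieces depend on the trained vocabulary, so the output shown is illustrative, not a reported result):\n\nfrom transformers import BertTokenizer\n\ntok = BertTokenizer.from_pretrained('bert-base-uncased')\nfor w in ['hypotension', 'hypertension', 'hypoactive', 'hyperactive']:\n    print(w, tok.tokenize(w))\n# Illustrative output: compound medical words decompose into shared pieces,\n# e.g. hypotension -> ['hypo', '##tension'], hypertension -> ['hyper', '##tension'],\n# so rare compounds no longer map to OOV tokens.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Large Vocabulary: Word vs Subword models",

"sec_num": "4.2"

},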
|
{ |
|
"text": "Significance of in-domain data For analyzing the importance of in-domain data, we train a baseline BLSTM model and a pretrained BERT model on Wiki and Medical data from both dictation and conversational corpus and tested the models on Medical held-out data. The first four rows of Tables 3 and 4 summarize the results. The models trained on Wiki data performed very poorly when compared to models trained on Medical data from either dictation or conversation corpus. Although dictation corpus (3.7M words) is relatively smaller than Wiki corpus (35M words), the difference in accuracy is significantly higher across both models. Imbalanced classes like Full stop, Comma, Question Mark were most affected. Another interesting observation is that the models trained on Medical data performed better on Full stop compared to Comma; whereas general domain models performed better on Comma compared to Full stop. The degradation in general models might be due to Wiki sentences being short and ending with a Full stop unlike lengthy medical transcripts. Also, the F1 scores are lower on conversation data across both the tasks, indicating the complexity involved in modeling conversational data due to their highly unstructured format. Overall, the pretrained BERT model consistently outperformed baseline BLSTM model on both dictation and conversation data. This motivated us to focus on adapting the pretrained models for this task.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 281, |
|
"end": 295, |
|
"text": "Tables 3 and 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Pretrained language models", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "Finetuning Masked LM We have run two levels of fine-tuning as explained in Section 3.2. First, we finetuned BERT with Medical domain data using random masking (FT-BERT) and for task adaptation, we performed fine-tuning with punctuation based masking (PM-BERT). For both experiments, we used the same data as we have used for finetuning the downstream task. From the results presented in Table 3 and 4, we infer that finetuning boosts the performance of punctuation and truecasing (an absolute improvement of \u223c1-2%). From both the datasets, it is clear that task specific masking helps better than simple random masking. For dictation dataset, Full stop improved by an absolute 3% by performing punctuation specific masking, suggesting that finetuning MLM can give higher benefits when the amount of data is low. Variants of BERT We compare three pretrained models namely, BERT and its successor RoBERTa (Liu et al., 2019) and Bio-BERT (Lee et al., 2020) which was trained on large scale Biomedical corpora. The results are summarized in last two rows of Table 3 and 4. First, we observe that both Bio-BERT and RoBERTa outperformed the initial BERT model and has shown an absolute \u223c3-5% improvement over the baseline 3-BSLTM. To further validate this, we extended our experiments to understand how the performance of our best model(Bio-BERT) varies across different training dataset sizes compared to the baseline. From Figure 2 , we observe that the difference increases significantly as we move towards smaller datasets. For the smallest data set size of 500k words (1k transcripts), there is an absolute improvement of 6-17% over the baseline in accuracy in terms of F1. This shows that pretraining on a large dataset helps to overcome data scarcity issue effectively.", |
|
"cite_spans": [ |
|
{ |
|
"start": 903, |
|
"end": 921, |
|
"text": "(Liu et al., 2019)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 935, |
|
"end": 953, |
|
"text": "(Lee et al., 2020)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 387, |
|
"end": 394, |
|
"text": "Table 3", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1054, |
|
"end": 1061, |
|
"text": "Table 3", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1419, |
|
"end": 1427, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Pretrained language models", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "For testing robustness, we performed experiments with augmentation of ASR data from n-best lists (BERT-ASR). We considered top-1, top-3 and top-5 hypotheses for n-best lists augmentation with ground truth text and the results are presented in Table 5 . Additionally, the best BERT model trained using only ground truth text inputs (BERT-GT) from Table 3 is also evaluated on ASR outputs. To compute F1 scores on held-out test set, we first aligned the ASR hypothesis with ground truth data and restored the punctuation and truecasing as described in Section 3.3. From the results presented in Table 5 , we infer that adding ASR hypothesis to the training data helped improve the performance of both punctuation and truecasing. In punctuation, both Full stop and Comma have seen an absolute 10% improvement in F1 score. Although the number of question marks is less in test data, the augmented systems performed really well compared to the system trained purely on ground truth text. However, we found that using n-best lists with n > 1 did not help much compared to the 1-best list. This may be due to sub-optimal restoration of punctuation and truecasing as the WER with n-best lists is likely to go up as n increases.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 243, |
|
"end": 250, |
|
"text": "Table 5", |
|
"ref_id": "TABREF4" |
|
}, |
|
{ |
|
"start": 346, |
|
"end": 353, |
|
"text": "Table 3", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 593, |
|
"end": 600, |
|
"text": "Table 5", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Robustness", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "In this paper, we have presented a framework for conditional joint modeling of punctuation and truecasing in medical transcriptions using pretrained language models such as BERT. We also demonstrated the benefit from MLM objective finetuning of the pretrained model with task specific masking. We further improved the robustness of punctuation and truecasing on ASR outputs by data augmentation during training. Experiments performed on both dictation and conversation corpora show the effectiveness of the proposed approach. Future work includes the use of either pretrained acoustic features or pretrained acoustic encoder to perform fusion with pretrained linguistic encoder to further boost the performance of punctuation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Cyberpunc: A lightweight punctuation annotation system for speech", |
|
"authors": [ |
|
{ |
|
"first": "Doug", |
|
"middle": [], |
|
"last": "Beeferman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Berger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Lafferty", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "Proceedings of the 1998 IEEE International Conference on Acoustics, Speech and Signal Processing, ICASSP'98 (Cat. No. 98CH36181)", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "689--692", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Doug Beeferman, Adam Berger, and John Lafferty. 1998. Cyberpunc: A lightweight punctuation an- notation system for speech. In Proceedings of the 1998 IEEE International Conference on Acoustics, Speech and Signal Processing, ICASSP'98 (Cat. No. 98CH36181), volume 2, pages 689-692. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Neural word decomposition models for abusive language detection", |
|
"authors": [ |
|
{ |
|
"first": "Sravan", |
|
"middle": [], |
|
"last": "Bodapati", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Spandana", |
|
"middle": [], |
|
"last": "Gella", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kasturi", |
|
"middle": [], |
|
"last": "Bhattacharjee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yaser", |
|
"middle": [], |
|
"last": "Al-Onaizan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Third Workshop on Abusive Language Online", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "135--145", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W19-3515" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sravan Bodapati, Spandana Gella, Kasturi Bhattachar- jee, and Yaser Al-Onaizan. 2019. Neural word de- composition models for abusive language detection. In Proceedings of the Third Workshop on Abusive Language Online, pages 135-145, Florence, Italy. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Speech recognition for medical conversations", |
|
"authors": [ |
|
{ |
|
"first": "Chung-Cheng", |
|
"middle": [], |
|
"last": "Chiu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anshuman", |
|
"middle": [], |
|
"last": "Tripathi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Katherine", |
|
"middle": [], |
|
"last": "Chou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Co", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Navdeep", |
|
"middle": [], |
|
"last": "Jaitly", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Diana", |
|
"middle": [], |
|
"last": "Jaunzeikare", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anjuli", |
|
"middle": [], |
|
"last": "Kannan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Patrick", |
|
"middle": [], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hasim", |
|
"middle": [], |
|
"last": "Sak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ananth", |
|
"middle": [], |
|
"last": "Sankar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1711.07274" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chung-Cheng Chiu, Anshuman Tripathi, Katherine Chou, Chris Co, Navdeep Jaitly, Diana Jaunzeikare, Anjuli Kannan, Patrick Nguyen, Hasim Sak, Ananth Sankar, et al. 2017. Speech recognition for medical conversations. arXiv preprint arXiv:1711.07274.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Combination of nn and crf models for joint detection of punctuation and disfluencies", |
|
"authors": [ |
|
{ |
|
"first": "Eunah", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Kilgour", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Sixteenth annual conference of the international speech communication association", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eunah Cho, Kevin Kilgour, Jan Niehues, and Alex Waibel. 2015. Combination of nn and crf models for joint detection of punctuation and disfluencies. In Sixteenth annual conference of the international speech communication association.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Segmentation and punctuation prediction in speech language translation using a monolingual translation system", |
|
"authors": [ |
|
{ |
|
"first": "Eunah", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jan", |
|
"middle": [], |
|
"last": "Niehues", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Waibel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "International Workshop on Spoken Language Translation (IWSLT)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eunah Cho, Jan Niehues, and Alex Waibel. 2012. Seg- mentation and punctuation prediction in speech lan- guage translation using a monolingual translation system. In International Workshop on Spoken Lan- guage Translation (IWSLT) 2012.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Punctuation annotation using statistical prosody models", |
|
"authors": [ |
|
{ |
|
"first": "Heidi", |
|
"middle": [], |
|
"last": "Christensen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshihiko", |
|
"middle": [], |
|
"last": "Gotoh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Steve", |
|
"middle": [], |
|
"last": "Renals", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "ISCA tutorial and research workshop (ITRW) on prosody in speech recognition and understanding", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Heidi Christensen, Yoshihiko Gotoh, and Steve Re- nals. 2001a. Punctuation annotation using statisti- cal prosody models. In ISCA tutorial and research workshop (ITRW) on prosody in speech recognition and understanding.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Punctuation annotation using statistical prosody models", |
|
"authors": [ |
|
{ |
|
"first": "Heidi", |
|
"middle": [], |
|
"last": "Christensen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshihiko", |
|
"middle": [], |
|
"last": "Gotoh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Steve", |
|
"middle": [], |
|
"last": "Renals", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "ISCA tutorial and research workshop (ITRW) on prosody in speech recognition and understanding", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Heidi Christensen, Yoshihiko Gotoh, and Steve Re- nals. 2001b. Punctuation annotation using statisti- cal prosody models. In ISCA tutorial and research workshop (ITRW) on prosody in speech recognition and understanding.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1810.04805" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. Bert: Pre-training of deep bidirectional transformers for language understand- ing. arXiv preprint arXiv:1810.04805.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Automated production of truecased punctuated subtitles for weather and news broadcasts", |
|
"authors": [ |
|
{ |
|
"first": "Joris", |
|
"middle": [], |
|
"last": "Driesen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandra", |
|
"middle": [], |
|
"last": "Birch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Simon", |
|
"middle": [], |
|
"last": "Grimsey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Saeid", |
|
"middle": [], |
|
"last": "Safarfashandi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Juliet", |
|
"middle": [], |
|
"last": "Gauthier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matt", |
|
"middle": [], |
|
"last": "Simpson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Steve", |
|
"middle": [], |
|
"last": "Renals", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Fifteenth Annual Conference of the International Speech Communication Association", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joris Driesen, Alexandra Birch, Simon Grimsey, Saeid Safarfashandi, Juliet Gauthier, Matt Simpson, and Steve Renals. 2014. Automated production of true- cased punctuated subtitles for weather and news broadcasts. In Fifteenth Annual Conference of the International Speech Communication Association.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Medical speech recognition: reaching parity with humans", |
|
"authors": [ |
|
{ |
|
"first": "Erik", |
|
"middle": [], |
|
"last": "Edwards", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wael", |
|
"middle": [], |
|
"last": "Salloum", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Greg", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Finley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Fone", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Greg", |
|
"middle": [], |
|
"last": "Cardiff", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Miller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Suendermann-Oeft", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "International Conference on Speech and Computer", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "512--524", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Erik Edwards, Wael Salloum, Greg P Finley, James Fone, Greg Cardiff, Mark Miller, and David Suendermann-Oeft. 2017. Medical speech recogni- tion: reaching parity with humans. In International Conference on Speech and Computer, pages 512- 524. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Restoring punctuation and capitalization in transcribed speech", |
|
"authors": [ |
|
{ |
|
"first": "Agustin", |
|
"middle": [], |
|
"last": "Gravano", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Jansche", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michiel", |
|
"middle": [], |
|
"last": "Bacchiani", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "2009 IEEE International Conference on Acoustics, Speech and Signal Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4741--4744", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Agustin Gravano, Martin Jansche, and Michiel Bacchi- ani. 2009. Restoring punctuation and capitalization in transcribed speech. In 2009 IEEE International Conference on Acoustics, Speech and Signal Pro- cessing, pages 4741-4744. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Unsupervised domain adaptation of contextualized embeddings for sequence labeling", |
|
"authors": [ |
|
{ |
|
"first": "Xiaochuang", |
|
"middle": [], |
|
"last": "Han", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Eisenstein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4229--4239", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiaochuang Han and Jacob Eisenstein. 2019. Unsu- pervised domain adaptation of contextualized em- beddings for sequence labeling. In Proceedings of the 2019 Conference on Empirical Methods in Nat- ural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 4229-4239.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Maximum entropy model for punctuation annotation from speech", |
|
"authors": [ |
|
{ |
|
"first": "Jing", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [], |
|
"last": "Zweig", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Seventh International Conference on Spoken Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jing Huang and Geoffrey Zweig. 2002. Maximum en- tropy model for punctuation annotation from speech. In Seventh International Conference on Spoken Lan- guage Processing.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Character-aware neural language models", |
|
"authors": [ |
|
{ |
|
"first": "Yoon", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yacine", |
|
"middle": [], |
|
"last": "Jernite", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Sontag", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander M", |
|
"middle": [], |
|
"last": "Rush", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "AAAI", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2741--2749", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yoon Kim, Yacine Jernite, David Sontag, and Alexan- der M Rush. 2016. Character-aware neural language models. In AAAI, pages 2741-2749.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Sequence-to-sequence models for punctuated transcription combining lexical and acoustic features", |
|
"authors": [ |
|
{ |
|
"first": "Ond\u0159ej", |
|
"middle": [], |
|
"last": "Klejch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Bell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Steve", |
|
"middle": [], |
|
"last": "Renals", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "2017 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5700--5704", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ond\u0159ej Klejch, Peter Bell, and Steve Renals. 2017. Sequence-to-sequence models for punctuated tran- scription combining lexical and acoustic features. In 2017 IEEE International Conference on Acous- tics, Speech and Signal Processing (ICASSP), pages 5700-5704. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Biobert: a pre-trained biomedical language representation model for biomedical text mining", |
|
"authors": [ |
|
{ |
|
"first": "Jinhyuk", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wonjin", |
|
"middle": [], |
|
"last": "Yoon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sungdong", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Donghyeon", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sunkyu", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chan", |
|
"middle": [], |
|
"last": "Ho So", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jaewoo", |
|
"middle": [], |
|
"last": "Kang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Bioinformatics", |
|
"volume": "36", |
|
"issue": "4", |
|
"pages": "1234--1240", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jinhyuk Lee, Wonjin Yoon, Sungdong Kim, Donghyeon Kim, Sunkyu Kim, Chan Ho So, and Jaewoo Kang. 2020. Biobert: a pre-trained biomed- ical language representation model for biomedical text mining. Bioinformatics, 36(4):1234-1240.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "The effect of pitch, intensity and pause duration in punctuation detection", |
|
"authors": [ |
|
{ |
|
"first": "Tal", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vered", |
|
"middle": [], |
|
"last": "Silber-Varod", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ami", |
|
"middle": [], |
|
"last": "Moyal", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "2012 IEEE 27th Convention of Electrical and Electronics Engineers in Israel", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--4", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tal Levy, Vered Silber-Varod, and Ami Moyal. 2012. The effect of pitch, intensity and pause duration in punctuation detection. In 2012 IEEE 27th Conven- tion of Electrical and Electronics Engineers in Is- rael, pages 1-4. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Roberta: A robustly optimized bert pretraining approach", |
|
"authors": [ |
|
{ |
|
"first": "Yinhan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Myle", |
|
"middle": [], |
|
"last": "Ott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naman", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jingfei", |
|
"middle": [], |
|
"last": "Du", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mandar", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danqi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Veselin", |
|
"middle": [], |
|
"last": "Stoyanov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1907.11692" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Man- dar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. Roberta: A robustly optimized bert pretraining ap- proach. arXiv preprint arXiv:1907.11692.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Better punctuation prediction with dynamic conditional random fields", |
|
"authors": [ |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hwee Tou", |
|
"middle": [], |
|
"last": "Ng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the 2010 conference on empirical methods in natural language processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "177--186", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wei Lu and Hwee Tou Ng. 2010. Better punctuation prediction with dynamic conditional random fields. In Proceedings of the 2010 conference on empirical methods in natural language processing, pages 177- 186.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "The effects of speech recognition and punctuation on information extraction performance", |
|
"authors": [ |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Makhoul", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Baron", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Bulyko", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Long", |
|
"middle": [], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lance", |
|
"middle": [], |
|
"last": "Ramshaw", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Stallard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Schwartz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bing", |
|
"middle": [], |
|
"last": "Xiang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Ninth European Conference on Speech Communication and Technology", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John Makhoul, Alex Baron, Ivan Bulyko, Long Nguyen, Lance Ramshaw, David Stallard, Richard Schwartz, and Bing Xiang. 2005. The effects of speech recognition and punctuation on information extraction performance. In Ninth European Confer- ence on Speech Communication and Technology.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Fast and accurate capitalization and punctuation for automatic speech recognition using transformer and chunk merging", |
|
"authors": [ |
|
{ |
|
"first": "Binh", |
|
"middle": [], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vu Bao Hung", |
|
"middle": [], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hien", |
|
"middle": [], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pham Ngoc", |
|
"middle": [], |
|
"last": "Phuong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "The-Loc", |
|
"middle": [], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quoc Truong", |
|
"middle": [], |
|
"last": "Do", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luong Chi", |
|
"middle": [], |
|
"last": "Mai", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1908.02404" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Binh Nguyen, Vu Bao Hung Nguyen, Hien Nguyen, Pham Ngoc Phuong, The-Loc Nguyen, Quoc Truong Do, and Luong Chi Mai. 2019a. Fast and accurate capitalization and punctuation for automatic speech recognition using transformer and chunk merging. arXiv preprint arXiv:1908.02404.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Fast and accurate capitalization and punctuation for automatic speech recognition using transformer and chunk merging", |
|
"authors": [ |
|
{ |
|
"first": "Binh", |
|
"middle": [], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bao", |
|
"middle": [ |
|
"Hung" |
|
], |
|
"last": "Vu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hien", |
|
"middle": [], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ngoc", |
|
"middle": [], |
|
"last": "Pham", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "The-Loc", |
|
"middle": [], |
|
"last": "Phuong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quoc", |
|
"middle": [], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luong Chi", |
|
"middle": [], |
|
"last": "Truong Do", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Mai", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1908.02404" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Binh Nguyen, Vu Bao Hung Nguyen, Hien Nguyen, Pham Ngoc Phuong, The-Loc Nguyen, Quoc Truong Do, and Luong Chi Mai. 2019b. Fast and accurate capitalization and punctuation for automatic speech recognition using transformer and chunk merging. arXiv preprint arXiv:1908.02404.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Joint learning of correlated sequence labelling tasks using bidirectional recurrent neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Vardaan", |
|
"middle": [], |
|
"last": "Pahuja", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anirban", |
|
"middle": [], |
|
"last": "Laha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shachar", |
|
"middle": [], |
|
"last": "Mirkin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vikas", |
|
"middle": [], |
|
"last": "Raykar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lili", |
|
"middle": [], |
|
"last": "Kotlerman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guy", |
|
"middle": [], |
|
"last": "Lev", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1703.04650" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vardaan Pahuja, Anirban Laha, Shachar Mirkin, Vikas Raykar, Lili Kotlerman, and Guy Lev. 2017. Joint learning of correlated sequence labelling tasks us- ing bidirectional recurrent neural networks. arXiv preprint arXiv:1703.04650.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Modeling punctuation prediction as machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Stephan", |
|
"middle": [], |
|
"last": "Peitz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Markus", |
|
"middle": [], |
|
"last": "Freitag", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arne", |
|
"middle": [], |
|
"last": "Mauser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hermann", |
|
"middle": [], |
|
"last": "Ney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "International Workshop on Spoken Language Translation (IWSLT)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stephan Peitz, Markus Freitag, Arne Mauser, and Her- mann Ney. 2011a. Modeling punctuation prediction as machine translation. In International Workshop on Spoken Language Translation (IWSLT) 2011.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Modeling punctuation prediction as machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Stephan", |
|
"middle": [], |
|
"last": "Peitz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Markus", |
|
"middle": [], |
|
"last": "Freitag", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arne", |
|
"middle": [], |
|
"last": "Mauser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hermann", |
|
"middle": [], |
|
"last": "Ney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "International Workshop on Spoken Language Translation (IWSLT)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stephan Peitz, Markus Freitag, Arne Mauser, and Her- mann Ney. 2011b. Modeling punctuation prediction as machine translation. In International Workshop on Spoken Language Translation (IWSLT) 2011.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Deep learning for punctuation restoration in medical reports", |
|
"authors": [ |
|
{ |
|
"first": "Wael", |
|
"middle": [], |
|
"last": "Salloum", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gregory", |
|
"middle": [], |
|
"last": "Finley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Erik", |
|
"middle": [], |
|
"last": "Edwards", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Miller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Suendermann-Oeft", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "159--164", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wael Salloum, Gregory Finley, Erik Edwards, Mark Miller, and David Suendermann-Oeft. 2017. Deep learning for punctuation restoration in medical re- ports. In BioNLP 2017, pages 159-164.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Japanese and korean voice search", |
|
"authors": [ |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Schuster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kaisuke", |
|
"middle": [], |
|
"last": "Nakajima", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "2012 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5149--5152", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mike Schuster and Kaisuke Nakajima. 2012. Japanese and korean voice search. In 2012 IEEE Interna- tional Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 5149-5152. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Neural machine translation of rare words with subword units", |
|
"authors": [ |
|
{ |
|
"first": "Rico", |
|
"middle": [], |
|
"last": "Sennrich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Barry", |
|
"middle": [], |
|
"last": "Haddow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandra", |
|
"middle": [], |
|
"last": "Birch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1508.07909" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rico Sennrich, Barry Haddow, and Alexandra Birch. 2015. Neural machine translation of rare words with subword units. arXiv preprint arXiv:1508.07909.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Rnn approaches to text normalization: A challenge", |
|
"authors": [ |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Sproat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Navdeep", |
|
"middle": [], |
|
"last": "Jaitly", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1611.00068" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Richard Sproat and Navdeep Jaitly. 2016. Rnn ap- proaches to text normalization: A challenge. arXiv preprint arXiv:1611.00068.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Automatic detection of sentence boundaries and disfluencies based on recognized words", |
|
"authors": [ |
|
{ |
|
"first": "Andreas", |
|
"middle": [], |
|
"last": "Stolcke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elizabeth", |
|
"middle": [], |
|
"last": "Shriberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rebecca", |
|
"middle": [], |
|
"last": "Bates", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mari", |
|
"middle": [], |
|
"last": "Ostendorf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dilek", |
|
"middle": [], |
|
"last": "Hakkani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Madelaine", |
|
"middle": [], |
|
"last": "Plauche", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gokhan", |
|
"middle": [], |
|
"last": "Tur", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yu", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "Fifth International Conference on Spoken Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andreas Stolcke, Elizabeth Shriberg, Rebecca Bates, Mari Ostendorf, Dilek Hakkani, Madelaine Plauche, Gokhan Tur, and Yu Lu. 1998. Automatic detec- tion of sentence boundaries and disfluencies based on recognized words. In Fifth International Confer- ence on Spoken Language Processing.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Lstm for punctuation restoration in speech transcripts", |
|
"authors": [ |
|
{ |
|
"first": "Ottokar", |
|
"middle": [], |
|
"last": "Tilk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tanel", |
|
"middle": [], |
|
"last": "Alum\u00e4e", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Sixteenth annual conference of the international speech communication association", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ottokar Tilk and Tanel Alum\u00e4e. 2015. Lstm for punctu- ation restoration in speech transcripts. In Sixteenth annual conference of the international speech com- munication association.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Bidirectional recurrent neural network with attention mechanism for punctuation restoration", |
|
"authors": [ |
|
{ |
|
"first": "Ottokar", |
|
"middle": [], |
|
"last": "Tilk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tanel", |
|
"middle": [], |
|
"last": "Alum\u00e4e", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Interspeech", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3047--3051", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ottokar Tilk and Tanel Alum\u00e4e. 2016. Bidirectional recurrent neural network with attention mechanism for punctuation restoration. In Interspeech, pages 3047-3051.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Improved models for automatic punctuation prediction for spoken and written text", |
|
"authors": [ |
|
{ |
|
"first": "Nicola", |
|
"middle": [], |
|
"last": "Ueffing", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maximilian", |
|
"middle": [], |
|
"last": "Bisani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Vozila", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Interspeech", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3097--3101", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nicola Ueffing, Maximilian Bisani, and Paul Vozila. 2013. Improved models for automatic punctuation prediction for spoken and written text. In Inter- speech, pages 3097-3101.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Attention is all you need", |
|
"authors": [ |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Niki", |
|
"middle": [], |
|
"last": "Parmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llion", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aidan", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Gomez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u0141ukasz", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Illia", |
|
"middle": [], |
|
"last": "Polosukhin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5998--6008", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in neural information pro- cessing systems, pages 5998-6008.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Xlnet: Generalized autoregressive pretraining for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Zhilin", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zihang", |
|
"middle": [], |
|
"last": "Dai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yiming", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jaime", |
|
"middle": [], |
|
"last": "Carbonell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Russ R", |
|
"middle": [], |
|
"last": "Salakhutdinov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quoc V", |
|
"middle": [], |
|
"last": "Le", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5754--5764", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhilin Yang, Zihang Dai, Yiming Yang, Jaime Car- bonell, Russ R Salakhutdinov, and Quoc V Le. 2019. Xlnet: Generalized autoregressive pretraining for language understanding. In Advances in neural in- formation processing systems, pages 5754-5764.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Self-attention based model for punctuation prediction using word and speech embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Jiangyan", |
|
"middle": [], |
|
"last": "Yi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianhua", |
|
"middle": [], |
|
"last": "Tao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proc. ICASSP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "7270--7274", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiangyan Yi and Jianhua Tao. 2019. Self-attention based model for punctuation prediction using word and speech embeddings. In Proc. ICASSP, pages 7270-7274.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "Pre-trained BERT encoder for prediction of punctuation and truecasing." |
|
}, |
|
"FIGREF1": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "Difference in F1 scores between Bio-BERT and BLSTM for varying data sizes." |
|
}, |
|
"TABREF2": { |
|
"type_str": "table", |
|
"text": "", |
|
"html": null, |
|
"num": null, |
|
"content": "<table/>" |
|
}, |
|
"TABREF4": { |
|
"type_str": "table", |
|
"text": "Comparison of F1 scores for punctuation and truecasing with ground truth and ASR augmented data.", |
|
"html": null, |
|
"num": null, |
|
"content": "<table><tr><td>F1 Difference b/w Bio-BERT & BLSTM</td><td>0.02 0.04 0.06 0.08 0.10 0.12 0.14 0.16 0.18</td><td>Dict-Comma Dict-Fullstop Dict-Uppercase Conv-Comma Conv-Fullstop Conv-Uppercase</td><td/><td/></tr><tr><td/><td>0.00</td><td>40k</td><td>20k Corpus size 10k</td><td>5k</td><td>1k</td></tr></table>" |
|
} |
|
} |
|
} |
|
} |