|
{ |
|
"paper_id": "N04-1017", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T14:44:08.687174Z" |
|
}, |
|
"title": "Lattice-Based Search for Spoken Utterance Retrieval", |
|
"authors": [ |
|
{ |
|
"first": "Murat", |
|
"middle": [], |
|
"last": "Saraclar", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "AT&T Labs -Research", |
|
"location": { |
|
"addrLine": "180 Park Ave. Florham Park", |
|
"postCode": "07932", |
|
"region": "NJ" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Sproat", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Illinois at Urbana-Champaign Urbana", |
|
"location": { |
|
"postCode": "61801", |
|
"region": "IL" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Recent work on spoken document retrieval has suggested that it is adequate to take the singlebest output of ASR, and perform text retrieval on this output. This is reasonable enough for the task of retrieving broadcast news stories, where word error rates are relatively low, and the stories are long enough to contain much redundancy. But it is patently not reasonable if one's task is to retrieve a short snippet of speech in a domain where WER's can be as high as 50%; such would be the situation with teleconference speech, where one's task is to find if and when a participant uttered a certain phrase. In this paper we propose an indexing procedure for spoken utterance retrieval that works on lattices rather than just single-best text. We demonstrate that this procedure can improve F scores by over five points compared to singlebest retrieval on tasks with poor WER and low redundancy. The representation is flexible so that we can represent both word lattices, as well as phone lattices, the latter being important for improving performance when searching for phrases containing OOV words.", |
|
"pdf_parse": { |
|
"paper_id": "N04-1017", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Recent work on spoken document retrieval has suggested that it is adequate to take the singlebest output of ASR, and perform text retrieval on this output. This is reasonable enough for the task of retrieving broadcast news stories, where word error rates are relatively low, and the stories are long enough to contain much redundancy. But it is patently not reasonable if one's task is to retrieve a short snippet of speech in a domain where WER's can be as high as 50%; such would be the situation with teleconference speech, where one's task is to find if and when a participant uttered a certain phrase. In this paper we propose an indexing procedure for spoken utterance retrieval that works on lattices rather than just single-best text. We demonstrate that this procedure can improve F scores by over five points compared to singlebest retrieval on tasks with poor WER and low redundancy. The representation is flexible so that we can represent both word lattices, as well as phone lattices, the latter being important for improving performance when searching for phrases containing OOV words.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Automatic systems for indexing, archiving, searching and browsing of large amounts of spoken communications have become a reality in the last decade. Most such systems use an automatic speech recognition (ASR) component to convert speech to text which is then used as an input to a standard text based information retrieval (IR) component. This strategy works reasonably well when speech recognition output is mostly correct or the docu-ments are long enough so that some occurrences of the query terms are recognized correctly.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Most of the research has concentrated on retrieval of Broadcast News type of spoken documents where speech is relatively clean and the documents are relatively long. In addition it is possible to find large amounts of text with similar content in order to build better language models and enhance retrieval through use of similar documents.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We are interested in extending this to telephone conversations and teleconferences. Our task is locating occurrences of a query in spoken communications to aid browsing. This is not exactly spoken document retrieval. In fact, it is more similar to word spotting. Each document is a short segment of audio.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Although reasonable retrieval performance can be obtained using the best ASR hypothesis for tasks with moderate (\u223c 20%) word error rates, tasks with higher (40 \u2212 50%) word error rates require use of multiple ASR hypotheses. Use of ASR lattices makes the system more robust to recognition errors.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Almost all ASR systems have a closed vocabulary. This restriction comes from run-time requirements as well as the finite amount of data used for training the language models of the ASR systems. Typically the recognition vocabulary is taken to be the words appearing in the language model training corpus. Sometimes the vocabulary is further reduced to only include the most frequent words in the corpus. The words that are not in this closed vocabulary -the out of vocabulary (OOV) words -will not be recognized by the ASR system, contributing to recognition errors. The effects of OOV words in spoken document retrieval are discussed by Woodland et al. (2000) . Using phonetic search helps retrieve OOV words.", |
|
"cite_spans": [ |
|
{ |
|
"start": 638, |
|
"end": 660, |
|
"text": "Woodland et al. (2000)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "This paper is organized as follows. In Section 2 we give an overview of related work, focusing on methods dealing with speech recognition errors and OOV queries. We present the methods used in this study in Section 3.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Experimental setup and results are given in Section 4. Finally, our conclusions are presented in Section 5.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{

"text": "There are commercial systems including Nexidia/Fast-Talk (www.nexidia.com), Virage/AudioLogger (www.virage.com), and Convera (www.convera.com), as well as research systems like AT&T DVL (Cox et al., 1998) , AT&T ScanMail (Hirschberg et al., 2001) , BBN Rough'n'Ready (Makhoul et al., 2000) , CMU Informedia (www.informedia.cs.cmu.edu), and SpeechBot (www.speechbot.com), among others.",
|
"cite_spans": [ |
|
{ |
|
"start": 105, |
|
"end": 123, |
|
"text": "(Cox et al., 1998)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 140, |
|
"end": 165, |
|
"text": "(Hirschberg et al., 2001)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 186, |
|
"end": 208, |
|
"text": "(Makhoul et al., 2000)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Also between 1997 and 2000, the Test REtrieval Conference (TREC) had a spoken document retrieval (SDR) track with many participants (Garofolo et al., 2000) . NIST TREC-9 SDR Web Site (2000) states that:", |
|
"cite_spans": [ |
|
{ |
|
"start": 132, |
|
"end": 155, |
|
"text": "(Garofolo et al., 2000)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The results of the TREC-9 2000 SDR evaluation presented at TREC on November 14, 2000 showed that retrieval performance for sites on their own recognizer transcripts was virtually the same as their performance on the human reference transcripts. Therefore, retrieval of excerpts from broadcast news using automatic speech recognition for transcription was deemed to be a solved problem -even with word error rates of 30%.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "PhD Theses written on this topic include James (1995) , Wechsler (1998) , Siegler (1999) and Ng (2000) . describe a system that combines a large vocabulary continuous speech recognition (LVCSR) system and a phone-lattice word spotter (WS) for retrieval of voice and video mail messages (Brown et al., 1996) . Witbrock and Hauptmann (1997) present a system where a phonetic transcript is obtained from the word transcript and retrieval is performed using both word and phone indices. present new techniques including a new method to detect occurrences of query features, a new method to estimate occurrence probabilities, a collection-wide probability re-estimation technique and feature length weighting. Srinivasan and Petkovic (2000) introduce a method for phonetic retrieval based on the probabilistic formulation of term weighting using phone confusion data. Amir et al. (2001) use indexing based on confusable phone groups and a Bayesian phonetic edit distance for phonetic speech retrieval. compare three indexing methods based on words, syllable-like particles, and phonemes to study the problem of OOV queries in audio indexing systems. Logan and Van Thong (2002) give an alternate approach to the OOV query problem by expanding query words into in-vocabulary phrases while taking acoustic confusability and language model scores into account.", |
|
"cite_spans": [ |
|
{ |
|
"start": 41, |
|
"end": 53, |
|
"text": "James (1995)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 56, |
|
"end": 71, |
|
"text": "Wechsler (1998)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 74, |
|
"end": 88, |
|
"text": "Siegler (1999)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 93, |
|
"end": 102, |
|
"text": "Ng (2000)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 286, |
|
"end": 306, |
|
"text": "(Brown et al., 1996)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 309, |
|
"end": 338, |
|
"text": "Witbrock and Hauptmann (1997)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 705, |
|
"end": 735, |
|
"text": "Srinivasan and Petkovic (2000)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 863, |
|
"end": 881, |
|
"text": "Amir et al. (2001)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 1145, |
|
"end": 1171, |
|
"text": "Logan and Van Thong (2002)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Of the previous work, the most similar approach to the one proposed here is that of , in that they used phone lattices to aid in word spotting, in addition to single-best output from LVCSR. Our proposal might be thought of as a generalization of their approach in that we use lattices as the sole representation over which retrieval is performed. We believe that lattices are a more natural representation for retrieval in cases where there is a high degree of uncertainty about what was said, which is typically the case in LVCSR systems for conversational speech. We feel that our results, presented below, bear out this belief. Also novel in our approach is the use of indexed lattices allowing for efficient retrieval. As we note below, in the limit where one is using one-best output, the indexed lattices reduce to the normal inverted index used in text retrieval.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "In this section we describe the overall structure of our system and give details of the techniques used in our investigations. The system consists of three main components. First, the ASR component is used to convert speech into a lattice representation, together with timing information. Second, this representation is indexed for efficient retrieval. These two steps are performed off-line. Finally, when the user enters a query the index is searched and matching audio segments are returned.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methods", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "We use a state-of-the-art HMM based large vocabulary continuous speech recognition (LVCSR) system. The acoustic models consist of decision tree state clustered triphones and the output distributions are mixtures of Gaussians. The language models are pruned backoff trigram models. The pronunciation dictionaries contain few alternative pronunciations. Pronunciations that are not in our baseline pronunciation dictionary (including OOV query words) are generated using a text-to-speech (TTS) frontend. The TTS frontend can produce multiple pronunciations. The ASR systems used in this study are single pass systems. The recognition networks are represented as weighted finite state machines (FSMs).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Automatic Speech Recognition", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "The output of the ASR system is also represented as an FSM and may be in the form of a best hypothesis string or a lattice of alternate hypotheses. The labels on the arcs of the FSM may be words or phones, and the conversion between the two can easily be done using FSM composition. The costs on the arcs are negative log likelihoods. Additionally, timing information can also be present in the output.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Automatic Speech Recognition", |
|
"sec_num": "3.1" |
|
}, |
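
{

"text": "As an illustration of the word-to-phone conversion, here is a minimal sketch in plain Python rather than via FSM composition as in our system; the pronunciation dictionary and all names are hypothetical, and only the single-best word string case is shown.\n\ndef words_to_phones(words, prondict):\n    # prondict: word -> list of pronunciations, each a list of phone labels\n    # (for simplicity we take only the first pronunciation of each word)\n    phones = []\n    for w in words:\n        phones.extend(prondict[w][0])\n    return phones",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Automatic Speech Recognition",

"sec_num": "3.1"

},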
|
{ |
|
"text": "In the case of lattices, we store a set of indices, one for each arc label (word or phone) l, that records the lat-tice number L[a], input-state k[a] of each arc a labeled with l in each lattice, along with the probability mass f (k[a]) leading to that state, the probability of the arc itself p(a|k [a] ) and an index for the next state. To retrieve a single label from a set of lattices representing a speech corpus one simply retrieves all arcs in each lattice from the label index. The lattices are first normalized by weight pushing (Mohri et al., 2002) so that the probability of the set of all paths leading from the arc to the final state is 1. After weight pushing, for a given arc a, the probability of the set of all paths containing that arc is given by", |
|
"cite_spans": [ |
|
|
{ |
|
"start": 538, |
|
"end": 558, |
|
"text": "(Mohri et al., 2002)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Lattice Indexing and Retrieval", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "p(a) = \u03c0\u2208L:a\u2208\u03c0 p(\u03c0) = f (k[a])p(a|k[a])", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Lattice Indexing and Retrieval", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "namely the probability of all paths leading into that arc, multiplied by the probability of the arc itself. For a lattice L we construct a \"count\" C(l|L) for a given label l using the information stored in the index I(l) as follows,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Lattice Indexing and Retrieval", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "C(l|L) = \u03c0\u2208L p(\u03c0)C(l|\u03c0) = \u03c0\u2208L p(\u03c0) a\u2208\u03c0 \u03b4(a, l) = a\u2208L \u03b4(a, l) \u03c0\u2208L:a\u2208\u03c0 p(\u03c0) = a\u2208I(l):L[a]=L p(a) = a\u2208I(l):L[a]=L f (k[a])p(a|k[a])", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Lattice Indexing and Retrieval", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "where C(l|\u03c0) is the number of times l is seen on path \u03c0 and \u03b4(a, l) is 1 if arc a has the label l and 0 otherwise. Retrieval can be thresholded so that matches below a certain count are not returned.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Lattice Indexing and Retrieval", |
|
"sec_num": "3.2" |
|
}, |
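
{

"text": "A minimal sketch of the index and the count computation in Python; all names are hypothetical, and the actual system operates on weighted FSMs rather than the toy structures used here. Each posting records the lattice number, the input state k[a], the mass f(k[a]) obtained after weight pushing, the arc probability p(a|k[a]), and the next state.\n\nfrom collections import defaultdict\n\ndef build_index(lattices):\n    # lattices: lattice_id -> (arcs, f_mass); each arc is a tuple\n    # (input_state, label, next_state, arc_prob) and f_mass[state] is the\n    # probability mass f(k[a]) leading to that state after weight pushing\n    index = defaultdict(list)\n    for lat_id, (arcs, f_mass) in lattices.items():\n        for (k, label, nxt, p_arc) in arcs:\n            index[label].append((lat_id, k, f_mass[k], p_arc, nxt))\n    return index\n\ndef label_counts(index, label, threshold=0.0):\n    # C(l|L): sum of f(k[a]) * p(a|k[a]) over the postings for l in lattice L\n    counts = defaultdict(float)\n    for (lat_id, k, f_k, p_arc, nxt) in index[label]:\n        counts[lat_id] += f_k * p_arc\n    return {lat: c for lat, c in counts.items() if c >= threshold}",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Lattice Indexing and Retrieval",

"sec_num": "3.2"

},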
|
{ |
|
"text": "To search a multilabel expression (e.g. a multiword phrase) w 1 w 2 . . . w n we seek on each label in the expression, and then for each (w i , w i+1 ) join the output states of w i with the matching input states of w i+1 ; in this way we retrieve just those path segments in each lattice that match the entire multi-label expression. The probability of each match is de-", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Lattice Indexing and Retrieval", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "fined as f (k[a 1 ])p(a 1 |k[a 1 ])p(a 2 |k[a 2 ]) . . . p(a n |k[a n ]), where p(a i |k[a i ])", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Lattice Indexing and Retrieval", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "is the probability of the ith arc in the expression starting in arc a 1 . The total \"count\" for the lattice is computed as defined above.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Lattice Indexing and Retrieval", |
|
"sec_num": "3.2" |
|
}, |
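
{

"text": "The join can be sketched as follows, continuing the hypothetical index of the previous block; each partial match carries the lattice number, the state where the next arc must start, and the score accumulated so far (epsilon arcs and other lattice details are ignored in this simplification).\n\ndef phrase_counts(index, labels, threshold=0.0):\n    # seed with the first label, scoring f(k[a_1]) * p(a_1|k[a_1])\n    partial = [(lat, nxt, f_k * p)\n               for (lat, k, f_k, p, nxt) in index[labels[0]]]\n    for label in labels[1:]:\n        # join: the next arc must start where the previous arc ended\n        partial = [(lat, nxt2, score * p2)\n                   for (lat, state, score) in partial\n                   for (lat2, k2, f2, p2, nxt2) in index[label]\n                   if lat2 == lat and k2 == state]\n    # sum the path-segment scores per lattice, as in the single-label case\n    counts = defaultdict(float)\n    for (lat, _, score) in partial:\n        counts[lat] += score\n    return {lat: c for lat, c in counts.items() if c >= threshold}",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Lattice Indexing and Retrieval",

"sec_num": "3.2"

},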
|
{ |
|
"text": "Note that in the limit case where each lattice is an unweighted single path -i.e. a string of labels -the above scheme reduces to a standard inverted index.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Lattice Indexing and Retrieval", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "The count C(l|L) can be interpreted as a lattice-based confidence measure. Although it may be possible to use more sophisticated confidence measures, use of (posterior) probabilities allows for a simple factorization which makes indexing efficient.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Lattice Indexing and Retrieval", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "In order to deal with queries that contain OOV words we investigate the use of sub-word units for indexing. In this study we use phones as the sub-word units. There are two methods for obtaining phonetic representation of an input utterance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Indexing Using Sub-word Units", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "1. Phone recognition using an ASR system where recognition units are phones. This is achieved by using a phone level language model instead of the word level language model used in the baseline ASR system.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Indexing Using Sub-word Units", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "2. Converting the word level representation of the utterance into a phone level representation. This is achieved by using the baseline ASR system and replacing each word in the output by its pronunciation(s) in terms of phones.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Indexing Using Sub-word Units", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "Both methods have their shortcomings. Phone recognition is known to be less accurate than word recognition. On the other hand, the second method can only generate phone strings that are substrings of the pronunciations of in-vocabulary word strings. An alternative is to use hybrid language models used for OOV word detection (Yazgan and Saraclar, 2004) . For retrieval, each query word is converted into phone string(s) by using its pronunciation(s). The phone index can then be searched for each phone string. Note that this approach will generate many false alarms, particularly for short query words, which are likely to be substrings of longer words. In order to control for this a bound on minimum pronunciation length can be utilized. Since most short words are in vocabulary this bound has little effect on recall.", |
|
"cite_spans": [ |
|
{ |
|
"start": 338, |
|
"end": 353, |
|
"text": "Saraclar, 2004)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Indexing Using Sub-word Units", |
|
"sec_num": "3.3" |
|
}, |
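
{

"text": "A sketch of the query side, reusing the hypothetical phrase_counts above on a phone index; pronounce() stands in for the TTS frontend and may return several pronunciations, and queries whose pronunciations do not exceed the minimum length are not answered.\n\ndef phone_search(phone_index, word, pronounce, min_phones=3):\n    results = {}\n    for pron in pronounce(word):  # pron: a list of phone labels\n        if len(pron) <= min_phones:\n            continue  # reject short pronunciations to limit false alarms\n        for lat, c in phrase_counts(phone_index, pron).items():\n            # combine alternative pronunciations by keeping the best count\n            # (one possible convention)\n            results[lat] = max(results.get(lat, 0.0), c)\n    return results",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Indexing Using Sub-word Units",

"sec_num": "3.3"

},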
|
{ |
|
"text": "Given a word index and a sub-word index, it is possible to improve the retrieval performance of the system by using both indices. There are many strategies for doing this.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Using Both Word and Sub-word Indices", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "Search both the word index and the sub-word index, combine the results.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "combination:", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "Search the word index for in-vocabulary queries, search the sub-word index for OOV queries.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "vocabulary cascade:", |
|
"sec_num": "2." |
|
}, |
|
{ |
|
"text": "Search the word index, if no result is returned search the sub-word index.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "search cascade:", |
|
"sec_num": "3." |
|
}, |
|
{ |
|
"text": "In the first case, if the indices are obtained from ASR best hypotheses, then the result combination is a simple union of the separate sets of results. However, if indices are obtained from lattices, then in addition to taking a union of results, retrieval can be done using a combined score. Given a query q, let C w (q) and C p (q) be the lattice counts obtained from the word index and the phone index respectively. We also define the normalized lattice count for the phone index as", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "search cascade:", |
|
"sec_num": "3." |
|
}, |
|
{ |
|
"text": "C norm p (q) = (C p (q)) 1 |pron(q)|", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "search cascade:", |
|
"sec_num": "3." |
|
}, |
|
{ |
|
"text": "where |pron(q)| is the length of the pronunciation of query q. We then define the combined score to be", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "search cascade:", |
|
"sec_num": "3." |
|
}, |
|
{ |
|
"text": "C wp (q) = C w (q) + \u03bbC norm p (q)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "search cascade:", |
|
"sec_num": "3." |
|
}, |
|
{ |
|
"text": "where \u03bb is an empirically determined scaling factor.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "search cascade:", |
|
"sec_num": "3." |
|
}, |
|
{ |
|
"text": "In the other cases, instead of using two different thresholds we use a single threshold on C w (q) and C norm p (q) during retrieval.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "search cascade:", |
|
"sec_num": "3." |
|
}, |
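
{

"text": "The combined score and the cascades can be sketched directly from these definitions; lam stands for the empirically tuned \u03bb, and its default below is only a placeholder.\n\ndef combined_score(C_w, C_p, pron_len, lam=0.5):\n    # C_wp(q) = C_w(q) + lambda * C_p(q) ** (1 / |pron(q)|)\n    C_p_norm = C_p ** (1.0 / pron_len) if C_p > 0 else 0.0\n    return C_w + lam * C_p_norm\n\ndef vocabulary_cascade(word_hits, phone_hits, in_vocabulary):\n    # strategy 2: pick the index by the query's vocabulary status\n    return word_hits if in_vocabulary else phone_hits\n\ndef search_cascade(word_hits, phone_hits):\n    # strategy 3: fall back to the phone index only on an empty result\n    return word_hits if word_hits else phone_hits",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Using Both Word and Sub-word Indices",

"sec_num": "3.4"

},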
|
{ |
|
"text": "For evaluating ASR performance we use the standard word error rate (WER) as our metric. Since we are interested in retrieval we use OOV rate by type to measure the OOV word characteristics. For evaluating retrieval performance we use precision and recall with respect to manual transcriptions. Let Correct(q) be the number of times the query q is found correctly, Answer(q) be the number of answers to the query q, and Reference(q) be the number of times q is found in the reference.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Metrics", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Precision(q) = Correct(q) Answer(q) Recall(q) = Correct(q) Reference(q)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Metrics", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "We compute precision and recall rates for each query and report the average over all queries. The set of queries Q consists of all the words seen in the reference except for a stoplist of 100 most common words. The measurement is not weighted by frequency -i.e. each query q \u2208 Q is presented to the system only once, independent of the number of occurences of q in the transcriptions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Metrics", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Precision = 1 |Q| q\u2208Q Precision(q) Recall = 1 |Q| q\u2208Q Recall(q)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Metrics", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "For lattice based retrieval methods, different operating points can be obtained by changing the threshold. The precision and recall at these operating points can be plotted as a curve.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Metrics", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "In addition to individual precision-recall values we also compute the F-measure defined as F = 2 \u00d7 Precision \u00d7 Recall Precision + Recall and report the maximum F-measure (maxF) to summarize the information in a precision-recall curve.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Metrics", |
|
"sec_num": "4.1" |
|
}, |
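
{

"text": "A sketch of this evaluation: per-query precision and recall are averaged over the query set, and a sweep over thresholds yields maxF (treating queries with no answers as having precision 0 is one possible convention).\n\ndef average_precision_recall(per_query):\n    # per_query: a list of (correct, answered, reference) tuples, one per query\n    n = len(per_query)\n    precision = sum(c / a if a else 0.0 for c, a, r in per_query) / n\n    recall = sum(c / r for c, a, r in per_query) / n\n    return precision, recall\n\ndef max_f_measure(curve):\n    # curve: (precision, recall) pairs obtained by sweeping the threshold\n    return max(2 * p * r / (p + r) for p, r in curve if p + r > 0)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Evaluation Metrics",

"sec_num": "4.1"

},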
|
{ |
|
"text": "We use three different corpora to assess the effectiveness of different retrieval techniques.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Corpora", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "The first corpus is the DARPA Broadcast News corpus consisting of excerpts from TV or radio programs including various acoustic conditions. The test set is the 1998 Hub-4 Broadcast News (hub4e98) evaluation test set (available from LDC, Catalog no. LDC2000S86) which is 3 hours long and was manually segmented into 940 segments. It contains 32411 word tokens and 4885 word types. For ASR we use a real-time system ). Since the system was designed for SDR, the recognition vocabulary of the system has over 200K words. The pronunciation dictionary has 1.25 pronunciations per word.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Corpora", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "The second corpus is the Switchboard corpus consisting of two party telephone conversations. The test set is the RT02 evaluation test set which is 5 hours long, has 120 conversation sides and was manually segmented into 6266 segments. It contains 65255 word tokens and 3788 word types. For ASR we use the first pass of the evaluation system (Ljolje et al., 2002) . The recognition vocabulary of the system has over 45K words. For these words the average number of pronunciations per word is 1.07.", |
|
"cite_spans": [ |
|
{ |
|
"start": 341, |
|
"end": 362, |
|
"text": "(Ljolje et al., 2002)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Corpora", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "The third corpus is named Teleconferences since it consists of multiparty teleconferences on various topics. The audio from the legs of the conference are summed and recorded as a single channel. A test set of six teleconferences (about 3.5 hours) was transcribed. It contains 31106 word tokens and 2779 word types. Calls are automatically segmented into a total of 1157 segments prior to ASR, using an algorithm that detects changes in the acoustics. We again use the first pass of the Switchboard evaluation system for ASR.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Corpora", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "In Table 1 we present the ASR performance on these three tasks as well as the OOV Rate by type of the corpora. It is important to note that the recognition vocabulary for the Switchboard and Teleconferences tasks are the same and no data from the Teleconferences task was used while building the ASR systems. The mismatch between the Teleconference data and the models trained on the Switchboard corpus contributes to the significant increase in WER.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 10, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Corpora", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "As a baseline, we use the best word hypotheses of the ASR system for indexing and retrieval. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Using ASR Best Word Hypotheses", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "In the second set of experiments we investigate the use of ASR word lattices. In order to reduce storage requirements, lattices can be pruned to contain only the paths whose costs (i.e. negative log likelihood) are within a threshold with respect to the best path. The smaller this cost threshold is, the smaller the lattices and the index files are. In Figure 1 we present the precision-recall curves for different pruning thresholds on the Teleconferences task. Table 3 the resulting index sizes and maximum Fmeasure values are given. On the teleconferences task we observed that cost=6 yields good results, and used this value for the rest of the experiments. Note that this increases the index size with respect to the ASR 1-best case by 3 times for Broadcast News, by 5 times for Switchboard and by 9 times for Teleconferences.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 354, |
|
"end": 362, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
}, |
|
{ |
|
"start": 464, |
|
"end": 471, |
|
"text": "Table 3", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Using ASR Word Lattices", |
|
"sec_num": "4.4" |
|
}, |
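
{

"text": "A path-level sketch of this pruning; this is a simplification, since actual lattice pruning removes arcs on the lattice graph itself (e.g. with FSM tools) rather than enumerating paths.\n\ndef prune_paths(paths, cost_threshold):\n    # paths: a list of (cost, path) pairs, cost = negative log likelihood;\n    # keep the paths within cost_threshold of the best (lowest-cost) path\n    best = min(cost for cost, _ in paths)\n    return [(cost, path) for cost, path in paths\n            if cost <= best + cost_threshold]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Using ASR Word Lattices",

"sec_num": "4.4"

},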
|
|
{ |
|
"text": "Next, we compare using the two methods of phonetic transcription discussed in Section 3.3 -phone recognition and word-to-phone conversion -for retrieval using only phone lattices. In Table 4 the precision and recall values that yield the maximum F-measure as well as the maximum F-measure values are presented. These results clearly indicate that phone recognition is inferior for our purposes. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 183, |
|
"end": 190, |
|
"text": "Table 4", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Using ASR Phone Lattices", |
|
"sec_num": "4.5" |
|
}, |
|
{ |
|
"text": "We investigated using the strategies mentioned in Section 3.4, and found strategy 3 -search the word index, if no result is returned search the phone index -to be superior to others. We give a comparison of the maximum F-values for the three strategies in Table 5 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 256, |
|
"end": 263, |
|
"text": "Table 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Using ASR Word and Phone Lattices", |
|
"sec_num": "4.6" |
|
}, |
|
{ |
|
"text": "Strategy maxF 1.combination 50.5 2.vocabulary cascade 51.0 3.search cascade 52.8 Table 5 : Comparison of different strategies for using word and phone indices", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 81, |
|
"end": 88, |
|
"text": "Table 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Using ASR Word and Phone Lattices", |
|
"sec_num": "4.6" |
|
}, |
|
{ |
|
"text": "In Figure 2 we present results for this strategy on the Teleconferences corpus. The phone indices used in these experiments were obtained by converting the word lattices into phone lattices. Using the phone indices obtained by phone recognition gave significantly worse results. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 11, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Using ASR Word and Phone Lattices", |
|
"sec_num": "4.6" |
|
}, |
|
{ |
|
"text": "When searching for words with short pronunciations in the phone index the system will produce many false alarms. One way of reducing the number of false alarms is to disallow queries with short pronunciations. In Figure 3 we show the effect of imposing a minimum pronunciation length for queries. For a query to be answered its pronunciation has to have more than minphone phones, otherwise no answers are returned. Best maximum Fmeasure result is obtained using minphone=3.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 213, |
|
"end": 219, |
|
"text": "Figure", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Effect of Minimum Pronunciation Length for Queries", |
|
"sec_num": "4.7" |
|
}, |
|
{ |
|
"text": "In Figure 4 we present results for different recognition vocabulary sizes (5k, 20k, 45k) on the Switchboard corpus. The OOV rates by type are 32%, 10% and 6% respectively. The word error rates are 41.5%, 40.1% and 40.1% respectively. The precision recall curves are almost the same for 20k and 45k vocabulary sizes.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 11, |
|
"text": "Figure 4", |
|
"ref_id": "FIGREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Effects of Recognition Vocabulary Size", |
|
"sec_num": "4.8" |
|
}, |
|
{ |
|
"text": "So far, in all the experiments the query list consisted of single words. In order to observe the behavior of various methods when faced with longer queries we used a set of Figure 3: Effect of minimum pronunciation length using a word/phone hybrid strategy for teleconferences word pair queries. Instead of using all the word pairs seen in the reference transcriptions, we chose the ones which were more likely to occur together than with other words. For this, we sorted the word pairs (w 1 , w 2 ) according to their pointwise mutual information", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Using Word Pair Queries", |
|
"sec_num": "4.9" |
|
}, |
|
{ |
|
"text": "log p(w 1 , w 2 ) p(w 1 )p(w 2 )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Using Word Pair Queries", |
|
"sec_num": "4.9" |
|
}, |
|
{ |
|
"text": "and used the top pairs as queries in our experiments. Note that in these experiments only the query set is changed and the indices remain the same as before.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Using Word Pair Queries", |
|
"sec_num": "4.9" |
|
}, |
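
{

"text": "The pair selection can be sketched as follows, using maximum-likelihood estimates from the reference transcriptions (smoothing is ignored, and both normalizers are approximated by the total token count).\n\nimport math\nfrom collections import Counter\n\ndef top_word_pairs(transcripts, top_n):\n    # transcripts: a list of word lists from the reference transcriptions\n    unigrams, bigrams = Counter(), Counter()\n    total = 0\n    for words in transcripts:\n        unigrams.update(words)\n        bigrams.update(zip(words, words[1:]))\n        total += len(words)\n    def pmi(item):\n        (w1, w2), c12 = item\n        # log p(w1, w2) / (p(w1) p(w2)) with counts normalized by total\n        return math.log(c12 * total / (unigrams[w1] * unigrams[w2]))\n    ranked = sorted(bigrams.items(), key=pmi, reverse=True)\n    return [pair for pair, _ in ranked[:top_n]]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Using Word Pair Queries",

"sec_num": "4.9"

},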
|
{ |
|
"text": "As it turns out, the precision of the system is very high on this type of queries. For this reason, it is more interesting to look at the operating point that achieves the maximum F-measure for each technique, which in this case coincides with the point that yields the highest recall. In Table 6 we present results on the Switchboard corpus using 1004 word pair queries. Using word lattices it is possible to increase the recall of the system by 16.4% while degrading the precision by only 2.2%. Using phone lattices we can get another 3.7% increase in recall for 1.2% loss in precision. The final system still has 95% precision.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 289, |
|
"end": 296, |
|
"text": "Table 6", |
|
"ref_id": "TABREF7" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Using Word Pair Queries", |
|
"sec_num": "4.9" |
|
}, |
|
|
{ |
|
"text": "Finally, we make a comparison of various techniques on different tasks. In Table 7 maximum F-measure (maxF) is given. Using word lattices yields a relative gain of 3-5% in maxF over using best word hypotheses. For the final system that uses both word and phone lattices, the relative gain over the baseline increases to 8-12%. Table 7 : Maximum F-measure for various systems and tasks", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 75, |
|
"end": 82, |
|
"text": "Table 7", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 327, |
|
"end": 334, |
|
"text": "Table 7", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Summary of Results on Different Corpora", |
|
"sec_num": "4.10" |
|
}, |
|
{ |
|
"text": "In Figure 5 we present the precision recall curves. The gain from using better techniques utilizing word and phone lattices increases as retrieval performance gets worse.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 11, |
|
"text": "Figure 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Summary of Results on Different Corpora", |
|
"sec_num": "4.10" |
|
}, |
|
{ |
|
"text": "We proposed an indexing procedure for spoken utterance retrieval that works on ASR lattices rather than just single-best text. We demonstrated that this procedure can improve maximum F-measure by over five points compared to single-best retrieval on tasks with poor WER and low redundancy. The representation is flexible so that we can represent both word lattices, as well as phone lattices, the latter being important for improving performance when searching for phrases containing OOV Figure 5 : Precision Recall for various techniques on different tasks. The tasks are Broadcast News (+), Switchboard (x), and Teleconferences (o). The techniques are using best word hypotheses (single points), using word lattices (solid lines), and using word and phone lattices (dashed lines).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 488, |
|
"end": 496, |
|
"text": "Figure 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "words. It is important to note that spoken utterance retrieval for conversational speech has different properties than spoken document retrieval for broadcast news. Although consistent improvements were observed on a variety of tasks including Broadcast News, the procedure proposed here is most beneficial for more difficult conversational speech tasks like Switchboard and Teleconferences.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Advances in phonetic word spotting", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Amir", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Efrat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Srinivasan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Proceedings of the Tenth International Conference on Information and Knowledge Management", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "580--582", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "A. Amir, A. Efrat, and S. Srinivasan. 2001. Advances in phonetic word spotting. In Proceedings of the Tenth International Conference on Information and Knowl- edge Management, pages 580-582, Atlanta, Georgia, USA.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Open-vocabulary speech indexing for voice and video mail retrieval", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [ |
|
"G" |
|
], |
|
"last": "Brown", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"T" |
|
], |
|
"last": "Foote", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [ |
|
"J F" |
|
], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [ |
|
"Sparck" |
|
], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Young", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1996, |
|
"venue": "Proc. ACM Multimedia 96", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "307--316", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "M. G. Brown, J. T. Foote, G. J. F. Jones, K. Sparck Jones, and S. J. Young. 1996. Open-vocabulary speech in- dexing for voice and video mail retrieval. In Proc. ACM Multimedia 96, pages 307-316, Boston, Novem- ber.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "On the application of multimedia processing to telecommunications", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [ |
|
"V" |
|
], |
|
"last": "Cox", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Haskell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Lecun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Shahraray", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Rabiner", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "Proceedings of the IEEE", |
|
"volume": "86", |
|
"issue": "5", |
|
"pages": "755--824", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "R. V. Cox, B. Haskell, Y. LeCun, B. Shahraray, and L. Ra- biner. 1998. On the application of multimedia pro- cessing to telecommunications. Proceedings of the IEEE, 86(5):755-824, May.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "The TREC spoken document retrieval track: A success story", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Garofolo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Auzanne", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Voorhees", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "Proceedings of the Recherche d'Informations Assiste par Ordinateur: Content Based Multimedia Information Access Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J. Garofolo, G. Auzanne, and E. Voorhees. 2000. The TREC spoken document retrieval track: A success story. In Proceedings of the Recherche d'Informations Assiste par Ordinateur: Content Based Multimedia In- formation Access Conference.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Scanmail: Browsing and searching speech data by content", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Hirschberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Bacchiani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Hindle", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Isenhour", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Rosenberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Stark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Stead", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Whittaker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Zamchick", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Proceedings of the European Conference on Speech Communication and Technology (Eurospeech)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J. Hirschberg, M. Bacchiani, D. Hindle, P. Isenhour, A. Rosenberg, L. Stark, L. Stead, S. Whittaker, and G. Zamchick. 2001. Scanmail: Browsing and search- ing speech data by content. In Proceedings of the European Conference on Speech Communication and Technology (Eurospeech), Aalborg, Denmark.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "The Application of Classical Information Retrieval Techniques to Spoken Documents", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [ |
|
"Anthony" |
|
], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1995, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David Anthony James. 1995. The Application of Classi- cal Information Retrieval Techniques to Spoken Docu- ments. Ph.D. thesis, University of Cambridge, Down- ing College.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Retrieving spoken documents by combining multiple index sources", |
|
"authors": [ |
|
{ |
|
"first": "G", |
|
"middle": [ |
|
"J F" |
|
], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"T" |
|
], |
|
"last": "Foote", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [ |
|
"Sparck" |
|
], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Young", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1996, |
|
"venue": "Proc. SIGIR 96", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "30--38", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "G. J. F. Jones, J. T. Foote, K. Sparck Jones, and S. J. Young. 1996. Retrieving spoken documents by com- bining multiple index sources. In Proc. SIGIR 96, pages 30-38, Z\u00fcrich, August.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "The AT&T RT-02 STT system", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Ljolje", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Saraclar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Bacchiani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Collins", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Roark", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proc. RT02 Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "A. Ljolje, M. Saraclar, M. Bacchiani, M. Collins, and B. Roark. 2002. The AT&T RT-02 STT system. In Proc. RT02 Workshop, Vienna, Virginia.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Confusion-based query expansion for OOV words in spoken document retrieval", |
|
"authors": [ |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Logan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Van Thong", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of the International Conference on Spoken Language Processing (ICSLP), Denver", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "B. Logan and JM Van Thong. 2002. Confusion-based query expansion for OOV words in spoken document retrieval. In Proceedings of the International Confer- ence on Spoken Language Processing (ICSLP), Den- ver, Colorado, USA.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Word and sub-word indexing approaches for reducing the effects of OOV queries on spoken audio", |
|
"authors": [ |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Logan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Moreno", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "O", |
|
"middle": [], |
|
"last": "Deshmukh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proc. HLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "B. Logan, P. Moreno, and O. Deshmukh. 2002. Word and sub-word indexing approaches for reducing the ef- fects of OOV queries on spoken audio. In Proc. HLT.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Speech and language technologies for audio indexing and retrieval", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Makhoul", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Kubala", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Leek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Schwartz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Srivastava", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "Proceedings of the IEEE", |
|
"volume": "88", |
|
"issue": "8", |
|
"pages": "1338--1353", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J. Makhoul, F. Kubala, T. Leek, D. Liu, L. Nguyen, R. Schwartz, and A. Srivastava. 2000. Speech and language technologies for audio indexing and retrieval. Proceedings of the IEEE, 88(8):1338-1353, August.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Weighted finite-state transducers in speech recognition", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Mohri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Pereira", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Riley", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Computer Speech and Language", |
|
"volume": "16", |
|
"issue": "1", |
|
"pages": "69--88", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "M. Mohri, F. Pereira, and M. Riley. 2002. Weighted finite-state transducers in speech recognition. Com- puter Speech and Language, 16(1):69-88.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Subword-Based Approaches for Spoken Document Retrieval", |
|
"authors": [ |
|
{ |
|
"first": "Kenney", |
|
"middle": [], |
|
"last": "Ng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kenney Ng. 2000. Subword-Based Approaches for Spo- ken Document Retrieval. Ph.D. thesis, Massachusetts Institute of Technology.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "NIST TREC-9 SDR Web Site", |
|
"authors": [], |
|
"year": 2000, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "NIST TREC-9 SDR Web Site. 2000. www.nist.gov/speech/tests/sdr/sdr2000/sdr2000.htm.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Towards automatic closed captioning: Low latency real time broadcast news transcription", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Saraclar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Riley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Bocchieri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Goffin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of the International Conference on Spoken Language Processing (ICSLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "M. Saraclar, M. Riley, E. Bocchieri, and V. Goffin. 2002. Towards automatic closed captioning: Low latency real time broadcast news transcription. In Proceedings of the International Conference on Spoken Language Processing (ICSLP), Denver, Colorado, USA.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Integration of Continuous Speech Recognition and Information Retrieval for Mutually Optimal Performance", |
|
"authors": [ |
|
{ |
|
"first": "Matthew", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Siegler", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew A. Siegler. 1999. Integration of Continuous Speech Recognition and Information Retrieval for Mu- tually Optimal Performance. Ph.D. thesis, Carnegie Mellon University.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Phonetic confusion matrix based spoken document retrieval", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Srinivasan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Petkovic", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "Proceedings of the 23rd Annual International ACM SIGIR Conference on Research and Development in Information Retrieval", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "81--87", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "S. Srinivasan and D. Petkovic. 2000. Phonetic confu- sion matrix based spoken document retrieval. In Pro- ceedings of the 23rd Annual International ACM SIGIR Conference on Research and Development in Informa- tion Retrieval, pages 81-87.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "New techniques for open-vocabulary spoken document retrieval", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Wechsler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Munteanu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Sc\u00e4uble", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "Proceedings of the 21st Annual International ACM SIGIR Conference on Research and Development in Information Retrieval", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "20--27", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "M. Wechsler, E. Munteanu, and P. Sc\u00e4uble. 1998. New techniques for open-vocabulary spoken document re- trieval. In Proceedings of the 21st Annual Interna- tional ACM SIGIR Conference on Research and De- velopment in Information Retrieval, pages 20-27, Mel- bourne, Australia.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Spoken Document Retrieval Based on Phoneme Recognition", |
|
"authors": [ |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Wechsler", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Martin Wechsler. 1998. Spoken Document Retrieval Based on Phoneme Recognition. Ph.D. thesis, Swiss Federal Institute of Technology (ETH), Zurich.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Using words and phonetic strings for efficient information retrieval from imperfectly transcribed spoken documents", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Witbrock", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Hauptmann", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "2nd ACM International Conference on Digital Libraries (DL'97)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "30--35", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "M. Witbrock and A. Hauptmann. 1997. Using words and phonetic strings for efficient information retrieval from imperfectly transcribed spoken documents. In 2nd ACM International Conference on Digital Libraries (DL'97), pages 30-35, Philadelphia, PA, July.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Effects of out of vocabulary words in spoken document retrieval", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Woodland", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Johnson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Jourlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K.Sparck", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "Proc. SIGIR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "372--374", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "P.C. Woodland, S.E. Johnson, P. Jourlin, and K.Sparck Jones. 2000. Effects of out of vocabulary words in spoken document retrieval. In Proc. SIGIR, pages 372-374, Athens, Greece.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Hybrid language models for out of vocabulary word detection in large vocabulary conversational speech recognition", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Yazgan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Saraclar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of the IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "A. Yazgan and M. Saraclar. 2004. Hybrid language mod- els for out of vocabulary word detection in large vocab- ulary conversational speech recognition. In Proceed- ings of the IEEE International Conference on Acous- tics, Speech and Signal Processing (ICASSP), Mon- treal, Canada.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"text": "Precision Recall using word lattices for teleconferences In", |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF2": { |
|
"text": "Comparison of word lattices and word/phone hybrid strategies for teleconferences", |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF5": { |
|
"text": "Comparison of various recognition vocabulary sizes for Switchboard", |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"TABREF1": { |
|
"num": null, |
|
"html": null, |
|
"text": "", |
|
"content": "<table/>", |
|
"type_str": "table" |
|
}, |
|
"TABREF3": { |
|
"num": null, |
|
"html": null, |
|
"text": "", |
|
"content": "<table/>", |
|
"type_str": "table" |
|
}, |
|
"TABREF5": { |
|
"num": null, |
|
"html": null, |
|
"text": "Comparison of different sources for the phone index on the Teleconferences corpus", |
|
"content": "<table/>", |
|
"type_str": "table" |
|
}, |
|
"TABREF7": { |
|
"num": null, |
|
"html": null, |
|
"text": "Results for word pair queries on Switchboard", |
|
"content": "<table/>", |
|
"type_str": "table" |
|
} |
|
} |
|
} |
|
} |