|
{ |
|
"paper_id": "D13-1023", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T16:41:50.041858Z" |
|
}, |
|
"title": "An Efficient Language Model Using Double-Array Structures", |
|
"authors": [ |
|
{ |
|
"first": "Makoto", |
|
"middle": [], |
|
"last": "Yasuhara", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Tsukuba", |
|
"location": { |
|
"country": "Japan" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Toru", |
|
"middle": [], |
|
"last": "Tanaka", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Tsukuba", |
|
"location": { |
|
"country": "Japan" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Jun-Ya", |
|
"middle": [], |
|
"last": "Norimatsu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Tsukuba", |
|
"location": { |
|
"country": "Japan" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Mikio", |
|
"middle": [], |
|
"last": "Yamamoto", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Tsukuba", |
|
"location": { |
|
"country": "Japan" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "N gram language models tend to increase in size with inflating the corpus size, and consume considerable resources. In this paper, we propose an efficient method for implementing ngram models based on doublearray structures. First, we propose a method for representing backwards suffix trees using double-array structures and demonstrate its efficiency. Next, we propose two optimization methods for improving the efficiency of data representation in the double-array structures. Embedding probabilities into unused spaces in double-array structures reduces the model size. Moreover, tuning the word IDs in the language model makes the model smaller and faster. We also show that our method can be used for building large language models using the division method. Lastly, we show that our method outperforms methods based on recent related works from the viewpoints of model size and query speed when both optimization methods are used.", |
|
"pdf_parse": { |
|
"paper_id": "D13-1023", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "N gram language models tend to increase in size with inflating the corpus size, and consume considerable resources. In this paper, we propose an efficient method for implementing ngram models based on doublearray structures. First, we propose a method for representing backwards suffix trees using double-array structures and demonstrate its efficiency. Next, we propose two optimization methods for improving the efficiency of data representation in the double-array structures. Embedding probabilities into unused spaces in double-array structures reduces the model size. Moreover, tuning the word IDs in the language model makes the model smaller and faster. We also show that our method can be used for building large language models using the division method. Lastly, we show that our method outperforms methods based on recent related works from the viewpoints of model size and query speed when both optimization methods are used.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "N gram language models (F. Jelinek, 1990) are widely used as probabilistic models of sentence in natural language processing. The wide use of the Internet has entailed a dramatic increase in size of the available corpora, which can be harnessed to obtain a significant improvement in model quality. In particular, Brants et al. (2007) have shown that the performance of statistical machine translation systems is monotonically improved with the increasing size of training corpora for the language model. However, models using larger corpora also consume more resources. In recent years, many methods for improving the efficiency of language models have been proposed to tackle this problem (Pauls and Klein, 2011; Kenneth Heafield, 2011) . Such methods not only reduce the required memory size but also raise query speed.", |
|
"cite_spans": [ |
|
{ |
|
"start": 23, |
|
"end": 41, |
|
"text": "(F. Jelinek, 1990)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 314, |
|
"end": 334, |
|
"text": "Brants et al. (2007)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 691, |
|
"end": 714, |
|
"text": "(Pauls and Klein, 2011;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 715, |
|
"end": 738, |
|
"text": "Kenneth Heafield, 2011)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this paper, we propose the double-array language model (DALM) which uses double-array structures (Aoe, 1989) . Double-array structures are widely used in text processing, especially for Japanese. They are known to provide a compact representation of tries (Fredkin, 1960) and fast transitions between trie nodes. The ability to store and manipulate tries efficiently is expected to increase the performance of language models (i.e., improving query speed and reducing the model size in terms of memory) because tries are one of the most common representations of data structures in language models. We use double-array structures to implement a language model since we can utilize their speed and compactness when querying the model about an ngram.", |
|
"cite_spans": [ |
|
{ |
|
"start": 100, |
|
"end": 111, |
|
"text": "(Aoe, 1989)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 259, |
|
"end": 274, |
|
"text": "(Fredkin, 1960)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In order to utilize of double-array structures as language models, we modify them to be able to store probabilities and backoff weights. We also propose two optimization methods: embedding and ordering. These methods reduce model size and increase query speed. Embedding is an efficient method for storing ngram probabilities and backoff weights, whereby we find vacant spaces in the double-array language model structure and populate them with language model information, such as probabilities and backoff weights. Ordering is a method for compacting the double-array structure. DALM uses word IDs for all words of the ngram, and ordering assigns a word ID to each word to reduce the model size. These two optimization methods can be used simultaneously and are also expected to work well.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In our experiments, we use a language model based on corpora of the NTCIR patent retrieval task (Atsushi Fujii et al., 2007; Atsushi Fujii et al., 2005; Atsushi Fujii et al., 2004; Makoto Iwayama et al., 2003) . The model size is 31 GB in the ARPA file format. We conducted experiments focusing on query speed and model size. The results indicate that when the abovementioned optimization methods are used together, DALM outperforms state-ofthe-art methods on those points.", |
|
"cite_spans": [ |
|
{ |
|
"start": 96, |
|
"end": 124, |
|
"text": "(Atsushi Fujii et al., 2007;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 125, |
|
"end": 152, |
|
"text": "Atsushi Fujii et al., 2005;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 153, |
|
"end": 180, |
|
"text": "Atsushi Fujii et al., 2004;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 181, |
|
"end": 209, |
|
"text": "Makoto Iwayama et al., 2003)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Tries (Fredkin, 1960) are one of the most widely used tree structures in ngram language models since they can reduce memory requirements by sharing common prefix. Moreover, since the query speed for tries depends only on the number of input words, the query speed remains constant even if the ngram model increases in size.", |
|
"cite_spans": [ |
|
{ |
|
"start": 6, |
|
"end": 21, |
|
"text": "(Fredkin, 1960)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tries and Backwards Suffix Trees", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Backwards suffix trees (Bell et al., 1990; Stolcke, 2002; Germann et al., 2009) are among the most efficient representations of tries for language models. They contain ngrams in reverse order of history words. Figure 1 shows an example of a backwards suffix tree representation. In this paper, we denote an ngram: by the form w 1 , w 2 , \u2022 \u2022 \u2022 , w n as w n 1 . In this example, word lists (represented as rectangular tables) contain target words (here, w n ) of ngrams, and circled words in the tree denote history words (here, w n\u22121 1 ) associated with target words. The history words \"I eat,\" \"you eat\", and \"do you eat\" are stored in reverse order. Querying this trie about an ngram is simple: just trace history words in reverse and then find the target word in a list. For example, consider querying about the trigram \"I eat fish\". First, simply trace the history in the trie in reverse order (\"eat\" \u2192 \"I\"); then, find \"fish\" in list <1>. Similarly, querying a backwards suffix tree about unknown ngrams is also efficient, because the backwards suffix tree Figure 1 : Example of a backwards suffix tree. There are two branch types in a backwards suffix tree: history words and target words. History words are shown in circles and target words are stored in word lists.", |
|
"cite_spans": [ |
|
{ |
|
"start": 23, |
|
"end": 42, |
|
"text": "(Bell et al., 1990;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 43, |
|
"end": 57, |
|
"text": "Stolcke, 2002;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 58, |
|
"end": 79, |
|
"text": "Germann et al., 2009)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 210, |
|
"end": 218, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1062, |
|
"end": 1070, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Tries and Backwards Suffix Trees", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "representation is highly suitable for the backoff calculation. For example, in querying about the 4gram \"do you eat soup\", we first trace \"eat\" \u2192 \"you\" \u2192 \"do\" in a manner similar to above. However, searching for the word \"soup\" in list <3> fails because list <3> does not contain the word \"soup\". In this case, we return to the node \"you\" to search the list <2>, where we find \"soup\". This means that the trigram \"you eat soup\" is contained in the tree while the 4gram \"do you eat soup\" is not. This behavior can be efficiently used for backoff calculation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tries and Backwards Suffix Trees", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "SRILM (Stolcke, 2002) is a widely used language model toolkit. It utilizes backwards suffix trees for its data structures. In SRILM, tries are implemented as 64-bit pointer links, which wastes a lot of memory. On the other hand, the access speed for ngram probabilities is relatively high.", |
|
"cite_spans": [ |
|
{ |
|
"start": 6, |
|
"end": 21, |
|
"text": "(Stolcke, 2002)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tries and Backwards Suffix Trees", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "In recent years, several methods have been proposed for storing language models efficiently in memory. Talbot and Osborne (2007) have proposed an efficient method based on bloom filters. This method modifies bloom filters to store count information about training sets. In prior work, bloom filters have been used for checking whether certain data are contained in a set. To store the count information, pairs from <ngram,1> to <ngram,count> are all added to the set for each ngram. To query this language model about the probability of an ngram, probabilities are calculated during querying by using these counts. Talbot and Brants (2008) have proposed a method based on perfect hash functions and bloomier filters. This method uses perfect hash functions to store ngrams and encode values (for exam-ple, probabilities or counts of ngrams in the training corpus) to a large array. Guthrie and Hepple (2010) have proposed a language model called ShefLM that uses minimal perfect hash functions (Belazzougui et al., 2009) , which can store ngrams without vacant spaces. Furthermore, values are compressed by simple dense coding (Fredriksson and Nikitin, 2007) . ShefLM achieves a high compression ratio when it stores counts of ngrams in the training corpus. However, when this method stores probabilities of ngrams, the advantage of using compression is limited because floating-point numbers are difficult to compress. Generally, compression is performed by combining the same values but, two floating-point numbers are rarely the same, especially in the case of probability values 1 . These methods implement lossy language models, meaning that, we can reduce the model size at the expense of model quality. These methods also reduce the model performance (perplexity). Pauls and Klein (2011) have proposed Berke-leyLM which is based on an implicit encoding structure, where ngrams are separated according to their order, and are sorted by word ID. The sorted ngrams are linked to each other like a trie structure. Berke-leyLM provides rather efficient methods. Variablelength coding and block compression are used if small model size is more important than query speed. In addition, Heafield (2011) has proposed an efficient language model toolkit called KenLM that has been recently used in machine translation systems, for which large language models are often needed. KenLM has two different main structure types: trie and probing. The trie structure is compact but relatively slower to query, whereas the probing structure is relatively larger but faster to query.", |
|
"cite_spans": [ |
|
{ |
|
"start": 103, |
|
"end": 128, |
|
"text": "Talbot and Osborne (2007)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 615, |
|
"end": 639, |
|
"text": "Talbot and Brants (2008)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 882, |
|
"end": 907, |
|
"text": "Guthrie and Hepple (2010)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 994, |
|
"end": 1020, |
|
"text": "(Belazzougui et al., 2009)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 1127, |
|
"end": 1158, |
|
"text": "(Fredriksson and Nikitin, 2007)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 1772, |
|
"end": 1794, |
|
"text": "Pauls and Klein (2011)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Efficient Language Models", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "In this paper, we propose a language model structure based on double-array structures. As we describe in Section 3, double-array structures can be used as fast and compact representations of tries. We propose several techniques for maximizing the performance of double-array structures from the perspective of query speed and model size.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Efficient Language Models", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "3 Double-Array", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Efficient Language Models", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "In DALM, we use a double-array structure (Aoe, 1989) to represent the trie of a language model. Double-array structures are trie representations consisting of two parallel arrays: BASE and CHECK . They are not only fast to query, but also provide a compact way to store tries. In the structure, nodes in the trie are represented by slots with the same index in both arrays. Before proposing several efficient language model representation techniques in Section 4, we introduce double-array themselves. In addition, the construction algorithms for double-arrays are described in Section 3.2 and Section 3.3.", |
|
"cite_spans": [ |
|
{ |
|
"start": 41, |
|
"end": 52, |
|
"text": "(Aoe, 1989)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Double-Array Structure", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "The most naive implementation of a trie will have a two-dimensional array NEXT . Let WORDID (w) be a function that returns a word ID as a number corresponding to its argument word w; then NEXT [n][WORDID (w)] (that presents the WORDID(w)-th slot of the nth row in the NEXT array) stores the node number which can be transit from the node number n by the word w, and we can traverse the trie efficiently and easily to serialize the array in memory. This idea is simple but wastes the most of the used memory because almost all of the slots are unused and this results in occupying memory space. The double-array structures solve this problem by taking advantage of the sparseness of the NEXT array. The two-dimensional array NEXT is merged into a one-dimensional array BASE by shifting the entries of each row of the NEXT array and combining the set of resulting arrays. We can store this result in much less memory than the serialization of the naive implementation above. Additionally, a CHECK array is introduced to check whether the transition is valid or not because we cannot distinguish which node the information in a particular slot comes from. Using a CHECK array, we can avoid transition errors and move safely to the child node of any chosen node.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Double-Array Structure", |
|
"sec_num": "3.1" |
|
}, |
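
{

"text": "For contrast, the naive NEXT representation described above can be sketched in a few lines of C++ (our illustration; the names and the 0-means-no-transition convention are assumptions of the sketch, not the paper's code):\n\n#include <cstddef>\n#include <cstdint>\n#include <vector>\n\n// Naive trie: a dense two-dimensional NEXT array. NEXT[n][WORDID(w)] holds\n// the child of node n under word w, or 0 when there is no transition (valid\n// node numbers start at 1 here). Almost every slot stays 0, which is exactly\n// the sparseness that the double-array exploits by shifting each row and\n// overlapping all rows into the single BASE array.\nstruct NaiveTrie {\n    std::vector<std::vector<int32_t>> next;  // next[node][word_id]\n\n    int32_t transit(int32_t n, int32_t word_id) const {\n        return next[(std::size_t)n][(std::size_t)word_id];  // 0: no transition\n    }\n};",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Double-Array Structure",

"sec_num": "3.1"

},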
|
{ |
|
"text": "As a definition, a node link from a node n s with a word w to the next node n next in the trie is defined as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Double-Array Structure", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "next \u2190 BASE [s] + WORDID (w) if CHECK [next] == s", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Double-Array Structure", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "where s denotes the index of the slot in the double- array structure which represents n s . The trie transition from a node n s with a word w is applied according to the following steps:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Double-Array Structure", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Step 1 Calculating the \"next\" destination and", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Double-Array Structure", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Step 2 Checking whether the transition is correct.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Double-Array Structure", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Step 2 specifically means the following:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Double-Array Structure", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "1. If CHECK [next] == s, then we can \"move\" to the node n next ;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Double-Array Structure", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "2. otherwise, we can detect that the transition from the node n s with the word w is not contained in the trie. Figure 2 shows an example of a transition from a parent node n s with a word w. Next, we describe how the existence of an ngram history can be determined (Aoe, 1989) . We can iterate over the nodes by the transitions shown above and may find the node representing an ngram history. But we should check that it is valid because nodes except for leaf nodes possiblly represent a fragment of some total ngram history. We can use endmarker symbols to determine whether an ngram history is in the trie. We add nodes meaning the endmarker symbol after the last node of each ngram history. When querying about w n\u22121 1 , we transit repeatedly; in other words, we set s = 0 and start by applying Step 1 and 2 repeatedly for each word. When we reach the node w n\u22121 , we continue searching for an endmarker symbol. If the symbol is found, we know that the ngram history w n\u22121 1 is in the trie. The double-array structure consumes 8 bytes per node because the BASE and CHECK arrays are 4 byte array variables. Therefore, the structure can store nodes compactly in case of a high filling rate. Moreover, node transitions are very fast because they require only one addition and one comparison per transition. We use a double-array structure in DALM, which can maximize its potential.", |
|
"cite_spans": [ |
|
{ |
|
"start": 266, |
|
"end": 277, |
|
"text": "(Aoe, 1989)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 112, |
|
"end": 120, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Double-Array Structure", |
|
"sec_num": "3.1" |
|
}, |
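
{

"text": "The transition rule above translates almost directly into code. The following is a minimal C++ sketch of Step 1 and Step 2 together with the endmarker-based history check; this is our own illustration rather than the authors' implementation, and the array types, the root-at-index-0 convention, and the ENDMARKER_ID constant are assumptions of the sketch.\n\n#include <cstdint>\n#include <vector>\n\n// Minimal double-array trie sketch following Section 3.1.\nstruct DoubleArray {\n    std::vector<int32_t> base;   // BASE[s]: offset used to place the children of node s\n    std::vector<int32_t> check;  // CHECK[t]: index of the parent that owns slot t\n\n    // Step 1: compute the candidate destination; Step 2: verify it.\n    // Returns the child index, or -1 if the transition is not in the trie.\n    int32_t transit(int32_t s, int32_t word_id) const {\n        int32_t next = base[s] + word_id;\n        if (next >= 0 && next < (int32_t)check.size() && check[next] == s)\n            return next;  // CHECK confirms that the move is valid\n        return -1;        // transition not contained in the trie\n    }\n};\n\nconstexpr int32_t ENDMARKER_ID = 1;  // hypothetical word ID for <#>\n\n// Returns true iff the reversed history word IDs lead to a node that has\n// an endmarker child, i.e., the ngram history is in the trie.\nbool has_history(const DoubleArray& da, const std::vector<int32_t>& rev_hist) {\n    int32_t s = 0;  // the root corresponds to index 0\n    for (int32_t w : rev_hist) {\n        s = da.transit(s, w);\n        if (s < 0) return false;\n    }\n    return da.transit(s, ENDMARKER_ID) >= 0;\n}\n\nEach transit call costs one addition and one comparison, which is where the query speed of the structure comes from.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Double-Array Structure",

"sec_num": "3.1"

},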
|
{ |
|
"text": "Greedy algorithms are widely used for constructing static double-array structures 2 . The construction steps are as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Greedy Construction", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "1. Define the root node of a trie to correspond to index 0 of the double-array structure and 2. Find the BASE value greedily (i.e., in order 1, 2, 3, \u2022 \u2022 \u2022) for all nodes which have fixed their indices in the double-array structure.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Greedy Construction", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "In practice, once the BASE value of a node is fixed, the positions of its children are fixed at the same time, and we can find the BASE values for each child recursively. Figure 3 shows an example of such construction. In this example, three nodes (\"I\", \"you\" and \"they\") are inserted at the same time. This is because the above three node positions are fixed by the BASE value of the node \"eat\". To insert nodes \"I\", \"you\" and \"they\", the following three slots must be empty (i.e., the slots must not be used by other nodes.):", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 171, |
|
"end": 179, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Greedy Construction", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "\u2022 BASE [s] + WORDID(\"I\") \u2022 BASE [s] + WORDID(\"you\") \u2022 BASE [s] + WORDID(\"they\")", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Greedy Construction", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "where s is the index of the node \"eat\". At the construction step, we need to find BASE [s] which satisfies the above conditions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Greedy Construction", |
|
"sec_num": "3.2" |
|
}, |
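
{

"text": "As a sketch of this search (our illustration, not the authors' code; the used bitmap, the growth-on-demand convention, and the scan starting at 1 follow the description above, but the names are assumptions), the following C++ function returns the smallest BASE value whose child slots are all empty:\n\n#include <cstddef>\n#include <cstdint>\n#include <vector>\n\n// Greedy BASE search (Section 3.2): try BASE = 1, 2, 3, ... until every\n// child slot BASE + WORDID(child) is unused. Slots beyond the current end\n// of the array are treated as free, since the arrays can grow on demand.\nint32_t find_base(const std::vector<bool>& used,\n                  const std::vector<int32_t>& child_word_ids) {\n    for (int32_t b = 1;; ++b) {\n        bool ok = true;\n        for (int32_t w : child_word_ids) {\n            std::size_t slot = (std::size_t)(b + w);\n            if (slot < used.size() && used[slot]) { ok = false; break; }\n        }\n        if (ok) return b;  // all children fit at BASE = b\n    }\n}\n\nFor the example above, child_word_ids would hold WORDID("I"), WORDID("you") and WORDID("they"), and the returned value becomes BASE[s] for the node "eat".",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Greedy Construction",

"sec_num": "3.2"

},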
|
{ |
|
"text": "The construction time for a double-array structure poses the greatest challenge. We use a more efficient method (Nakamura and Mochizuki, 2006) instead of the naive method for constructing a doublearray structure because the naive method requires a long time. We call the method \"empty doubly-linked list\". This algorithm is one of the most efficient construction methods devised to date. Figure 4 shows an example of an empty doubly-linked list. We can efficiently define the BASE value of each node by using the CHECK array to store the next empty slot. In this example, in searching the BASE value of a node, the first child node can be set to position 1, and if that fails, we can successively try positions 3, 4, 6, 8, \u2022 \u2022 \u2022 by tracing the list instead of searching all BASE values 0, 1, 2, 3, 4, 5, \u2022 \u2022 \u2022.", |
|
"cite_spans": [ |
|
{ |
|
"start": 112, |
|
"end": 142, |
|
"text": "(Nakamura and Mochizuki, 2006)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 388, |
|
"end": 396, |
|
"text": "Figure 4", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Efficient Construction Algorithm", |
|
"sec_num": "3.3" |
|
}, |
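
{

"text": "The following C++ sketch shows how such a free list can drive the search for a BASE value; the negated-link encoding, the slot-0 sentinel, and the names are assumptions of this illustration, and real implementations differ in detail.\n\n#include <cstdint>\n#include <vector>\n\n// Empty doubly-linked list sketch (after Nakamura and Mochizuki, 2006).\n// A free slot e stores the next free slot in check[e], negated so that it\n// can never be mistaken for a valid parent index; -check[e] == 0 terminates\n// the list, since slot 0 holds the root and is never free.\nstruct EmptyList {\n    std::vector<int32_t> check;\n    int32_t head = 0;  // first free slot; 0 when the list is empty\n\n    // Enumerate BASE candidates for a node whose first child has word ID w0:\n    // each free slot e yields the candidate b = e - w0, so that the first\n    // child lands exactly on an empty slot. try_base returns true when the\n    // remaining children also fit, which stops the scan.\n    template <class Fn>\n    void for_each_candidate(int32_t w0, Fn try_base) const {\n        for (int32_t e = head; e > 0; e = -check[e]) {\n            if (try_base(e - w0)) return;\n        }\n    }\n};\n\nOnly unused slots are visited, instead of every possible BASE value, which is the source of the O(UM) behavior analyzed below.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Efficient Construction Algorithm",

"sec_num": "3.3"

},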
|
{ |
|
"text": "As analyzed by Nakamura and Mochizuki(2006) , the computational cost of a node insertion is less than in the naive method. The original naive method requires O(N M ) time for a node insertion, where M is a number of unique word types and N is a number of nodes of the trie; the algorithm using an empty double-linked list requires O(U M ), where U is the number of unused slots.", |
|
"cite_spans": [ |
|
{ |
|
"start": 15, |
|
"end": 43, |
|
"text": "Nakamura and Mochizuki(2006)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Efficient Construction Algorithm", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "As described in Section 5, we divide the trie into several smaller tries and apply the efficient method for constructing our largest models. This is because it is not feasible to wait several weeks for the large language model structure to be built. The dividing method is currently the only method allowing us to build them. 4 Proposed Methods", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Efficient Construction Algorithm", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "In this section, we present the application of the double-array structure to backwards suffix trees. As this is the most basic structure based on double-array structures, we refer to it as the simple structure and improve its performance as described in the following sections.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "DALM", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "To represent a backwards suffix tree as a doublearray structure, we should modify the tree because it has two types of branches (target words and history nodes), which must be distinguished in the doublearray structure. Instead, we should distinguish the branch type which indicates whether the node is a target word or a history word. We use the endmarker symbol (<#>) for branch discrimination. In prior work, the endmarker symbol has been used to indicate whether an ngram is in the trie. However, there is no need to distinguish whether the node of the tree is included in the language model because all nodes of a backwards suffix tree which represents ngrams surely exist in the model. We use the endmarker symbol to indicate nodes which are end-of-history words. Therefore, target words of ngrams are children of the endmarker symbols that they follow. By using the endmarker symbol, target words can be treated the same as ordinary nodes because all target words are positioned after <#>. Figure 5 shows an example of such construction. We can clearly distinguish target words and history words in the backwards suffix tree.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 997, |
|
"end": 1005, |
|
"text": "Figure 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "DALM", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Querying in the tree is rather simple. For example, consider the case of a query trigram \"I eat fish\" in the trie of Figure 5 . We can trace this trigram in Figure 5 : An example of converting a backwards suffix tree. We introduce endmarker symbols to distinguish the two branch types. We can treat the tree as an ordinary trie that can be represented by a double-array structure while retaining the advantages of the tree structure.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 117, |
|
"end": 125, |
|
"text": "Figure 5", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 157, |
|
"end": 165, |
|
"text": "Figure 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "DALM", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "the same way as the original backwards suffix tree. First, we trace \"eat\" \u2192 \"I\", then trace that to the endmarker symbol <#> and finally find the word \"fish\".", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "DALM", |
|
"sec_num": "4.1" |
|
}, |
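
{

"text": "Putting the pieces together, a query walks the reversed history, drops through the endmarker to the target word, and backs off to a shorter history on failure. The following sketch of this control flow reuses the DoubleArray and ENDMARKER_ID of the sketch in Section 3.1; the value accessors are hypothetical stubs standing in for the reads of the VALUE array introduced below, so this is an illustration rather than DALM's actual code.\n\n#include <cstddef>\n#include <cstdint>\n#include <vector>\n\n// Hypothetical accessors; in DALM these would read the VALUE array (or,\n// with embedding, the vacant BASE/CHECK slots of Section 4.2).\ninline float log_prob_at(const DoubleArray&, int32_t) { return 0.0f; }\ninline float backoff_at(const DoubleArray&, int32_t) { return 0.0f; }\nconstexpr float UNK_LOG_PROB = -99.0f;  // placeholder unknown-word cost\n\n// rev_hist holds w_{n-1} ... w_1 (the reversed history); target is w_n.\nfloat ngram_log_prob(const DoubleArray& da,\n                     const std::vector<int32_t>& rev_hist, int32_t target) {\n    float backoff_sum = 0.0f;\n    // Try the longest history first, then progressively shorter ones.\n    for (std::size_t len = rev_hist.size();; --len) {\n        int32_t s = 0;  // root\n        std::size_t i = 0;\n        for (; i < len && s >= 0; ++i) s = da.transit(s, rev_hist[i]);\n        if (s >= 0 && i == len) {\n            int32_t end = da.transit(s, ENDMARKER_ID);\n            if (end >= 0) {\n                int32_t leaf = da.transit(end, target);\n                if (leaf >= 0) return backoff_sum + log_prob_at(da, leaf);\n                backoff_sum += backoff_at(da, end);  // weight of this history\n            }\n        }\n        if (len == 0) break;  // even the unigram lookup failed\n    }\n    return backoff_sum + UNK_LOG_PROB;\n}",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "DALM",

"sec_num": "4.1"

},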
|
{ |
|
"text": "Next, we describe the procedure for storing probabilities and backoff weights. We prepare a VALUE array to store the probabilities and backoff weights of ngrams. Figure 6 shows the simple DALM structure. The backwards suffix tree stores a backoff weight for each node and a probability for each target word. In simple DALM, each value is stored for the respective position of the corresponding node.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 162, |
|
"end": 170, |
|
"text": "Figure 6", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "DALM", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Embedding is a method for reducing model size. In the simple DALM structure, there are many vacant spaces in the BASE and CHECK arrays. We use these vacant spaces to store backoff weights and probabilities. Figure 7 shows vacant spaces in the simple DALM structure.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 207, |
|
"end": 215, |
|
"text": "Figure 7", |
|
"ref_id": "FIGREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Embedding", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "First, the BASE array slots of target word nodes are unused because target words are always in leaf positions in the backwards suffix tree and do not have any children nodes. In the example of Figure 7 , BASE [9] is not used, and therefore can be used for storing a probability value. This method can reduce the model size because all probabilities are stored into the BASE array. As a result, the VALUE array contains only backoff weights.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 193, |
|
"end": 201, |
|
"text": "Figure 7", |
|
"ref_id": "FIGREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Embedding", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Next, the CHECK array slots of endmarker symbols are also vacant. We do not need to check for endmarker symbol transition because the endmarker symbol <#> is seen for all nodes except target word nodes. This means that all endmarker symbol transitions are ensured to be correct and the CHECK array slots of endmarker symbols do not need to be used. We use this space to store backoff weights.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Embedding", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "In order to avoid false positives, we cannot store backoff weights directly. Instead, we store the positions of the backoff weights in the VALUE array as negative numbers. When a query for an unknown ngram encounters an endmarker symbol node, the value of the CHECK array is never matched because the corresponding value stored there is negative. The same values in the VALUE array can be unified to reduce the memory requirements. Figure 8 illustrates an example of the embedding method. Figure 8 : Implementation of the embedding method. We use vacant spaces in the VALUE array to store the probabilities and indices of backoff weights. The indices of backoff weights are taken with a negative sign to avoid false positives. Backoff weights are stored in the VALUE array, and the same values in the VALUE array can be unified.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 432, |
|
"end": 441, |
|
"text": "Figure 8", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 490, |
|
"end": 498, |
|
"text": "Figure 8", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Embedding", |
|
"sec_num": "4.2" |
|
}, |
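
{

"text": "A C++ sketch of the resulting read paths follows; the exact bit layout is not specified at this level in the paper, so the memcpy-based reinterpretation and the field names are assumptions of this illustration.\n\n#include <cstddef>\n#include <cstdint>\n#include <cstring>\n#include <vector>\n\n// Embedding layout sketch (Section 4.2): a leaf node's unused BASE slot\n// holds the probability bits directly, and an endmarker node's unused\n// CHECK slot holds the negated index of its backoff weight in VALUE.\nstruct EmbeddedDalm {\n    std::vector<int32_t> base, check;\n    std::vector<float> value;  // unified backoff weights only\n\n    float prob_of_leaf(int32_t leaf) const {\n        float p;  // reinterpret the 4 BASE bytes as a float probability\n        std::memcpy(&p, &base[(std::size_t)leaf], sizeof p);\n        return p;\n    }\n\n    float backoff_of_endmarker(int32_t end) const {\n        // CHECK[end] stores -(index into VALUE); the negative sign also\n        // guarantees that a CHECK-vs-parent comparison can never match by\n        // accident, which avoids false positives for unknown ngrams.\n        return value[(std::size_t)(-check[(std::size_t)end])];\n    }\n};",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Embedding",

"sec_num": "4.2"

},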
|
{ |
|
"text": "Ordering is a method for shortening the doublearray structure and increasing the query speed. In ordering, word IDs are assigned in order of unigram probability. This is done at a preprocessing stage, before the DALM is built.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ordering", |
|
"sec_num": "4.3" |
|
}, |
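
{

"text": "The preprocessing itself is small; the following C++ sketch assigns word IDs by descending unigram probability (the reserved IDs and the tie handling are assumptions of ours, not a specification from the paper):\n\n#include <algorithm>\n#include <cstdint>\n#include <string>\n#include <unordered_map>\n#include <utility>\n#include <vector>\n\n// Ordering (Section 4.3): give small word IDs to high-probability words so\n// that the 1s of each insertion array cluster near the front of the array.\nstd::unordered_map<std::string, int32_t> assign_ordered_ids(\n        std::vector<std::pair<std::string, double>> unigrams,  // word, P(word)\n        int32_t first_id = 2) {  // IDs 0 and 1 reserved here (root slot, <#>)\n    std::sort(unigrams.begin(), unigrams.end(),\n              [](const auto& a, const auto& b) { return a.second > b.second; });\n    std::unordered_map<std::string, int32_t> word_id;\n    for (const auto& wp : unigrams) word_id.emplace(wp.first, first_id++);\n    return word_id;\n}",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Ordering",

"sec_num": "4.3"

},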
|
{ |
|
"text": "Before explaining the reasons why this method is effective, we present an interpretation of doublearray construction in Figure 9 which corresponds to the case presented in Figure 3 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 120, |
|
"end": 128, |
|
"text": "Figure 9", |
|
"ref_id": "FIGREF5" |
|
}, |
|
{ |
|
"start": 172, |
|
"end": 180, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Ordering", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "In the previous section, we pointed out that the insertion problem is equivalent to the problem of finding the BASE value of the parent node. Here, we expand this further into the idea that finding the BASE value is equivalent to the problem of finding the shift length of an insertion array. We can create an insertion array which is an array of flag bits set to 1 at the positions of word IDs of children nodes' words. Moreover, we prepare a used array which is also a flag bit array denoting whether the original slots in the double-array structure are occupied. In this situation, finding the shift length is equivalent to the problem of finding the BASE value of the slot for the node \"eat\", and the combined used array denotes the size of the double-array structure after insertion. Figure 10 shows an intuitive example illustrating the efficiency of the ordering method. When word IDs are assigned in order of unigram probability, 1s in the insertion array are gathered toward The insertion problem for the double-array structure is interpreted as a finding problem of a shift length of the insertion array. We can measure the size of the doublearray structure in the used array.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 789, |
|
"end": 798, |
|
"text": "Figure 10", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Ordering", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "the beginning of the array. This means that 1s in the insertion array form clusters, which makes insertion easier than for unordered insertion arrays. This shortens the shift lengths for each insertion array: a shorter double-array structure results.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ordering", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "To compare the performance of DALM with other methods, we conduct experiments on two ngram models built from small and large training corpora. Table 1 shows the specifications of the model.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 143, |
|
"end": 150, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Training data are extracted from the Publication of unexamined Japanese patent applications, which is distributed with the NTCIR 3,4,5,6 patent retrieval task (Atsushi Fujii et al., 2007; Atsushi Fujii et al., 2005; Atsushi Fujii et al., 2004; Makoto Iwayama et al., 2003) . We used data for the period from Figure 10 : An example of word ID ordering efficiency. Word IDs in the insertion array are packed to the front in advance. Therefore, shift lengths for ordered arrays are often shorter than unordered ones. The resulting size of the double-array structure is expected to be smaller than that of an unordered array. 1,993 to 2,002 and extracted paragraphs containing \"background\" and \"example\". This method is similar to the NTCIR 7 Patent Translation Task (Fujii et al., 2008) . The small and large training data sets contained 100 Mwords and 5 Gwords, respectively. Furthermore, we sampled another 100 Mwords as a test set to measure the access speed for extracting ngram probabilities. We used an Intel Our experiments were performed from the viewpoints of speed and model size. We executed each program twice, and the results of the second run were taken as the final performance. ", |
|
"cite_spans": [ |
|
{ |
|
"start": 159, |
|
"end": 187, |
|
"text": "(Atsushi Fujii et al., 2007;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 188, |
|
"end": 215, |
|
"text": "Atsushi Fujii et al., 2005;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 216, |
|
"end": 243, |
|
"text": "Atsushi Fujii et al., 2004;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 244, |
|
"end": 272, |
|
"text": "Makoto Iwayama et al., 2003)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 763, |
|
"end": 783, |
|
"text": "(Fujii et al., 2008)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 308, |
|
"end": 317, |
|
"text": "Figure 10", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "We compared the performance of the DALMs proposed here, namely simple, embedding, ordering and both, where both indicates that the language model uses both embedding and ordering. We conducted experiments examining how these methods affect the size of the doublearray structures and the query speeds. We used the 100 Mwords model in the comparison because it was difficult to build a DALM using the 5 Gwords model. The results are shown in Figure 11 and Table 2 . While both ordering and embedding decreased the model size, the query speed was increased by the former and decreased by the latter. Both was the smallest and most balanced method.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 440, |
|
"end": 449, |
|
"text": "Figure 11", |
|
"ref_id": "FIGREF7" |
|
}, |
|
{ |
|
"start": 454, |
|
"end": 461, |
|
"text": "Table 2", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Optimization Methods", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Building a double-array structure requires a long time, which can sometimes be impractical. In fact, as mentioned above, waiting on construction of the double-array structure of the 5 Gwords model is infeasible. As described in Section 3.3, the efficient algorithm requires O(U M ) time to insert one node and the insertion is iterated N (the total number of insertions) times. If we assume that the number of unused slots at the ith insertion, U i , is proportional to i, or that U i = c \u00d7 i where c is a proportionality constant, we can calculate the building time as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Divided Double-Array Structure", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "\u2211 N i=1 U i M = O ( M N 2 )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Divided Double-Array Structure", |
|
"sec_num": "5.3" |
|
}, |
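
{

"text": "Expanding the sum under this assumption makes the quadratic bound explicit: $\\sum_{i=1}^{N} U_i M = \\sum_{i=1}^{N} c i M = c M \\frac{N(N+1)}{2} = O(MN^2)$. Splitting the trie into k parts therefore divides the dominant $N^2$ term by roughly $k^2$ per part, and the parts can be built in parallel, which is what makes the division method described next practical.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Divided Double-Array Structure",

"sec_num": "5.3"

},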
|
{ |
|
"text": ". To shorten the build time, we divided the original trie into several parts. Building parts of the original trie is possible because N is reduced. Moreover, these double-array structures can be built in parallel. Note that query results for both original and divided tries are completely equivalent because divided tries hold all the ngram statistics of the original trie. This method is similar to that used in randomized language models (Talbot and Brants, 2008) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 440, |
|
"end": 465, |
|
"text": "(Talbot and Brants, 2008)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Divided Double-Array Structure", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "We compared the differences between the methods using the original and divided double-array structures. In the comparison, we also used the 100 Mwords model with the both optimization method described in the previous section (Figure 12 and Table 3).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 225, |
|
"end": 235, |
|
"text": "(Figure 12", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Divided Double-Array Structure", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "Although dividing the trie increased the size of the DALM slightly, the model size was still smaller than that without optimization. Query speed increased as the number of parts was increased. We attributed this to the divided DALM consisting of several double-array structures, each smaller than the undivided structure which results in an increase. Figure 12 shows that there is a trade-off relation between model size and query speed.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 351, |
|
"end": 360, |
|
"text": "Figure 12", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Divided Double-Array Structure", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "Below, we use the 5 Gwords model in our experiments. In our environment, building a 5 Gwords double-array structure required about 4 days when the double-array structures were divided into 8 parts, even though we used the more efficient algorithm described in Section 3.3. The time required for building the model when the original structure was divided into less than 8 parts was too long. Thus, a more efficient building algorithm is essential for advancing this research further.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Divided Double-Array Structure", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "Using the 100 Mwords and 5 Gwords models, we compared DALM with other methods (KenLM (Kenneth Heafield, 2011) and SRILM (Stolcke, 2002) ). In this experiment, we used the both method (which is mentioned above) for DALM and divided the original trie into 8 parts and built double-array structures. The results are shown in Figure 13 and Table 4 ; the group on the left shows the results for the 100 Mwords model and the group on the right shows the results for the 5 Gwords model.", |
|
"cite_spans": [ |
|
{ |
|
"start": 120, |
|
"end": 135, |
|
"text": "(Stolcke, 2002)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 322, |
|
"end": 331, |
|
"text": "Figure 13", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 336, |
|
"end": 343, |
|
"text": "Table 4", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Comparison with Other Methods", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "The experimental results clearly indicate that DALM is the fastest of all the compared methods and that the model size is nearly the same or slightly smaller than that of KenLM (Probing). Whereas KenLM (Trie) is the smallest model, it is slower than DALM.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparison with Other Methods", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "The differences between the 5 Gwords versions of DALM and KenLM (Probing) are smaller in comparison with the 100 Mwords models. This is because hash-based language models have an advantage when storing higher-order ngrams. Large language models have more 5grams, which leads to shorter backoff times. On the other hand, trie-based language models have to trace higher-order ngrams for every query, which requires more time.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparison with Other Methods", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "Finally, we discuss practical situations. We con- ducted this study's experiments using test set text written by humans. In some applications such as statistical machine translations, language model systems should compute probabilities of many unnatural ngrams which will be unknown. This may affect query speed because querying unknown and unnatural ngrams generate many backoffs. They may results in trie-based LM being slightly faster, because traversing the trie can stop immediately when it detects that a queried ngram history is not contained in the trie. On the other hand, hash-based LM such as KenLM probing would repeat queries until finding truncated ngram histories in the trie.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparison with Other Methods", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "We proposed a method for implementing language models based on double-array structures. We call this method DALM. Moreover, we proposed two methods for optimizing DALM: embedding and ordering. Embedding is a method whereby empty spaces in arrays are used to store ngram probabilities and backoff weights, and ordering is a method for numbering word IDs; these methods re-duce model size and increase query speed. These two optimization methods work well independently, but even better performance can be achieved if they are combined. We also used a division method to build the model structure in several parts in order to speed up the construction of double-array structures. Although this procedure results in a slight increase in model size, the divided double-array structures mostly retained the compactness and speed of the original structure. The time required for building doublearray structures is the bottleneck of DALM as it is sometimes too long to be practical, even though the model structure itself achieves high performance. In future work, we will develop a faster algorithm for building double-array structures.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "While DALM has outperformed state-of-the-art language model implementations methods in our experiments, we should continue to consider ways to optimize the method for higher-order ngrams.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "In our experience, it is considerably easier to compress backoff weights than to compress probabilities, although both are represented with floating-point numbers. We use this knowledge in our methods.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We were unable to find an original source for this technique. However, this method is commonly used in double-array implementations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We thank the anonymous reviewers for many valuable comments. This work is supported by JSPS KAKENHI Grant Number 24650063.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "An Efficient Digital Search Algorithm by Using a Double-Array Structure", |
|
"authors": [ |
|
{ |
|
"first": "J.-I", |
|
"middle": [], |
|
"last": "Aoe", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1989, |
|
"venue": "IEEE Transactions on Software Engineering", |
|
"volume": "15", |
|
"issue": "9", |
|
"pages": "1066--1077", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J.-I. Aoe. 1989. An Efficient Digital Search Algorithm by Using a Double-Array Structure. IEEE Transac- tions on Software Engineering, 15(9):1066-1077.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Overview of the Patent Retrieval Task at NTCIR-4", |
|
"authors": [ |
|
{ |
|
"first": "Atsushi", |
|
"middle": [], |
|
"last": "Fujii", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Makoto", |
|
"middle": [], |
|
"last": "Iwayama", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noriko", |
|
"middle": [], |
|
"last": "Kando", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Atsushi Fujii, Makoto Iwayama, and Noriko Kando. 2004. Overview of the Patent Retrieval Task at NTCIR-4.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Overview of Patent Retrieval Task at NTCIR-5", |
|
"authors": [ |
|
{ |
|
"first": "Atsushi", |
|
"middle": [], |
|
"last": "Fujii", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Makoto", |
|
"middle": [], |
|
"last": "Iwayama", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noriko", |
|
"middle": [], |
|
"last": "Kando", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Atsushi Fujii, Makoto Iwayama, and Noriko Kando. 2005. Overview of Patent Retrieval Task at NTCIR- 5.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Overview of the Patent Retrieval Task at the NTCIR-6 Workshop", |
|
"authors": [ |
|
{ |
|
"first": "Atsushi", |
|
"middle": [], |
|
"last": "Fujii", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Makoto", |
|
"middle": [], |
|
"last": "Iwayama", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noriko", |
|
"middle": [], |
|
"last": "Kando", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "359--365", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Atsushi Fujii, Makoto Iwayama, and Noriko Kando. 2007. Overview of the Patent Retrieval Task at the NTCIR-6 Workshop. pages 359-365.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Hash, displace, and compress", |
|
"authors": [ |
|
{ |
|
"first": "Djamal", |
|
"middle": [], |
|
"last": "Belazzougui", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Fabiano", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Botelho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Dietzfelbinger", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "ESA", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "682--693", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Djamal Belazzougui, Fabiano C. Botelho, and Martin Di- etzfelbinger. 2009. Hash, displace, and compress. In ESA, pages 682-693.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Large Language Models in Machine Translation", |
|
"authors": [ |
|
{ |
|
"first": "Thorsten", |
|
"middle": [], |
|
"last": "Brants", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Ashok", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peng", |
|
"middle": [], |
|
"last": "Popat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Franz", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Och", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Dean", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the 2007 Joint Conference on EMNLP-CoNLL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "858--867", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thorsten Brants, Ashok C. Popat, Peng Xu, Franz J. Och, and Jeffrey Dean. 2007. Large Language Models in Machine Translation. In Proceedings of the 2007 Joint Conference on EMNLP-CoNLL, pages 858-867. ACL.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Self-organized language modeling for speech recognition", |
|
"authors": [ |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Merialdo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Jelinek", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1990, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "B. Merialdo F. Jelinek. 1990. Self-organized language modeling for speech recognition.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Trie memory", |
|
"authors": [ |
|
{ |
|
"first": "Edward", |
|
"middle": [], |
|
"last": "Fredkin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1960, |
|
"venue": "Communications of the ACM", |
|
"volume": "3", |
|
"issue": "9", |
|
"pages": "490--499", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Edward Fredkin. 1960. Trie memory. Communications of the ACM, 3(9):490-499.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Simple Compression Code Supporting Random Access and Fast String Matching", |
|
"authors": [ |
|
{ |
|
"first": "Kimmo", |
|
"middle": [], |
|
"last": "Fredriksson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fedor", |
|
"middle": [], |
|
"last": "Nikitin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the 6th international conference on Experimental algorithms, WEA'07", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "203--216", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kimmo Fredriksson and Fedor Nikitin. 2007. Simple Compression Code Supporting Random Access and Fast String Matching. In Proceedings of the 6th in- ternational conference on Experimental algorithms, WEA'07, pages 203-216. Springer-Verlag.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Overview of the Patent Translation Task at the NTCIR-7 Workshop", |
|
"authors": [ |
|
{ |
|
"first": "Atsushi", |
|
"middle": [], |
|
"last": "Fujii", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Masao", |
|
"middle": [], |
|
"last": "Utiyama", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mikio", |
|
"middle": [], |
|
"last": "Yamamoto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Takehito", |
|
"middle": [], |
|
"last": "Utsuro", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Atsushi Fujii, Masao Utiyama, Mikio Yamamoto, and Takehito Utsuro. 2008. Overview of the Patent Trans- lation Task at the NTCIR-7 Workshop.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Tightly Packed Tries: How to Fit Large Models into Memory, and Make them Load Fast, Too", |
|
"authors": [ |
|
{ |
|
"first": "Ulrich", |
|
"middle": [], |
|
"last": "Germann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Joanis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel", |
|
"middle": [], |
|
"last": "Larkin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the Workshop on SETQA-NLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "31--39", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ulrich Germann, Eric Joanis, and Samuel Larkin. 2009. Tightly Packed Tries: How to Fit Large Models into Memory, and Make them Load Fast, Too. In Proceed- ings of the Workshop on SETQA-NLP, pages 31-39. ACL.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Storing the Web in Memory: Space Efficient Language Models with Constant Time Retrieval", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Guthrie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Hepple", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the 2010 Conference on EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "262--272", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David Guthrie and Mark Hepple. 2010. Storing the Web in Memory: Space Efficient Language Models with Constant Time Retrieval. In Proceedings of the 2010 Conference on EMNLP, pages 262-272. ACL.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "KenLM: Faster and Smaller Language Model Queries", |
|
"authors": [ |
|
{ |
|
"first": "Kenneth", |
|
"middle": [], |
|
"last": "Heafield", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the Sixth Workshop on Statistical Machine Translation. ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kenneth Heafield. 2011. KenLM: Faster and Smaller Language Model Queries. In Proceedings of the Sixth Workshop on Statistical Machine Translation. ACL.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Overview of Patent Retrieval Task at NTCIR-3", |
|
"authors": [ |
|
{ |
|
"first": "Makoto", |
|
"middle": [], |
|
"last": "Iwayama", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Atsushi", |
|
"middle": [], |
|
"last": "Fujii", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noriko", |
|
"middle": [], |
|
"last": "Kando", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Akihiko", |
|
"middle": [], |
|
"last": "Takano", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Makoto Iwayama, Atsushi Fujii, Noriko Kando, and Aki- hiko Takano. 2003. Overview of Patent Retrieval Task at NTCIR-3.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Fast Computation of Updating Method of a Dictionary for Compression Digital Search Tree", |
|
"authors": [ |
|
{ |
|
"first": "Yasumasa", |
|
"middle": [], |
|
"last": "Nakamura", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hisatoshi", |
|
"middle": [], |
|
"last": "Mochizuki", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "", |
|
"volume": "47", |
|
"issue": "", |
|
"pages": "16--27", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yasumasa Nakamura and Hisatoshi Mochizuki. 2006. Fast Computation of Updating Method of a Dictio- nary for Compression Digital Search Tree. Trans- actions of Information Processing Society of Japan. Data, 47(13):16-27.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Faster and Smaller N-Gram Language Models", |
|
"authors": [ |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Pauls", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Klein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the 49th Annual Meeting of the ACL-HLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "258--267", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adam Pauls and Dan Klein. 2011. Faster and Smaller N-Gram Language Models. In Proceedings of the 49th Annual Meeting of the ACL-HLT, pages 258-267. ACL.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "SRILM-an Extensible Language Modeling Toolkit", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Stolcke", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Seventh International Conference on Spoken Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "A. Stolcke. 2002. SRILM-an Extensible Language Mod- eling Toolkit. Seventh International Conference on Spoken Language Processing.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Randomized Language Models via Perfect Hash Functions", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Talbot", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thorsten", |
|
"middle": [], |
|
"last": "Brants", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of ACL-08: HLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David Talbot and Thorsten Brants. 2008. Randomized Language Models via Perfect Hash Functions. In Pro- ceedings of ACL-08: HLT.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Smoothed Bloom Filter Language Models: Tera-Scale LMs on the Cheap", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Talbot", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Miles", |
|
"middle": [], |
|
"last": "Osborne", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the 2007 Joint Conference on EMNLP-CoNLL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "468--476", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David Talbot and Miles Osborne. 2007. Smoothed Bloom Filter Language Models: Tera-Scale LMs on the Cheap. In Proceedings of the 2007 Joint Confer- ence on EMNLP-CoNLL, pages 468-476.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"text": "A trie and a corresponding double-array structure. Node n s is represented by the slots BASE [s] and CHECK [s]. A link from a node n s with a word w is indicated by CHECK [next] == s.", |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null |
|
}, |
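The lookup invariant in the caption above is simple enough to sketch in code. The following is a minimal illustration and not the authors' DALM implementation: the struct name, the sentinel value -1, and the hand-built toy arrays are all assumptions made for the example.

```cpp
#include <cstdint>
#include <iostream>
#include <vector>

// Minimal sketch of the double-array transition described in the caption:
// the child of node s for word id w is expected at next = BASE[s] + w,
// and the edge exists only if CHECK[next] == s.
struct DoubleArray {
    std::vector<int32_t> base;
    std::vector<int32_t> check;

    // Returns the child of node s for word id w, or -1 if the edge is absent.
    int32_t traverse(int32_t s, int32_t w) const {
        const int32_t next = base[s] + w;
        if (next >= 0 && next < static_cast<int32_t>(check.size()) &&
            check[next] == s) {
            return next;
        }
        return -1;
    }
};

int main() {
    // Hand-built toy arrays: root node 0 has BASE[0] = 1, so word id 2
    // leads to slot 3, which is a valid child because CHECK[3] == 0.
    DoubleArray da;
    da.base  = {1, 0, 0, 5};
    da.check = {-1, -1, -1, 0};
    std::cout << da.traverse(0, 2) << "\n";  // prints 3 (edge exists)
    std::cout << da.traverse(0, 1) << "\n";  // prints -1 (no such edge)
    return 0;
}
```

An n-gram query would repeat such a traverse step once per context word while walking the backwards suffix tree, so each transition costs only one array access and one comparison.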
|
"FIGREF1": { |
|
"text": "Greedy insertion of trie elements. The children of a node are collectively inserted into the double-array when the BASE value of the node is fixed.", |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null |
|
}, |
|
"FIGREF2": { |
|
"text": "Empty doubly-linked list. Unused CHECK slots are used to indicate the next unused slots, and unused BASE slots are used to indicate previous unused slots. Thus, the BASE and CHECK arrays are used as a doubly-linked list which can reduce the number of ineffective trials.", |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null |
|
}, |
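As a rough illustration of this free-slot bookkeeping (again a hedged sketch rather than the paper's code; the field layout, the -1 end-of-list sentinel, and the example slot indices are assumptions), the empty slots can be chained through the same two arrays:

```cpp
#include <cstdint>
#include <iostream>
#include <vector>

// Sketch of the empty doubly-linked list from the caption above: for an
// unused slot e, CHECK[e] stores the next empty slot and BASE[e] stores
// the previous one; -1 marks the ends of the list.
struct EmptyList {
    std::vector<int32_t> base;   // prev pointers for empty slots
    std::vector<int32_t> check;  // next pointers for empty slots
    int32_t head = -1;           // first empty slot

    // Visit empty slots in order, e.g. to test candidate BASE values
    // without scanning occupied slots (the "ineffective trials").
    template <class Visitor>
    void for_each_empty(Visitor visit) const {
        for (int32_t e = head; e != -1; e = check[e]) visit(e);
    }

    // Unlink slot e once a trie node has been stored in it.
    void occupy(int32_t e) {
        const int32_t prev = base[e];
        const int32_t next = check[e];
        if (prev != -1) check[prev] = next; else head = next;
        if (next != -1) base[next] = prev;
    }
};

int main() {
    // Empty slots 1, 4, and 6 chained together; other slots are occupied.
    EmptyList el;
    el.base  = {0, -1, 0, 0, 1, 0, 4};
    el.check = {0,  4, 0, 0, 6, 0, -1};
    el.head = 1;
    el.occupy(4);  // insertion claims slot 4; the list becomes 1 <-> 6
    el.for_each_empty([](int32_t e) { std::cout << e << " "; });  // 1 6
    std::cout << "\n";
    return 0;
}
```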
|
"FIGREF3": { |
|
"text": "The simple DALM data structure. The BASE and CHECK arrays are used in the same way as in a double-array structure. To return probabilities and backoff weights, a VALUE array is introduced.", |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null |
|
}, |
|
"FIGREF4": { |
|
"text": "Unused slots in the simple DALM structure used for other types of information, such as probabilities.", |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null |
|
}, |
|
"FIGREF5": { |
|
"text": "Interpretation of a double-array construction.", |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null |
|
}, |
|
"FIGREF7": { |
|
"text": "Comparison between tuned and non-tuned double-array structures.", |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null |
|
}, |
|
"FIGREF8": { |
|
"text": "Comparison between divided and original double-array structures.", |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null |
|
}, |
|
"FIGREF9": { |
|
"text": "Comparison between DALM and other language model systems.", |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null |
|
}, |
|
"TABREF0": { |
|
"text": "Corpus and model specifications.", |
|
"html": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td/><td colspan=\"2\">Corpus Unique</td><td>N gram</td></tr><tr><td>Model</td><td>Size</td><td>Type</td><td>Type</td></tr><tr><td/><td colspan=\"3\">(words) (words) (1-5gram)</td></tr><tr><td>100 Mwords</td><td>100 M</td><td>195 K</td><td>31 M</td></tr><tr><td>5 Gwords</td><td colspan=\"2\">5 G 2,140 K</td><td>936 M</td></tr><tr><td>Test set</td><td>100 M</td><td>198 K</td><td>-</td></tr></table>", |
|
"num": null |
|
}, |
|
"TABREF1": { |
|
"text": "Comparison between tuned and non-tuned double-array structures.", |
|
"html": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td>Method</td><td>Size (MB) (queries/s) Speed</td></tr><tr><td>Simple</td><td>1,152 1,065,536</td></tr><tr><td>Embedding</td><td>782 1,004,555</td></tr><tr><td>Ordering</td><td>726 1,083,703</td></tr><tr><td>Both</td><td>498 1,057,607</td></tr></table>", |
|
"num": null |
|
}, |
|
"TABREF2": { |
|
"text": "Comparison between divided and original double-array structures.", |
|
"html": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td>Number of parts</td><td>Size (MB) (queries/s) Speed</td></tr><tr><td>1</td><td>498 1,057,607</td></tr><tr><td>2</td><td>502 1,105,358</td></tr><tr><td>4</td><td>510 1,087,619</td></tr><tr><td>8</td><td>540 1,098,594</td></tr></table>", |
|
"num": null |
|
}, |
|
"TABREF3": { |
|
"text": "Comparison between DALM and other methods.", |
|
"html": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td/><td colspan=\"2\">100 Mwords Model</td><td colspan=\"2\">5 Gwords Model</td></tr><tr><td>LM</td><td colspan=\"2\">Size (MB) (queries/s) Speed</td><td colspan=\"2\">Size (MB) (queries/s) Speed</td></tr><tr><td>SRILM</td><td>1,194</td><td colspan=\"2\">894,138 31,747</td><td>729,447</td></tr><tr><td>KenLM (Probing)</td><td>665</td><td colspan=\"2\">1,002,489 18,685</td><td>913,208</td></tr><tr><td>KenLM (Trie)</td><td>340</td><td>804,513</td><td>9,606</td><td>635,300</td></tr><tr><td>DALM (8 parts)</td><td>540</td><td colspan=\"2\">1,098,594 15,345</td><td>953,186</td></tr></table>", |
|
"num": null |
|
} |
|
} |
|
} |
|
} |