|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T14:59:18.689347Z" |
|
}, |
|
"title": "In-Batch Negatives for Knowledge Distillation with Tightly-Coupled Teachers for Dense Retrieval", |
|
"authors": [ |
|
{ |
|
"first": "Sheng-Chieh", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Waterloo", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Jheng-Hong", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Waterloo", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Waterloo", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Cheriton", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Waterloo", |
|
"location": {} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "We present an efficient training approach to text retrieval with dense representations that applies knowledge distillation using the Col-BERT late-interaction ranking model. Specifically, we propose to transfer the knowledge from a bi-encoder teacher to a student by distilling knowledge from ColBERT's expressive MaxSim operator into a simple dot product. The advantage of the bi-encoder teacherstudent setup is that we can efficiently add inbatch negatives during knowledge distillation, enabling richer interactions between teacher and student models. In addition, using Col-BERT as the teacher reduces training cost compared to a full cross-encoder. Experiments on the MS MARCO passage and document ranking tasks and data from the TREC 2019 Deep Learning Track demonstrate that our approach helps models learn robust representations for dense retrieval effectively and efficiently. * Contributed equally. The standard reranker architecture, while effective, exhibits high query latency, on the order of seconds per query (Hofst\u00e4tter and Hanbury, 2019; Khattab and Zaharia, 2020) because expensive neural inference must be applied at query time on query-passage pairs. This design is known as a cross-encoder (Humeau et al., 2020), which exploits query-passage attention interactions across all transformer layers. As an alternative, a biencoder design provides an approach to ranking with dense representations that is far more efficient than cross-encoders (", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "We present an efficient training approach to text retrieval with dense representations that applies knowledge distillation using the Col-BERT late-interaction ranking model. Specifically, we propose to transfer the knowledge from a bi-encoder teacher to a student by distilling knowledge from ColBERT's expressive MaxSim operator into a simple dot product. The advantage of the bi-encoder teacherstudent setup is that we can efficiently add inbatch negatives during knowledge distillation, enabling richer interactions between teacher and student models. In addition, using Col-BERT as the teacher reduces training cost compared to a full cross-encoder. Experiments on the MS MARCO passage and document ranking tasks and data from the TREC 2019 Deep Learning Track demonstrate that our approach helps models learn robust representations for dense retrieval effectively and efficiently. * Contributed equally. The standard reranker architecture, while effective, exhibits high query latency, on the order of seconds per query (Hofst\u00e4tter and Hanbury, 2019; Khattab and Zaharia, 2020) because expensive neural inference must be applied at query time on query-passage pairs. This design is known as a cross-encoder (Humeau et al., 2020), which exploits query-passage attention interactions across all transformer layers. As an alternative, a biencoder design provides an approach to ranking with dense representations that is far more efficient than cross-encoders (", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "For well over half a century, solutions to the ad hoc retrieval problem-where the system's task is return a list of top k texts from an arbitrarily large corpus D that maximizes some metric of quality such as average precision or NDCG-has been dominated by sparse vector representations, for example, bag-of-words BM25. Even in modern multi-stage ranking architectures, which take advantage of large pretrained transformers such as BERT (Devlin et al., 2019) , the models are deployed as rerankers over initial candidates retrieved based on sparse vector representations; this is sometimes called \"first-stage retrieval\". One well-known example of this design is the BERT-based reranker of Nogueira and Cho (2019) ; see Lin et al. (2020) for a recent survey. multi-vector bi-encoders or cross-encoders. Hence, improving the effectiveness of single-vector biencoders represents an important problem.", |
|
"cite_spans": [ |
|
{ |
|
"start": 437, |
|
"end": 458, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 690, |
|
"end": 713, |
|
"text": "Nogueira and Cho (2019)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 720, |
|
"end": 737, |
|
"text": "Lin et al. (2020)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "One approach to improving the effectiveness of single-vector bi-encoders is hard negative mining, by training with carefully selected negative examples that emphasize discrimination between relevant and non-relevant texts. There are several approaches to accomplish this. Karpukhin et al. (2020) and Qu et al. (2020) leverage large in-batch negatives to enrich training signals. Guu et al. (2020) and propose to mine hard negatives using the trained bi-encoder itself. By searching for global negative samples from an asynchronously updated ANN index, the bi-encoder can learn information not present in the training data produced by sparse representations . However, both large in-batch negative sampling and asynchronous ANN index updates are computationally demanding. The later is especially impractical for large corpora since it requires periodic inference over all texts in the corpus to ensure that the best negative examples are retrieved.", |
|
"cite_spans": [ |
|
{ |
|
"start": 272, |
|
"end": 295, |
|
"text": "Karpukhin et al. (2020)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 300, |
|
"end": 316, |
|
"text": "Qu et al. (2020)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 379, |
|
"end": 396, |
|
"text": "Guu et al. (2020)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "There is also work that explores knowledge distillation (KD) (Hinton et al., 2015) to enhance retrieval effectiveness and efficiency. Most related to our study is Hofst\u00e4tter et al. (2020) , who demonstrate that KD using a cross-encoder teacher significantly improves the effectiveness of bi-encoders for dense retrieval. Similarly, Barkan et al. (2020) investigate the effectiveness of distilling a trained cross-encoder into a bi-encoder for sentence similarity tasks. Gao et al. (2020a) explore KD combinations of different objectives such as language modeling and ranking. However, the above papers use computationally expensive cross-encoder teacher models; thus, combining them for KD with more advanced negative sampling techniques can be impractical.", |
|
"cite_spans": [ |
|
{ |
|
"start": 61, |
|
"end": 82, |
|
"text": "(Hinton et al., 2015)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 163, |
|
"end": 187, |
|
"text": "Hofst\u00e4tter et al. (2020)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 332, |
|
"end": 352, |
|
"text": "Barkan et al. (2020)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 470, |
|
"end": 488, |
|
"text": "Gao et al. (2020a)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In light of existing work on hard negative mining and knowledge distillation, we propose to improve the effectiveness of single-vector bi-encoders with a more efficient KD approach: in-batch KD using a bi-encoder teacher. The advantage of our design is that, during distillation, it enables the efficient exploitation of all possible query-passage pairs within a minibatch, which we call tight coupling (illustrated in Figure 1 ). This is a key difference between our KD approach and previous methods for dense retrieval, where only the scores of given query-passage triplets (not all combinations) are", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 419, |
|
"end": 427, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Cross-Encoder", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bi-Encoder", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "d \u2212 q0 q 0 d + q0 q 0 q 1 q 2 q 1 d \u2212 q 1 d + q 1 q 2 d \u2212 q2 d + q2 q 0 q q 2 d + q0 d + q 1 d + q2 d \u2212 q0 d \u2212 q d \u2212 q2", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bi-Encoder", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Bi-Encoder", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Batch triplets", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Target: Pairwise KD Target: In-batch KD Teacher Student Embeddings q 0 q 1 q 2 d \u2212 q0 d \u2212 q 1 d \u2212 q2 d + q0 d + q d + q2 d \u2212 q2 d + q2 d \u2212 q 1 d + q d \u2212 q0 d + q0 d \u2212 q2 d + q2 d \u2212 q 1 d + q 1 d \u2212 q0 d + q0 d \u2212 q2 d + q2 d \u2212 q 1 d + q 1 d \u2212 q0 d + q0 q0 q1 q2 d \u2212 q0 d \u2212 q1 d \u2212 q2 d + q0 d + q1 d + q2", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Batch triplets", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Teacher Figure 1 : Illustration of the differences between pairwise knowledge distillation and our proposed in-batch knowledge distillation.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 8, |
|
"end": 16, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Batch triplets", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "computed due to the computational costs of crossencoders (Hofst\u00e4tter et al., 2020; Gao et al., 2020a; Barkan et al., 2020) . The contribution of this work is a simple technique for efficiently adding in-batch negative samples during knowledge distillation when training a single-vector bi-encoder. For the remainder of this paper, we refer to this technique as \"in-batch KD\" for convenience. We empirically show that our model, even trained with BM25 negatives, can be more effective than cross-encoder teachers. With hard negatives, our method approaches the state of the art in dense retrieval. Our in-batch KD technique is able to incorporate hard negatives in a computationally efficient manner, without requiring large amounts of GPU memory for large batch sizes or expensive periodic index refreshes.", |
|
"cite_spans": [ |
|
{ |
|
"start": 57, |
|
"end": 82, |
|
"text": "(Hofst\u00e4tter et al., 2020;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 83, |
|
"end": 101, |
|
"text": "Gao et al., 2020a;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 102, |
|
"end": 122, |
|
"text": "Barkan et al., 2020)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Batch triplets", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We focus on improving the training efficiency and retrieval effectiveness of dense retrieval and begin by formalizing it as a dense representation learning problem. To be more specific, we propose to use knowledge distillation to enrich training signals and stabilize the representation learning procedure of bi-encoder models in the context of the well-known Noise-Contrastive Estimation (NCE) framework.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The bi-encoder design has been widely adopted for dense retrieval Guu et al., 2020; Karpukhin et al., 2020; Luan et al., 2021; Qu et al., 2020; , where queries and passages are encoded in a low-dimensional space. It aims to learn lowdimensional representations that pull queries and relevant passages together and push queries and non-relevant passages apart.", |
|
"cite_spans": [ |
|
{ |
|
"start": 66, |
|
"end": 83, |
|
"text": "Guu et al., 2020;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 84, |
|
"end": 107, |
|
"text": "Karpukhin et al., 2020;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 108, |
|
"end": 126, |
|
"text": "Luan et al., 2021;", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 127, |
|
"end": 143, |
|
"text": "Qu et al., 2020;", |
|
"ref_id": "BIBREF28" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dense Retrieval with Bi-encoders", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Following the work of Mnih and Kavukcuoglu (2013), we formulate a common objective for dense representation learning for passage retrieval. Given a query q and a parameterized scoring function \u03c6 \u03b8 that computes the relevance between a query and a candidate passage p, we define a probability distribution over documents in a corpus D with respect to relevance, as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dense Retrieval with Bi-encoders", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "P q \u03b8 (p, D) = exp(\u03c6 \u03b8 (q, p)) p \u2208D exp(\u03c6 \u03b8 (q, p )) = exp(h q \u2022 h p ) p \u2208D exp(h q \u2022 h p ) ,", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Dense Retrieval with Bi-encoders", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "where h q (h p ) \u2208 R d denotes the query (passage) representation produced by the bi-encoder. A typical bi-encoder uses a simple scoring function for \u03c6 \u03b8 , for example, the inner product of two vectors, as shown above.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dense Retrieval with Bi-encoders", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "The main challenge of evaluating and computing gradients of Eq. (1) is the prohibitively expensive computation cost given the number of passages in the corpus D, typically millions (or even more). This is already setting aside the cost of using pretrained transformers such as BERT as the encoder to compute h q and h p .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dense Retrieval with Bi-encoders", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Thus, previous work approximates Eq. (1) by NCE, which samples p \u2208 D + from training data and p \u2208 D = {D + \u222a D \u2212 }, where D \u2212 is from a noisy distribution such as candidates retrieved by BM25 (Nogueira and Cho, 2019) , filtered by finetuned transformers (Qu et al., 2020) , or retrieved by an asynchronously updated bi-encoder model itself . Another simple yet effective approach is in-batch negative sampling, as used by Karpukhin et al. (2020) , which takes p and p of other queries within a minibatch as negative examples in NCE.", |
|
"cite_spans": [ |
|
{ |
|
"start": 192, |
|
"end": 216, |
|
"text": "(Nogueira and Cho, 2019)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 254, |
|
"end": 271, |
|
"text": "(Qu et al., 2020)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 422, |
|
"end": 445, |
|
"text": "Karpukhin et al. (2020)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dense Retrieval with Bi-encoders", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Other than designing sophisticated sampling methods for p , training bi-encoder models using knowledge distillation (KD) with effective teacher models is another promising approach (Hofst\u00e4tter et al., 2020) . In this case, we aim to make the bi-encoder model mimic the teacher model's probability distribution as follows:", |
|
"cite_spans": [ |
|
{ |
|
"start": 181, |
|
"end": 206, |
|
"text": "(Hofst\u00e4tter et al., 2020)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Knowledge Distillation", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "P q \u03b8;student (p, D ) = exp(h q \u2022 h p ) p \u2208D exp(h q \u2022 h p ) \u2248 exp(\u03c6\u03b8(q, p)/\u03c4 ) p \u2208D exp(\u03c6\u03b8(q, p )/\u03c4 ) = P q \u03b8;teacher (p, D ),", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "Knowledge Distillation", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "where \u03c6\u03b8 denotes the relevance score estimated by a pretrained model parameterized by\u03b8 and \u03c4 , the temperature hyperparameter used in the KD framework. To improve retrieval effectiveness, one can leverage pre-computed scores from pretrained models such as cross-encoders, e.g., BERT, bi-encoders, e.g., ColBERT, or ensembled scores from multiple models \u03c6\u03b8 = j \u03c6\u03b8 ;j .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Knowledge Distillation", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "3 Our Approach", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Knowledge Distillation", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Using KD in Eq. (2) provides soft labels for biencoder training, and can be integrated with the previously mentioned NCE framework. In this work, we propose to enhance teacher-student interactions by adding in-batch negatives to our knowledge distillation. Specifically, we estimate \u03c6 \u03b8 on in-batch examples from a minibatch B guided by an auxiliary teacher model \u03c6\u03b8 through the minimization of Kullback-Leibler (KL) divergence of the two distributions:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "In-batch Knowledge Distillation", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "arg min \u03b8 q\u2208Q B p\u2208D B L \u03c6 \u03b8 ,\u03c6\u03b8 ,", |
|
"eq_num": "(3)" |
|
} |
|
], |
|
"section": "In-batch Knowledge Distillation", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "where L \u03c6 \u03b8 ,\u03c6\u03b8 is:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "In-batch Knowledge Distillation", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "P q \u03b8;teacher (p, D B ) log P q \u03b8;teacher (p, D B ) P q \u03b8;student (p, D B )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "In-batch Knowledge Distillation", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": ". 4Note that here we consider all pairwise relationship between queries and passages within a minibatch that contains a query set Q B and a passage set D B .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "In-batch Knowledge Distillation", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "A cross-encoder has been shown to be an effective teacher (Hofst\u00e4tter et al., 2020; Gao et al., 2020a) since it allows rich interactions between the intermediate transformer representations of a query q and a passage p. For example, a \"vanilla\" crossencoder design using BERT can be denoted as:", |
|
"cite_spans": [ |
|
{ |
|
"start": 58, |
|
"end": 83, |
|
"text": "(Hofst\u00e4tter et al., 2020;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 84, |
|
"end": 102, |
|
"text": "Gao et al., 2020a)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Teacher Model Choice", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "\u03c6\u03b8 ;Cat W f (h q\u2295p ),", |
|
"eq_num": "(5)" |
|
} |
|
], |
|
"section": "Teacher Model Choice", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "where the ranking score is first computed by the hidden representation of the concatenation q \u2295 p from BERT (along with the standard special tokens) and then mapped to a scalar by a pooling operation f and a mapping matrix W . Although effective, due to BERT's quadratic complexity with respect to input sequence length, this design makes exhaustive combinations between a query and possible candidates impractical, since this requires evaluating cross-encoders |B| 2 times to compute Eq. (3) using Eq. (5). Thus, an alternative is to conduct pairwise KD by computing the KL divergence of only two probabilities of a positive pair (q, p) and a negative pair (q, p ) for each query q. However, this might not yield a good approximation of Eq. 2.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Teacher Model Choice", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "A bi-encoder can also be leveraged as a teacher model, which has the advantage that it is more feasible to perform exhaustive comparisons between queries and passages since they are passed through the encoder independently. Among biencoder designs, ColBERT is a representative model that uses late interactions of multiple vectors", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Teacher Model Choice", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "({h 1 q , . . . , h i q }, {h 1 p , . . . , h j p })", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Teacher Model Choice", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "to improve the robustness of dense retrieval, as compared to inner products of pairs of single vectors (h q , h p ). Specifically, Khattab and Zaharia (2020) propose the following fine-grained scoring function:", |
|
"cite_spans": [ |
|
{ |
|
"start": 131, |
|
"end": 157, |
|
"text": "Khattab and Zaharia (2020)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Teacher Model Choice", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "\u03c6\u03b8 ;MaxSim i\u2208|hq| max j\u2208|hp| h i q \u2022 h j p ,", |
|
"eq_num": "(6)" |
|
} |
|
], |
|
"section": "Teacher Model Choice", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "where i and j are the indices of token representations of a query q and a passage p of Col-BERT (Khattab and Zaharia, 2020) . The contribution of our work is in-batch knowledge distillation with a tightly-coupled teacher. The computation of \u03c6\u03b8 ;MaxSim enables exhaustive inference over all query-passage combinations in the minibatch B with only 2\u2022|B| computation cost, enabling enriched interactions between teacher and student. We call this design Tightly-Coupled Teacher ColBERT (TCT-ColBERT). Table 1 provides a training cost comparison between different teachers. When training with pairwise KD, crossencoders exhibit the highest training cost. On the other hand, ColBERT enables in-batch KD at a modest training cost compared to pairwise KD.", |
|
"cite_spans": [ |
|
{ |
|
"start": 96, |
|
"end": 123, |
|
"text": "(Khattab and Zaharia, 2020)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 497, |
|
"end": 504, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Teacher Model Choice", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "TCT-ColBERT provides a flexible design for biencoders, as long as the encoders produce query and passage representations independently. For simplicity, our student model adopts shared encoder weights for both the query and the passage, just like the teacher model ColBERT. Following Khattab and Zaharia (2020) , for each query (passage), we prepend the [CLS] token and another special [Q] ([D]) token in the input sequence for both our teacher and student models. The student encoder outputs single-vector dense representations (h q , h p ) by performing average pooling over the token embeddings from the final layer. 1: Training cost comparison. We report the training time per batch against the baseline (without a teacher model) on a single TPU-v2. Our backbone model is BERT-base, with batch size 96. The in-batch cross-encoder training time is not available because it exceeds the memory limit.", |
|
"cite_spans": [ |
|
{ |
|
"start": 283, |
|
"end": 309, |
|
"text": "Khattab and Zaharia (2020)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Teacher Model Choice", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Teacher / KD strategy Pairwise In-batch Cross-encoder (\u03c6\u03b8 ;Cat ) +48.1% OOM ColBERT (\u03c6\u03b8 ;MaxSim ) +32.7% +33.5%", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Teacher Model Choice", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Given that in-batch negative sampling is an efficient way to add more information into knowledge distillation, we wonder whether our tightly-coupled teacher design works well when applied to more sophisticated sampling methods. Following the work of Xiong et al. 2021, we use our pretrained bi-encoder model, namely TCT-ColBERT, to encode the corpus and sample \"hard\" negatives for each query to create new training triplets by using the negatives D \u2212 of the bi-encoder instead of BM25. Specifically, we explore three different training strategies:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hard Negative Sampling", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "1. HN: we train the bi-encoder using in-batch hard negatives without the guide of ColBERT.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hard Negative Sampling", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "2. TCT HN: we train the bi-encoder with TCT-ColBERT;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hard Negative Sampling", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "3. TCT HN+: we first fine-tune our ColBERT teacher with augmented training data containing hard negatives and then distill its knowledge into the bi-encoder student through TCT-ColBERT.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hard Negative Sampling", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "We empirically explore the effectiveness of these strategies for both passage and document retrieval.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hard Negative Sampling", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "In this section, we conduct experiments on the MS MARCO passage and document corpora. For passage ranking, we first train models on BM25 negatives as warm-up and compare different KD methods. We then further train models on the hard negatives retrieved by the BM25 warmed-up checkpoint. For document ranking, following previous work Zhan et al., 2020; Lu et al., 2021) , we start with our BM25 warmed-up checkpoint for passage ranking and conduct additional hard negative training. In-batch KD (4) TCT-ColBERT 110M .350 / .344 1, 3 .967 1,3 .730 / .685 1 .745 1,2,3", |
|
"cite_spans": [ |
|
{ |
|
"start": 333, |
|
"end": 351, |
|
"text": "Zhan et al., 2020;", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 352, |
|
"end": 368, |
|
"text": "Lu et al., 2021)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 527, |
|
"end": 529, |
|
"text": "1,", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 530, |
|
"end": 531, |
|
"text": "3", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We perform ad hoc passage retrieval on the MS MARCO passage ranking dataset (Bajaj et al., 2016) , which consists of a collection of 8.8M passages from web pages and a set of \u223c0.5M relevant (query, passage) pairs as training data. We evaluate model effectiveness on two test sets of queries:", |
|
"cite_spans": [ |
|
{ |
|
"start": 76, |
|
"end": 96, |
|
"text": "(Bajaj et al., 2016)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Passage Retrieval", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "1. MARCO Dev: the development set of MS MARCO comprises 6980 queries, with an average of one relevant passage per query.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Passage Retrieval", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "2. TREC-DL '19 (Craswell et al., 2019) : the organizers of the Deep Learning Track at the 2019 Text REtrieval Conference (TREC) released 43 queries with multi-graded (0-3) relevance labels on 9K (query, passage) pairs.", |
|
"cite_spans": [ |
|
{ |
|
"start": 15, |
|
"end": 38, |
|
"text": "(Craswell et al., 2019)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Passage Retrieval", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "To evaluate output quality, we report MRR@10 (NDCG@10) for MARCO Dev (TREC-DL '19) and Recall@1K, denoted as R@1K. To compare with current state-of-the-art models, we evaluate our design, TCT-ColBERT, under two approaches for negative sampling: (1) BM25 and (2) hard negatives retrieved by the bi-encoder itself.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Passage Retrieval", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "In this setting, models are trained using the official public data triples.train.small, where negative samples are produced by BM25. We compare different bi-encoder models using BERT-base as the backbone, which uses single 768-dim vectors to represent each query and passage:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training with BM25 Negatives", |
|
"sec_num": "4.1.1" |
|
}, |
|
{ |
|
"text": "1. Baseline: a single-vector bi-encoder trained with in-batch negatives, as discussed in Section 2.1, which is similar to Karpukhin et al. (2020) but with a smaller batch size.", |
|
"cite_spans": [ |
|
{ |
|
"start": 122, |
|
"end": 145, |
|
"text": "Karpukhin et al. (2020)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training with BM25 Negatives", |
|
"sec_num": "4.1.1" |
|
}, |
|
{ |
|
"text": "2. Pairwise KD: the approach of Hofst\u00e4tter et al. (2020) , who improve ranking effectiveness using cross-encoders with pairwise KD.", |
|
"cite_spans": [ |
|
{ |
|
"start": 32, |
|
"end": 56, |
|
"text": "Hofst\u00e4tter et al. (2020)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training with BM25 Negatives", |
|
"sec_num": "4.1.1" |
|
}, |
|
{ |
|
"text": "We also compare against two models, KD-T1 and KD-T2, which use BERT-base bi-encoders as student models. In the former, the student is distilled from a BERT-base cross-encoder, while the latter is distilled from ensembled cross-encoders comprising BERT-base, BERT-large, and ALBERTlarge. These figures reported in Table 2 are copied from Hofst\u00e4tter et al. (2020) . For a fair comparison with our models based on KL-divergence KD, we also implement our KD-T2 using the precomputed pairwise softmax probabilities provided by Hofst\u00e4tter et al. (2020) (who use MSE margin loss for KD). In addition, we adopt pairwise softmax probabilities from fine-tuned ColBERT to train KD-ColBERT for comparison. All our models are fine-tuned with batch size 96 and learning rate 7 \u00d7 10 \u22126 for 500K steps on a single TPU-V2. For TCT-ColBERT, there are two steps in our training procedure: (1) finetune \u03c6\u03b8 ;MaxSim as our teacher model, (2) freeze \u03c6\u03b8 ;MaxSim and distill knowledge into our student model \u03c6 \u03b8 . We keep all the hyperparameter settings the same but adjust temperature \u03c4 = 0.25 for KD at the second step. For all our models, including the baseline, we initialize the student model using the fine-tuned weights of the teacher model in the first step. We limit the input tokens to 32 (150) for queries (passages). To evaluate effectiveness, we encode all passages in the corpus and conduct brute force search over the vector representations.", |
|
"cite_spans": [ |
|
{ |
|
"start": 337, |
|
"end": 361, |
|
"text": "Hofst\u00e4tter et al. (2020)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 313, |
|
"end": 320, |
|
"text": "Table 2", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Training with BM25 Negatives", |
|
"sec_num": "4.1.1" |
|
}, |
|
{ |
|
"text": "Our main results, including paired t-test for significance testing, are shown in Table 2 . In addition to the effectiveness of the student models, we also show the effectiveness of the teacher models for the KD methods. 1 First, we see that pairwise KD methods show significant improvements over the baseline, indicating that information from BM25 negatives cannot be fully exploited without teacher models. Second, although KD-T2 improves the bi-encoder's effectiveness over KD-T1, it is not consistently better than KD-ColBERT in terms of students' effectiveness. We suspect that they have comparable capabilities to discriminate most paired passages (BM25 negative vs. positive samples), i.e., ColBERT is good enough to guide bi-encoder student models to discriminate them. On the other hand, our TCT-ColBERT model, which uses only one teacher model and adds only 33% more training time over the baseline, yields the best effectiveness, demonstrating the advantages of our proposed in-batch KD -exhaustive exploitation of all query-document combinations in a minibatch.",
|
"cite_spans": [ |
|
{ |
|
"start": 220, |
|
"end": 221, |
|
"text": "1", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 81, |
|
"end": 88, |
|
"text": "Table 2", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Training with BM25 Negatives", |
|
"sec_num": "4.1.1" |
|
}, |
|
{ |
|
"text": "To understand why TCT-ColBERT yields better results, we study the models' retrieval effectiveness against carefully selected distractors. We start with a small synthetic corpus composed of the relevant passages and the top-1000 BM25 candidates of the 6980 (43) queries from MARCO Dev (TREC-DL '19). To increase the corpus size, we gradually add passages uniformly sampled from the corpus without replacement. From Figure 2 , we see that the three KD models exhibit nearly the same effectiveness when the corpus only contains BM25 candidates. This shows that the bi-encoders learn to discriminate relevant passages from the BM25 negative samples well. However, as the index size increases, TCT-ColBERT demonstrates better ranking effectiveness than the other pairwise KD methods, indicating that the learned representations are more robust. We attribute this robustness against \"distractors\" to the enriched information from in-batch KD, where we are able to exploit all in-batch query-document combinations.",
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 414, |
|
"end": 422, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Training with BM25 Negatives", |
|
"sec_num": "4.1.1" |
|
}, |
|
{ |
|
"text": "In this subsection, we evaluate TCT-ColBERT when training with hard negatives (HNs). We compare our model to four competitive approaches:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training with Hard Negatives", |
|
"sec_num": "4.1.2" |
|
}, |
|
{ |
|
"text": "1. ANCE is the most representative work, which proposes asynchronous index refreshes to mine hard negatives. The model is trained for 600K steps with index refreshes every 10K steps. ANCE uses RoBERTa-base as its backbone.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training with Hard Negatives", |
|
"sec_num": "4.1.2" |
|
}, |
|
{ |
|
"text": "2. LTRe (Zhan et al., 2020) further improves from an ANCE checkpoint by adding more training steps with the same hard negative mining approach; thus, the computation cost of index refreshes from ANCE cannot be neglected. LTRe also uses RoBERTa-base as its backbone.",
|
"cite_spans": [ |
|
{ |
|
"start": 8, |
|
"end": 27, |
|
"text": "(Zhan et al., 2020)", |
|
"ref_id": "BIBREF33" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training with Hard Negatives", |
|
"sec_num": "4.1.2" |
|
}, |
|
{ |
|
"text": "3. SEED-Encoder (Lu et al., 2021 ) leverages a pretraining strategy to enhance the capability of the bi-encoder, which is further fine-tuned with HNs using asynchronous index refreshes.", |
|
"cite_spans": [ |
|
{ |
|
"start": 16, |
|
"end": 32, |
|
"text": "(Lu et al., 2021", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training with Hard Negatives", |
|
"sec_num": "4.1.2" |
|
}, |
|
{ |
|
"text": "RocketQA (Qu et al., 2020) trains a bi-encoder model using hard negatives denoised by a crossencoder, ERNIE-2.0-Large (Sun et al., 2019) . It further demonstrates that training bi-encoders with many in-batch negatives (batch size up to 4096) significantly improves ranking effectiveness; however, this approach is computationally expensive (the authors report using 8\u00d7V100 GPUs for training). To the best of our knowledge, RocketQA represents the state of the art in single-vector bi-encoders for dense retrieval. For a more fair comparison, we also report the ranking effectiveness of their model trained with a smaller batch size of 128.", |
|
"cite_spans": [ |
|
{ |
|
"start": 9, |
|
"end": 26, |
|
"text": "(Qu et al., 2020)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 118, |
|
"end": 136, |
|
"text": "(Sun et al., 2019)", |
|
"ref_id": "BIBREF30" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "4.", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "For all the approaches above, we directly copy the reported effectiveness from the original papers. For our TCT-ColBERT model, following the settings of the above approaches, we first use our TCT-ColBERT model trained on BM25 negatives as a warm-up starting point and index all 8.8M MARCO passages. Using the warmed-up index, we retrieve top-200 passages for each training query and randomly sample (with replacement) hard negatives from the 200 candidates to form our training data. Note that due to resource limitations we do not conduct experiments with asynchronous index refreshes since multiple V100 GPUs are required for such a model training scheme. 2 In this experiment, all the hyperparameter settings are the same as the ones in the BM25 negative training, except for training steps, which is set to 100K for both student and teacher training. Table 3 reports the results of our experiments with hard negative training. First, we observe that our TCT-ColBERT model trained with BM25 negatives marginally outperforms the other models trained with HNs, except for RocketQA. Comparing the different training strategies discussed in Section 3.3 (second main block of the table), we see that the ranking effectiveness of TCT-ColBERT (HN) degrades when training on hard negatives without the guide of a teacher. This is consistent with the findings of Qu et al. (2020) that hard negatives contain noisy information (i.e., some hard negatives may actually be relevant). Also, show that training bi-encoders with hard negatives can be unstable: hard negatives benefit ranking effectiveness only under certain hyperparameter settings.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 855, |
|
"end": 862, |
|
"text": "Table 3", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "4.", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In contrast, hard negative training using ColBERT's in-batch KD further boosts ranking effectiveness, especially when our teacher (ColBERT)",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "4.", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "2 Re-encoding the entire corpus takes \u223c10 hours on one GPU.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "4.", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "is trained with the same hard negative samples beforehand. It is also worth noting that our TCT-ColBERT (w/ TCT HN+) with batch size 96 yields competitive ranking effectiveness compared to RocketQA (the current state of the art), which uses batch size 4096. These results demonstrate the advantages of our TCT design: our approach effectively exploits hard negatives in a computationally efficient manner (i.e., without the need for large batch sizes or periodic index refreshes).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "4.", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "To validate the effectiveness and generality of our training strategy, we conduct further experiments on document retrieval using the MS MARCO document ranking dataset. This dataset contains 3.2M web pages gathered from passages in the MS MARCO passage ranking dataset. Similar to the passage condition, we evaluate model effectiveness on two test sets of queries:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Document Retrieval", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "1. MARCO Dev: the development set contains 5193 queries, each with exactly one relevant document.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Document Retrieval", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "2. TREC-DL '19: graded relevance judgments are available from the TREC 2019 Deep Learning Track, but on only 43 queries.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Document Retrieval", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Per official guidelines, we report different metrics for the two query sets: MRR@100 for MARCO Dev and NDCG@10 for TREC-DL '19. Following the FirstP setting for document retrieval described in , we feed the first 512 tokens of each document for encoding, and start with the warmed-up checkpoint for our encoder's parameters trained for passage retrieval (using BM25 negatives, as described in Section 4.1.1). The settings for fine-tuning our warmed-up encoder Table 4 : Document retrieval results using the FirstP approach. All our implemented models are labeled with a number and superscripts represent significant improvements over the labeled model (paired t-test, p < 0.05).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 460, |
|
"end": 467, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Document Retrieval", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "MARCO Dev TREC-DL '19", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "MRR@100 NDCG@10 ANCE .368 .614 LTRe (Zhan et al., 2020) -.634 SEED-Encoder (Lu et al., 2021) .394 -", |
|
"cite_spans": [ |
|
{ |
|
"start": 36, |
|
"end": 55, |
|
"text": "(Zhan et al., 2020)", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 75, |
|
"end": 92, |
|
"text": "(Lu et al., 2021)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "(1) TCT-ColBERT .339 .573 (2) w/ TCT HN+ .392 1 .613 (3) w/ 2\u00d7 TCT HN+ .418 1,2 .650 1, 2 (e.g., learning rate, training steps, top-200 negative sampling) are the same as passage retrieval except for batch size, which is set to 64.", |
|
"cite_spans": [ |
|
{ |
|
"start": 85, |
|
"end": 87, |
|
"text": "1,", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 88, |
|
"end": 89, |
|
"text": "2", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Ranking effectiveness is reported in Table 4 . First, we observe that TCT-ColBERT (our warmedup checkpoint) performs far worse than other approaches to document retrieval using the FirstP method. This may be due to the fact that FirstP document retrieval is very different from passage retrieval, making zero-shot transfer ineffective. After applying HN training on both teacher and student models (condition 2), the ranking effectiveness increases significantly. In addition, we find that another iteration of training with an index refresh (condition 3) further improves ranking effectiveness. To sum up, in the document ranking task, TCT-ColBERT yields competitive effectiveness with a one-time index refresh and outperforms other computationally expensive methods with one additional index refresh.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 37, |
|
"end": 44, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In our final set of experiments, we show that dense retrieval with single-vector representations can be integrated with results from sparse retrieval to further increase effectiveness. We illustrate the end-to-end tradeoffs in terms of quality, time, and space of different dense-sparse hybrid combinations on the passage retrieval tasks.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dense-Sparse Hybrids", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "Many papers (Luan et al., 2021; Gao et al., 2020b; have demonstrated that sparse retrieval can complement dense retrieval via a simple linear combination of their scores. In our implementation, for each query q, we use sparse and dense techniques to retrieve the top-1000 passages, D sp and D ds , with their relevance scores, \u03c6 sp (q, p \u2208 D sp ) and \u03c6 ds (q, p \u2208 D ds ), respectively. Then, we compute the final relevance score for each retrieved passage \u03c6(q, p), where p \u2208 D sp \u222a D ds , as follows:", |
|
"cite_spans": [ |
|
{ |
|
"start": 12, |
|
"end": 31, |
|
"text": "(Luan et al., 2021;", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 32, |
|
"end": 50, |
|
"text": "Gao et al., 2020b;", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dense-Sparse Hybrids", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "\\phi(q, p) = \\begin{cases} \\alpha \\cdot \\phi_{sp}(q, p) + \\min_{p \\in D_{ds}} \\phi_{ds}(q, p), & \\text{if } p \\notin D_{ds} \\\\ \\alpha \\cdot \\min_{p \\in D_{sp}} \\phi_{sp}(q, p) + \\phi_{ds}(q, p), & \\text{if } p \\notin D_{sp} \\\\ \\alpha \\cdot \\phi_{sp}(q, p) + \\phi_{ds}(q, p), & \\text{otherwise.} \\end{cases}",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dense-Sparse Hybrids", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "This technique is an approximation of a linear combination of sparse and dense retrieval scores. Specifically, if p / \u2208 D sp (or D ds ), we instead use the minimum score of", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dense-Sparse Hybrids", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "\u03c6 sp (q, p \u2208 D sp ), or \u03c6 ds (q, p \u2208 D ds ) as a substitute.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dense-Sparse Hybrids", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "For the sparse and dense retrieval combinations, we tune the hyperparameter \u03b1 on 6000 randomly sampled queries from the MS MARCO training set. We conduct dense-sparse hybrid experiments with sparse retrieval (BM25 ranking) on the original passages (denoted BM25) and on passages with docTTTTTquery document expansion (Nogueira and Lin, 2019 ) (denoted doc2query-T5). To characterize end-to-end effectiveness and efficiency, we perform sparse retrieval with the Pyserini toolkit and dense retrieval with Faiss (Johnson et al., 2017) , but implement the score combination in separate custom code. Table 5 shows passage retrieval results in terms of ranking effectiveness, query latency, and storage requirements (i.e., index size) for each model and Table 6 reports the component latencies of our TCT-ColBERT dense-sparse hybrid. 3 The cross-encoder reranker of Nogueira and Cho (2019) provides a point of reference for multi-stage reranking designs, which is effective but slow.",
|
"cite_spans": [ |
|
{ |
|
"start": 317, |
|
"end": 340, |
|
"text": "(Nogueira and Lin, 2019", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 509, |
|
"end": 531, |
|
"text": "(Johnson et al., 2017)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 595, |
|
"end": 602, |
|
"text": "Table 5", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 748, |
|
"end": 755, |
|
"text": "Table 6", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Dense-Sparse Hybrids", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "Generally, dense retrieval methods (whether single-vector or multi-vector) are more effective but slower than sparse retrieval methods, which rely on bag-of-words querying using inverted indexes. Single-vector dense models also require more space than sparse retrieval methods. Moving Table 5 : End-to-end comparisons of output quality, query latency, and storage requirements for passage retrieval.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 285, |
|
"end": 292, |
|
"text": "Table 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Dense-Sparse Hybrids", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "Latency Storage MARCO Dev TREC-DL '19 ms/q GiB Sparse retrieval BM25 with Anserini (Yang et al., 2018) .184 .506 55 4 DeepCT (Dai and Callan, 2020) .243 .551 55 4 doc2query-T5 (Nogueira and Lin, 2019) .277 .551 64 14", |
|
"cite_spans": [ |
|
{ |
|
"start": 83, |
|
"end": 102, |
|
"text": "(Yang et al., 2018)", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 125, |
|
"end": 147, |
|
"text": "(Dai and Callan, 2020)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 176, |
|
"end": 200, |
|
"text": "(Nogueira and Lin, 2019)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ranking effectiveness", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Dense retrieval: single-vector TAS-B (Hofst\u00e4tter et al., 2021) .343 .722 64 13 RocketQA (Qu et al., 2020) .370 -107 b 13 a TCT-ColBERT .344 .685 107 13 TCT-ColBERT (w/ TCT HN+)", |
|
"cite_spans": [ |
|
{ |
|
"start": 37, |
|
"end": 62, |
|
"text": "(Hofst\u00e4tter et al., 2021)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 88, |
|
"end": 105, |
|
"text": "(Qu et al., 2020)", |
|
"ref_id": "BIBREF28" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ranking effectiveness", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": ".359 .719 107 13 Dense retrieval: multi-vector ME-BERT (Luan et al., 2021) .334 .687 -96 ColBERT (Khattab and Zaharia, 2020) .360 -458 154", |
|
"cite_spans": [ |
|
{ |
|
"start": 55, |
|
"end": 74, |
|
"text": "(Luan et al., 2021)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 97, |
|
"end": 124, |
|
"text": "(Khattab and Zaharia, 2020)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ranking effectiveness", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Hybrid dense + sparse CLEAR (Gao et al., 2020b) .338 .699 -17 a ME-HYBRID-E (Luan et al., 2021) .343 .706 -100 TAS-B + doc2query-T5 (Hofst\u00e4tter et al., 2021) .360 .753 67 27 a TCT-ColBERT + BM25", |
|
"cite_spans": [ |
|
{ |
|
"start": 28, |
|
"end": 47, |
|
"text": "(Gao et al., 2020b)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 76, |
|
"end": 95, |
|
"text": "(Luan et al., 2021)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 132, |
|
"end": 157, |
|
"text": "(Hofst\u00e4tter et al., 2021)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ranking effectiveness", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": ".356 .720 110 17 TCT-ColBERT + doc2query-T5", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ranking effectiveness", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": ".366 .734 110 27 TCT-ColBERT (w/ TCT HN+) + BM25", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ranking effectiveness", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": ".369 .730 110 17 TCT-ColBERT (w/ TCT HN+) + doc2query-T5", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ranking effectiveness", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": ".375 .741 110 27", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ranking effectiveness", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Multi-stage reranking BM25 + BERT-large (Nogueira and Cho, 2019) .365 .736 3500 4 TAS-B + doc2query-T5 + Mono-Duo-T5 (Hofst\u00e4tter et al., 2021) .421 .759 12800 27 a RocketQA with reranking (Qu et al., 2020) .439 --13 a a We estimate dense index size using 16-bit floats; for hybrid, we add the sizes of sparse and dense indexes. b We assume latency comparable to our settings. from single-vector to multi-vector dense models, we see that ColBERT exhibits higher effectiveness but is slower and requires much more storage. Finally, when integrated with sparse retrieval methods, TCT-ColBERT is able to beat a basic multi-stage reranking design (BM25 + BERTlarge), but with much lower query latency, although at the cost of increased storage. Hybrid TCT-ColBERT (w/ TCT HN+) + doc2query-T5 compares favorably with a recent advanced model, TAS-B + doc2query-T5 (Hofst\u00e4tter et al., 2021) , which introduces topic-aware sampling and dual teachers, incorporating part of our TCT-ColBERT work. Nevertheless, even the best hybrid variant of TCT-ColBERT alone, without further reranking, remains quite some distance from RocketQA, the current state of the art (with reranking using cross-encoders). This suggests that there remain relevance signals that require full attention interactions to exploit.", |
|
"cite_spans": [ |
|
{ |
|
"start": 40, |
|
"end": 64, |
|
"text": "(Nogueira and Cho, 2019)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 117, |
|
"end": 142, |
|
"text": "(Hofst\u00e4tter et al., 2021)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 188, |
|
"end": 205, |
|
"text": "(Qu et al., 2020)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 328, |
|
"end": 329, |
|
"text": "b", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 857, |
|
"end": 882, |
|
"text": "(Hofst\u00e4tter et al., 2021)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ranking effectiveness", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Improving the effectiveness of single-vector bi-encoders is an important research direction in dense retrieval because of lower latency and storage requirements compared to multi-vector approaches. We propose a teacher-student knowledge distillation approach using tightly coupled bi-encoders that enables exhaustive use of query-passage combinations in each minibatch. More importantly, a bi-encoder teacher requires less computation than a cross-encoder teacher. Finally, our approach leads to robust learned representations.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Overall, our hard negative sampling strategy leads to an effective and efficient dense retrieval technique, which can be further combined with sparse retrieval techniques in dense-sparse hybrids. Together, these designs provide a promising solution for end-to-end text retrieval that balances quality, query latency, and storage requirements.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We report our trained ColBERT's accuracy by reranking the top-1000 candidates provided officially.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Here we assume running dense and sparse retrieval in parallel.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This research was supported in part by the Canada First Research Excellence Fund and the Natural Sciences and Engineering Research Council (NSERC) of Canada.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "MS MARCO: A human generated machine reading comprehension dataset", |
|
"authors": [ |
|
{ |
|
"first": "Payal", |
|
"middle": [], |
|
"last": "Bajaj", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Campos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nick", |
|
"middle": [], |
|
"last": "Craswell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Deng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianfeng", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaodong", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rangan", |
|
"middle": [], |
|
"last": "Majumder", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Mcnamara", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bhaskar", |
|
"middle": [], |
|
"last": "Mitra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tri", |
|
"middle": [], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1611.09268" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Payal Bajaj, Daniel Campos, Nick Craswell, Li Deng, Jianfeng Gao, Xiaodong Liu, Rangan Majumder, Andrew McNamara, Bhaskar Mitra, Tri Nguyen, et al. 2016. MS MARCO: A human gen- erated machine reading comprehension dataset. arXiv:1611.09268.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Scalable attentive sentence-pair modeling via distilled sentence embedding", |
|
"authors": [ |
|
{ |
|
"first": "Oren", |
|
"middle": [], |
|
"last": "Barkan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Razin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Itzik", |
|
"middle": [], |
|
"last": "Malkiel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ori", |
|
"middle": [], |
|
"last": "Katz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Avi", |
|
"middle": [], |
|
"last": "Caciularu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Koenigstein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proc. AAAI", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Oren Barkan, Noam Razin, Itzik Malkiel, Ori Katz, Avi Caciularu, and Noam Koenigstein. 2020. Scal- able attentive sentence-pair modeling via distilled sentence embedding. In Proc. AAAI.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Pre-training tasks for embedding-based large-scale retrieval", |
|
"authors": [ |
|
{ |
|
"first": "Wei-Cheng", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Felix", |
|
"middle": [ |
|
"X" |
|
], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yin-Wen", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yiming", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sanjiv", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proc. ICLR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wei-Cheng Chang, Felix X. Yu, Yin-Wen Chang, Yim- ing Yang, and Sanjiv Kumar. 2020. Pre-training tasks for embedding-based large-scale retrieval. In Proc. ICLR.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Overview of the TREC 2019 deep learning track", |
|
"authors": [ |
|
{ |
|
"first": "Nick", |
|
"middle": [], |
|
"last": "Craswell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mitra", |
|
"middle": [], |
|
"last": "Bhaskar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Campos", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proc. TREC", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nick Craswell, Bhaskar Mitra, and Daniel Campos. 2019. Overview of the TREC 2019 deep learning track. In Proc. TREC.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Context-aware term weighting for first stage passage retrieval", |
|
"authors": [ |
|
{ |
|
"first": "Zhuyun", |
|
"middle": [], |
|
"last": "Dai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jamie", |
|
"middle": [], |
|
"last": "Callan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proc. SIGIR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1533--1536", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhuyun Dai and Jamie Callan. 2020. Context-aware term weighting for first stage passage retrieval. In Proc. SIGIR, page 1533-1536.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proc. NAACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4171--4186", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proc. NAACL, pages 4171-4186.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Understanding BERT rankers under distillation", |
|
"authors": [ |
|
{ |
|
"first": "Luyu", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhuyun", |
|
"middle": [], |
|
"last": "Dai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jamie", |
|
"middle": [], |
|
"last": "Callan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proc. ICTIR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "149--152", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Luyu Gao, Zhuyun Dai, and Jamie Callan. 2020a. Un- derstanding BERT rankers under distillation. In Proc. ICTIR, pages 149-152.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Complementing lexical retrieval with semantic residual embedding", |
|
"authors": [ |
|
{ |
|
"first": "Luyu", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhuyun", |
|
"middle": [], |
|
"last": "Dai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhen", |
|
"middle": [], |
|
"last": "Fan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jamie", |
|
"middle": [], |
|
"last": "Callan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2004.13969" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Luyu Gao, Zhuyun Dai, Zhen Fan, and Jamie Callan. 2020b. Complementing lexical retrieval with seman- tic residual embedding. arXiv:2004.13969.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "REALM: Retrieval-augmented language model pre-training", |
|
"authors": [ |
|
{ |
|
"first": "Kelvin", |
|
"middle": [], |
|
"last": "Guu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zora", |
|
"middle": [], |
|
"last": "Tung", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Panupong", |
|
"middle": [], |
|
"last": "Pasupat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2002.08909" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pa- supat, and Ming-Wei Chang. 2020. REALM: Retrieval-augmented language model pre-training. arXiv:2002.08909.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Distilling the knowledge in a neural network", |
|
"authors": [ |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [], |
|
"last": "Hinton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oriol", |
|
"middle": [], |
|
"last": "Vinyals", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Dean", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proc. NeurIPS: Deep Learning and Representation Learning Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Geoffrey Hinton, Oriol Vinyals, and Jeffrey Dean. 2015. Distilling the knowledge in a neural network. In Proc. NeurIPS: Deep Learning and Representa- tion Learning Workshop.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Improving efficient neural ranking models with cross-architecture knowledge distillation", |
|
"authors": [ |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Hofst\u00e4tter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sophia", |
|
"middle": [], |
|
"last": "Althammer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Schr\u00f6der", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mete", |
|
"middle": [], |
|
"last": "Sertkan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Allan", |
|
"middle": [], |
|
"last": "Hanbury", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2010.02666v2" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sebastian Hofst\u00e4tter, Sophia Althammer, Michael Schr\u00f6der, Mete Sertkan, and Allan Hanbury. 2020. Improving efficient neural ranking mod- els with cross-architecture knowledge distillation. arXiv:2010.02666v2.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Let's measure run time! Extending the IR replicability infrastructure to include performance aspects", |
|
"authors": [ |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Hofst\u00e4tter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Allan", |
|
"middle": [], |
|
"last": "Hanbury", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proc. OSIRRC: CEUR Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "12--16", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sebastian Hofst\u00e4tter and Allan Hanbury. 2019. Let's measure run time! Extending the IR replicability in- frastructure to include performance aspects. In Proc. OSIRRC: CEUR Workshop, pages 12-16.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Efficiently teaching an effective dense retriever with balanced topic aware sampling", |
|
"authors": [ |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Hofst\u00e4tter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sheng-Chieh", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jheng-Hong", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Allan", |
|
"middle": [], |
|
"last": "Hanbury", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proc. SIGIR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sebastian Hofst\u00e4tter, Sheng-Chieh Lin, Jheng-Hong Yang, Jimmy Lin, and Allan Hanbury. 2021. Effi- ciently teaching an effective dense retriever with bal- anced topic aware sampling. In Proc. SIGIR.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Poly-encoders: Architectures and pre-training strategies for fast and accurate multi-sentence scoring", |
|
"authors": [ |
|
{ |
|
"first": "Samuel", |
|
"middle": [], |
|
"last": "Humeau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kurt", |
|
"middle": [], |
|
"last": "Shuster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marie-Anne", |
|
"middle": [], |
|
"last": "Lachaux", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Weston", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proc. ICLR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Samuel Humeau, Kurt Shuster, Marie-Anne Lachaux, and Jason Weston. 2020. Poly-encoders: Architec- tures and pre-training strategies for fast and accurate multi-sentence scoring. In Proc. ICLR.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Billion-scale similarity search with GPUs", |
|
"authors": [ |
|
{ |
|
"first": "Jeff", |
|
"middle": [], |
|
"last": "Johnson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthijs", |
|
"middle": [], |
|
"last": "Douze", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Herv\u00e9", |
|
"middle": [], |
|
"last": "J\u00e9gou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1702.08734" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeff Johnson, Matthijs Douze, and Herv\u00e9 J\u00e9gou. 2017. Billion-scale similarity search with GPUs. arXiv:1702.08734.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Dense passage retrieval for open-domain question answering", |
|
"authors": [ |
|
{ |
|
"first": "Vladimir", |
|
"middle": [], |
|
"last": "Karpukhin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Barlas", |
|
"middle": [], |
|
"last": "Oguz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sewon", |
|
"middle": [], |
|
"last": "Min", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ledell", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sergey", |
|
"middle": [], |
|
"last": "Edunov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danqi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wen-Tau", |
|
"middle": [], |
|
"last": "Yih", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proc. EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "6769--6781", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vladimir Karpukhin, Barlas Oguz, Sewon Min, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih. 2020. Dense passage retrieval for open-domain question answering. In Proc. EMNLP, pages 6769- 6781.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "ColBERT: Efficient and effective passage search via contextualized late interaction over BERT", |
|
"authors": [ |
|
{ |
|
"first": "Omar", |
|
"middle": [], |
|
"last": "Khattab", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matei", |
|
"middle": [], |
|
"last": "Zaharia", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proc. SIGIR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "39--48", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Omar Khattab and Matei Zaharia. 2020. ColBERT: Ef- ficient and effective passage search via contextual- ized late interaction over BERT. In Proc. SIGIR, page 39-48.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Latent retrieval for weakly supervised open domain question answering", |
|
"authors": [ |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proc. ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "6086--6096", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kenton Lee, Ming-Wei Chang, and Kristina Toutanova. 2019. Latent retrieval for weakly supervised open domain question answering. In Proc. ACL, pages 6086-6096.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Pyserini: A Python toolkit for reproducible information retrieval research with sparse and dense representations", |
|
"authors": [ |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xueguang", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sheng-Chieh", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jheng-Hong", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ronak", |
|
"middle": [], |
|
"last": "Pradeep", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rodrigo", |
|
"middle": [], |
|
"last": "Nogueira", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proc. SIGIR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jimmy Lin, Xueguang Ma, Sheng-Chieh Lin, Jheng- Hong Yang, Ronak Pradeep, and Rodrigo Nogueira. 2021. Pyserini: A Python toolkit for reproducible information retrieval research with sparse and dense representations. In Proc. SIGIR.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Pretrained transformers for text ranking: BERT and beyond", |
|
"authors": [ |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rodrigo", |
|
"middle": [], |
|
"last": "Nogueira", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Yates", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2010.06467" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jimmy Lin, Rodrigo Nogueira, and Andrew Yates. 2020. Pretrained transformers for text ranking: BERT and beyond. arXiv:2010.06467.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "An investigation of practical approximate nearest neighbor algorithms", |
|
"authors": [ |
|
{ |
|
"first": "Ting", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Moore", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Gray", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ke", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proc. NeurIPS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "825--832", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ting Liu, Andrew W. Moore, Alexander Gray, and Ke Yang. 2004. An investigation of practical ap- proximate nearest neighbor algorithms. In Proc. NeurIPS, page 825-832.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Less is more: Pretraining a strong siamese encoder using a weak decoder", |
|
"authors": [ |
|
{ |
|
"first": "Shuqi", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chenyan", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Di", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guolin", |
|
"middle": [], |
|
"last": "Ke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Waleed", |
|
"middle": [], |
|
"last": "Malik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhicheng", |
|
"middle": [], |
|
"last": "Dou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Bennett", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tieyan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arnold", |
|
"middle": [], |
|
"last": "Overwijk", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2102.09206" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shuqi Lu, Chenyan Xiong, Di He, Guolin Ke, Waleed Malik, Zhicheng Dou, Paul Bennett, Tieyan Liu, and Arnold Overwijk. 2021. Less is more: Pre- training a strong siamese encoder using a weak de- coder. arXiv:2102.09206.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Sparse, dense, and attentional representations for text retrieval. Transactions of the Association for Computational Linguistics", |
|
"authors": [ |
|
{ |
|
"first": "Yi", |
|
"middle": [], |
|
"last": "Luan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Eisenstein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Collins", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "9", |
|
"issue": "", |
|
"pages": "329--345", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yi Luan, Jacob Eisenstein, Kristina Toutanova, and Michael Collins. 2021. Sparse, dense, and atten- tional representations for text retrieval. Transac- tions of the Association for Computational Linguis- tics, 9:329-345.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "A replication study of dense passage retriever", |
|
"authors": [ |
|
{ |
|
"first": "Xueguang", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ronak", |
|
"middle": [], |
|
"last": "Pradeep", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2104.05740" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xueguang Ma, Kai Sun, Ronak Pradeep, and Jimmy Lin. 2021. A replication study of dense passage re- triever. arXiv:2104.05740.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Efficient and robust approximate nearest neighbor search using hierarchical navigable small world graphs. Transactions on Pattern Analysis and Machine Intelligence", |
|
"authors": [ |
|
{ |
|
"first": "Yu", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Malkov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Yashunin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "42", |
|
"issue": "", |
|
"pages": "824--836", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yu A. Malkov and D. A. Yashunin. 2020. Efficient and robust approximate nearest neighbor search using hi- erarchical navigable small world graphs. Transac- tions on Pattern Analysis and Machine Intelligence, 42(4):824-836.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Learning word embeddings efficiently with noise-contrastive estimation", |
|
"authors": [ |
|
{ |
|
"first": "Andriy", |
|
"middle": [], |
|
"last": "Mnih", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Koray", |
|
"middle": [], |
|
"last": "Kavukcuoglu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proc. NIPS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2265--2273", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andriy Mnih and Koray Kavukcuoglu. 2013. Learning word embeddings efficiently with noise-contrastive estimation. In Proc. NIPS, pages 2265-2273.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Passage re-ranking with BERT", |
|
"authors": [ |
|
{ |
|
"first": "Rodrigo", |
|
"middle": [], |
|
"last": "Nogueira", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1901.04085" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rodrigo Nogueira and Kyunghyun Cho. 2019. Passage re-ranking with BERT. arXiv:1901.04085.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "From doc2query to docTTTTTquery", |
|
"authors": [ |
|
{ |
|
"first": "Rodrigo", |
|
"middle": [], |
|
"last": "Nogueira", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rodrigo Nogueira and Jimmy Lin. 2019. From doc2query to docTTTTTquery.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "RocketQA: An optimized training approach to dense passage retrieval for open-domain question answering", |
|
"authors": [ |
|
{ |
|
"first": "Yingqi", |
|
"middle": [], |
|
"last": "Qu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuchen", |
|
"middle": [], |
|
"last": "Ding", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jing", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruiyang", |
|
"middle": [], |
|
"last": "Ren", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wayne", |
|
"middle": [ |
|
"Xin" |
|
], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daxiang", |
|
"middle": [], |
|
"last": "Dong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hua", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Haifeng", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yingqi Qu, Yuchen Ding, Jing Liu, Kai Liu, Ruiyang Ren, Wayne Xin Zhao, Daxiang Dong, Hua Wu, and Haifeng Wang. 2020. RocketQA: An optimized training approach to dense pas- sage retrieval for open-domain question answering. arxiv:2010.08191v1.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Sentence-BERT: Sentence embeddings using Siamese BERTnetworks", |
|
"authors": [ |
|
{ |
|
"first": "Nils", |
|
"middle": [], |
|
"last": "Reimers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iryna", |
|
"middle": [], |
|
"last": "Gurevych", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proc. EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3982--3992", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nils Reimers and Iryna Gurevych. 2019. Sentence- BERT: Sentence embeddings using Siamese BERT- networks. In Proc. EMNLP, pages 3982-3992.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "ERNIE 2.0: A continual pre-training framework for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Yu", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shuohuan", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yukun", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shikun", |
|
"middle": [], |
|
"last": "Feng", |
|
"suffix": "" |
|
}, |
|
{

"first": "Hao",

"middle": [],

"last": "Tian",

"suffix": ""

},

{

"first": "Hua",

"middle": [],

"last": "Wu",

"suffix": ""

},

{

"first": "Haifeng",

"middle": [],

"last": "Wang",

"suffix": ""

}
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1907.12412" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yu Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Hao Tian, Hua Wu, and Haifeng Wang. 2019. ERNIE 2.0: A continual pre-training framework for lan- guage understanding. arXiv:1907.12412.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Approximate nearest neighbor negative contrastive learning for dense text retrieval", |
|
"authors": [ |
|
{ |
|
"first": "Lee", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chenyan", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ye", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kwok-Fung", |
|
"middle": [], |
|
"last": "Tang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jialin", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Bennett", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Junaid", |
|
"middle": [], |
|
"last": "Ahmed", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arnold", |
|
"middle": [], |
|
"last": "Overwijk", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proc. ICLR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lee Xiong, Chenyan Xiong, Ye Li, Kwok-Fung Tang, Jialin Liu, Paul Bennett, Junaid Ahmed, and Arnold Overwijk. 2021. Approximate nearest neighbor neg- ative contrastive learning for dense text retrieval. In Proc. ICLR.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Anserini: Reproducible ranking baselines using Lucene. Journal of Data and Information Quality", |
|
"authors": [ |
|
{ |
|
"first": "Peilin", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hui", |
|
"middle": [], |
|
"last": "Fang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "10", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peilin Yang, Hui Fang, and Jimmy Lin. 2018. Anserini: Reproducible ranking baselines using Lucene. Jour- nal of Data and Information Quality, 10(4):Article 16.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Learning to retrieve: How to train a dense retrieval model effectively and efficiently", |
|
"authors": [ |
|
{ |
|
"first": "Jingtao", |
|
"middle": [], |
|
"last": "Zhan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiaxin", |
|
"middle": [], |
|
"last": "Mao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yiqun", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Min", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shaoping", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2010.10469" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jingtao Zhan, Jiaxin Mao, Yiqun Liu, Min Zhang, and Shaoping Ma. 2020. Learning to retrieve: How to train a dense retrieval model effectively and ef- ficiently. arXiv:2010.10469.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"type_str": "figure", |
|
"text": "Passage retrieval effectiveness on a synthetic corpus comprising relevant passages and BM25 results as additional \"distractors\" randomly sampled from the corpus are added.", |
|
"num": null, |
|
"uris": null |
|
}, |
|
"TABREF0": { |
|
"type_str": "table", |
|
"text": "", |
|
"content": "<table/>", |
|
"num": null, |
|
"html": null |
|
}, |
|
"TABREF1": { |
|
"type_str": "table", |
|
"text": "Passage retrieval results with BM25 negative training. For knowledge distillation (KD) methods, the effectiveness of teacher (T) models is also reported. All our implemented models are labeled with a number and superscripts represent significant improvements over the labeled model (paired t-test, p < 0.05).", |
|
"content": "<table><tr><td>Strategy</td><td>Model</td><td># params of Teacher</td><td colspan=\"4\">MARCO Dev MRR@10 (T/S) R@1K NDCG@10 (T/S) R@1K TREC-DL '19</td></tr><tr><td>-</td><td>(1) Baseline</td><td>-</td><td>-/ .310</td><td>.945</td><td>-/ .626</td><td>.658</td></tr><tr><td/><td>KD-T1 (Hofst\u00e4tter et al., 2020)</td><td>110M</td><td>.376 / .304</td><td>.931</td><td>.730 / .631</td><td>.702</td></tr><tr><td>Pairwise KD</td><td>KD-T2 (Hofst\u00e4tter et al., 2020) (2) KD-T2 (Ours)</td><td>467M 467M</td><td>.399 / .315 .399 / .341 1</td><td>.947 .964 1</td><td>.743 / .668 .743 / .659 1</td><td>.737 .708 1</td></tr><tr><td/><td>(3) KD-ColBERT</td><td>110M</td><td>.350 / .339 1</td><td>.962 1</td><td>.730 / .670 1</td><td>.710 1</td></tr></table>", |
|
"num": null, |
|
"html": null |
|
}, |
|
"TABREF2": { |
|
"type_str": "table", |
|
"text": "Passage retrieval results with hard negative training. All our implemented models are labeled with a number and superscripts represent significant improvements over the labeled model (paired t-test, p < 0.05).", |
|
"content": "<table><tr><td>Model</td><td># Index Refresh</td><td>Batch Size</td><td colspan=\"4\">MARCO Dev MRR@10 R@1K NDCG@10 R@1K TREC-DL '19</td></tr><tr><td>ANCE (Xiong et al., 2021)</td><td>60</td><td>32</td><td>.330</td><td>.959</td><td>.648</td><td>-</td></tr><tr><td>LTRe (Zhan et al., 2020)</td><td>60</td><td>32</td><td>.341</td><td>.962</td><td>.675</td><td>-</td></tr><tr><td colspan=\"2\">SEED-Encoder (Lu et al., 2021) \u226510 (est.)</td><td>-</td><td>.339</td><td>.961</td><td>-</td><td>-</td></tr><tr><td>RocketQA (Qu et al., 2020)</td><td>1</td><td colspan=\"2\">128 .310</td><td>-</td><td>-</td><td>-</td></tr><tr><td>RocketQA (Qu et al., 2020)</td><td>1</td><td>4096</td><td>.364</td><td>-</td><td>-</td><td>-</td></tr><tr><td>(1) TCT-ColBERT</td><td>0</td><td colspan=\"2\">96 .344</td><td>.967</td><td>.685</td><td>.745</td></tr><tr><td>(2) w/ HN</td><td>1</td><td>96</td><td>.237</td><td>.929</td><td>.543</td><td>.674</td></tr><tr><td>(3) w/ TCT HN</td><td>1</td><td>96</td><td>.354 1,2</td><td colspan=\"2\">.971 1,2 .705 2</td><td>.765 1,2</td></tr><tr><td>(4) w/ TCT HN+</td><td>1</td><td>96</td><td>.359 1,2</td><td>.970 1</td><td>.719 1,2</td><td>.760 1</td></tr></table>", |
|
"num": null, |
|
"html": null |
|
}, |
|
"TABREF3": { |
|
"type_str": "table", |
|
"text": "Component latencies per query of our model.", |
|
"content": "<table><tr><td>Stage</td><td colspan=\"2\">latency (ms) device</td></tr><tr><td>BERT query encoder</td><td>7</td><td>GPU</td></tr><tr><td>Dot product search</td><td>100</td><td>GPU</td></tr><tr><td>Score combination</td><td>3</td><td>CPU</td></tr></table>", |
|
"num": null, |
|
"html": null |
|
} |
|
} |
|
} |
|
} |