from datasets import load_dataset, concatenate_datasets
from sentence_transformers.evaluation import InformationRetrievalEvaluator, SequentialEvaluator
from sentence_transformers.util import cos_sim
from src.utils.paths import TRAIN_JSON, TEST_JSON

def build_eval(matryoshka_dims: list[int] | tuple[int, ...]) -> SequentialEvaluator:
    """Build one InformationRetrievalEvaluator per Matryoshka dimension and chain them."""
    test_dataset  = load_dataset("json", data_files=str(TEST_JSON),  split="train")
    train_dataset = load_dataset("json", data_files=str(TRAIN_JSON), split="train")

    # The retrieval corpus covers every positive passage (train + test) so that
    # queries are evaluated against the full document pool.
    aws_dataset = concatenate_datasets([train_dataset, test_dataset])

    corpus = dict(zip(aws_dataset["id"], aws_dataset["positive"]))

    # Queries are only the held-out test anchors.
    queries = dict(zip(test_dataset["id"], test_dataset["anchor"]))

    # Several corpus entries can share a global_id, so map each query to every
    # corpus id that carries the same global_id.
    relevant_docs: dict[int, list[int]] = {}
    g2c: dict[int, list[int]] = {}
    for cid, g in zip(aws_dataset["id"], aws_dataset["global_id"]):
        g2c.setdefault(g, []).append(cid)

    for qid, g in zip(test_dataset["id"], test_dataset["global_id"]):
        relevant_docs[qid] = g2c.get(g, [])

    # One evaluator per truncation dimension; SequentialEvaluator runs them in order.
    evaluators = []
    for dim in matryoshka_dims:
        ir = InformationRetrievalEvaluator(
            queries=queries,
            corpus=corpus,
            relevant_docs=relevant_docs,
            name=f"dim_{dim}",
            truncate_dim=dim,
            score_functions={"cosine": cos_sim},
        )
        evaluators.append(ir)

    return SequentialEvaluator(evaluators)
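

if __name__ == "__main__":
    # Illustrative usage sketch, not part of the original module: the checkpoint
    # name and the dimension list below are assumptions chosen for demonstration.
    from sentence_transformers import SentenceTransformer

    model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")  # assumed checkpoint
    evaluator = build_eval([384, 256, 128, 64])

    # In recent sentence-transformers releases the evaluator call returns a dict
    # of metrics keyed like "dim_128_cosine_ndcg@10", plus a "sequential_score".
    results = evaluator(model)
    for key, value in results.items():
        print(key, value)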