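"""HuggingFace `datasets` loading script for the Conversy VP clustering benchmark.

It exposes two splits built from one pickled DataFrame: "segments" (one flat
record per voice segment, including its voice-print vector "vp") and "files"
(the same segments grouped by source file).
"""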
import datasets
import pandas as pd
import numpy as np

logger = datasets.logging.get_logger(__name__)

_DATA_PATH = "https://huggingface.co/datasets/conversy/vps_clustering_benchmark/resolve/main/dataset.pkl"

class VPClusteringBenchmarkConfig(datasets.BuilderConfig):
    """BuilderConfig for the Conversy VP clustering benchmark."""

    def __init__(self, name, version, **kwargs):
        """BuilderConfig for the Conversy VP clustering benchmark.

        Args:
          name: name of the configuration.
          version: version of the configuration.
          **kwargs: keyword arguments forwarded to super, after the
            config-specific keys below are popped. `description` is left in
            kwargs so that `datasets.BuilderConfig` stores it itself rather
            than overwriting it with its default.
        """
        self.features = kwargs.pop("features", None)
        self.data_url = kwargs.pop("data_url", None)
        self.nb_data_shards = kwargs.pop("nb_data_shards", None)
        super().__init__(name=name, version=version, **kwargs)


class VPClusteringBenchmark(datasets.GeneratorBasedBuilder):
    """Conversy benchmark"""
    VERSION = datasets.Version("1.0.0")
    BUILDER_CONFIGS = [
        VPClusteringBenchmarkConfig(
                name="VPClusteringBenchmark",
                version=VERSION,
                description="Conversy Benchmark for ML models evaluation",
                features=["segment_id", "filename", "speaker", "duration", "vp",
                          "start", "end", "readable_start", "readable_end",
                          "segment_clean"],
                data_url=_DATA_PATH,
                nb_data_shards=1)
    ]

    def _info(self):
        # NOTE: these features describe the flat records of the "segments"
        # split; the "files" split yields nested {filename, segments} records
        # (see _generate_examples).
        features = datasets.Features(
            {
                "segment_id": datasets.Value("int32"),
                "filename": datasets.Value("string"),
                "speaker": datasets.Value("string"),
                "duration": datasets.Value("float32"),
                "segment_clean": datasets.Value("bool"),
                "start": datasets.Value("float32"),
                "end": datasets.Value("float32"),
                "readable_start": datasets.Value("string"),
                "readable_end": datasets.Value("string"),
                "vp": datasets.Sequence(datasets.Value("float32")),
            }
        )
        return datasets.DatasetInfo(
            description="Voice Print Clustering Benchmark",
            features=features,
            supervised_keys=None,
            version=self.config.version,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # Both splits are built from the same pickled DataFrame, downloaded
        # once and re-read per split.
        data_url = self.config.data_url
        downloaded_file = dl_manager.download_and_extract(data_url)
        return [
            datasets.SplitGenerator(
                name="segments",
                gen_kwargs={"file_path": downloaded_file, "split": "segments"},
            ),
            datasets.SplitGenerator(
                name="files",
                gen_kwargs={"file_path": downloaded_file, "split": "files"},
            ),
        ]

    def _generate_examples(self, file_path, split):
        """Yields examples for the requested split."""
        df = pd.read_pickle(file_path)

        if split == "segments":
            # One example per annotated segment, with its voice-print vector.
            for idx, row in df.iterrows():
                yield idx, {
                    "segment_id": row["segment_id"],
                    "filename": row["filename"],
                    "speaker": row["speaker"],
                    "duration": row["duration"],
                    "segment_clean": row["segment_clean"],
                    "start": row["start"],
                    "end": row["end"],
                    "readable_start": row["readable_start"],
                    "readable_end": row["readable_end"],
                    "vp": np.asarray(row["vp"], dtype=np.float32),
                }
        elif split == "files":
            # Group segments by source file so each example is one file with
            # the list of its segments.
            files = {}
            for _, row in df.iterrows():
                if row["filename"] not in files:
                    files[row["filename"]] = {
                        "filename": row["filename"],
                        "segments": [],
                    }
                files[row["filename"]]["segments"].append({
                    "segment_id": row["segment_id"],
                    "speaker": row["speaker"],
                    "duration": row["duration"],
                    "segment_clean": row["segment_clean"],
                    "start": row["start"],
                    "end": row["end"],
                    "readable_start": row["readable_start"],
                    "readable_end": row["readable_end"],
                    "vp": np.asarray(row["vp"], dtype=np.float32),
                })

            for idx, file_data in enumerate(files.values()):
                yield idx, file_data
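

# A minimal usage sketch, not part of the original script: it loads the
# benchmark from the Hub repo implied by _DATA_PATH. Script-based datasets
# require trust_remote_code=True in recent `datasets` releases.
if __name__ == "__main__":
    from datasets import load_dataset

    # Load the flat per-segment split and inspect one record.
    segments = load_dataset(
        "conversy/vps_clustering_benchmark",
        split="segments",
        trust_remote_code=True,
    )
    print(segments[0]["filename"], len(segments[0]["vp"]))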