# clustering_segments/vps_clustering_benchmark.py
import datasets
import numpy as np
import pandas as pd

logger = datasets.logging.get_logger(__name__)

_DESCRIPTION = """\
This dataset consists of a small sample of audio clips with annotated
speaker identities, age and gender, diarization-based speech segments,
and transcriptions. The dataset is intended for benchmarking VBI-core.
"""
_HF_REPO_PATH = "https://huggingface.co/datasets/conversy/vps_clustering_benchmark/resolve/main/voice_prints.pkl"
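
# Usage sketch (assumptions: the dataset lives at `conversy/vps_clustering_benchmark`
# and script-based loading is available, i.e. `datasets<3.0`, or a recent version
# with `trust_remote_code=True`):
#
#     from datasets import load_dataset
#     ds = load_dataset("conversy/vps_clustering_benchmark",
#                       "VPClusteringBenchmark",
#                       split="train",
#                       trust_remote_code=True)
#     print(ds[0]["segment_id"], len(ds[0]["vp"]))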


class VPClusteringBenchmarkConfig(datasets.BuilderConfig):
    """BuilderConfig for the Conversy benchmark."""

    def __init__(self, name, version, **kwargs):
        """BuilderConfig for the Conversy benchmark.

        Args:
            name: name of the configuration.
            version: version of the configuration.
            **kwargs: keyword arguments forwarded to super. `features`,
                `data_url` and `nb_data_shards` are popped and stored on
                the config; `description` is handled by `BuilderConfig`.
        """
        self.features = kwargs.pop("features", None)
        self.data_url = kwargs.pop("data_url", None)
        self.nb_data_shards = kwargs.pop("nb_data_shards", None)
        # `description` is left in kwargs so that `BuilderConfig.__init__`
        # stores it instead of overwriting it with None.
        super().__init__(name=name, version=version, **kwargs)


class VPClusteringBenchmark(datasets.GeneratorBasedBuilder):
    """Conversy voice-print clustering benchmark."""

    VERSION = datasets.Version("1.0.0")
BUILDER_CONFIGS = [
VPClusteringBenchmarkConfig(
name="VPClusteringBenchmark",
version=VERSION,
description="Conversy Benchmark for ML models evaluation",
features=["segment_id", "filename", "speaker", "duration", "vp",
"segment_clean"],
data_url=_HF_REPO_PATH,
nb_data_shards=1)
]

    def _info(self):
        features = datasets.Features(
            {
                "segment_id": datasets.Value("string"),
                "filename": datasets.Value("string"),
                "speaker": datasets.Value("string"),
                "duration": datasets.Value("float32"),
                "segment_clean": datasets.Value("bool"),
                "vp": datasets.Sequence(datasets.Value("float32")),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            version=self.config.version,
        )
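
    # Shape of a single example under the schema above (illustrative
    # placeholder values, not real data):
    #     {"segment_id": "<uuid>", "filename": "<audio file>",
    #      "speaker": "<label>", "duration": 3.7, "segment_clean": True,
    #      "vp": [0.013, -0.094, ...]}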

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # The data ships as a single pickle file, so a plain download
        # (no extraction) is sufficient.
        data_url = self.config.data_url
        downloaded_file = dl_manager.download(data_url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"file_path": downloaded_file},
            ),
        ]

    def _generate_examples(self, file_path):
        """Yields one example per diarized segment."""
        df = pd.read_pickle(file_path)
        for idx, row in df.iterrows():
            # Voice prints are stored as numpy arrays in the pickle; convert
            # them to plain lists to match the `Sequence(float32)` feature.
            vp = row["vp"]
            if isinstance(vp, np.ndarray):
                vp = vp.tolist()
            yield idx, {
                "segment_id": row["segment_id"],
                "filename": row["filename"],
                "speaker": row["speaker"],
                "duration": row["duration"],
                "segment_clean": row["segment_clean"],
                "vp": vp,
            }
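

if __name__ == "__main__":
    # Minimal end-to-end sketch, not the official evaluation protocol.
    # Assumptions: `scikit-learn` (>=1.2) is installed, and this script can be
    # loaded as a local loading script (`datasets<3.0`, or a recent version
    # with `trust_remote_code=True`).
    from datasets import load_dataset
    from sklearn.cluster import AgglomerativeClustering
    from sklearn.metrics import adjusted_rand_score

    ds = load_dataset(__file__, split="train", trust_remote_code=True)

    # Stack the per-segment voice prints into an (n_segments, dim) matrix.
    vps = np.stack([np.asarray(vp, dtype=np.float32) for vp in ds["vp"]])

    # Cluster with average-linkage agglomerative clustering on cosine distance,
    # using the number of reference speakers as the target cluster count.
    speakers = ds["speaker"]
    clustering = AgglomerativeClustering(
        n_clusters=len(set(speakers)), metric="cosine", linkage="average"
    )
    predicted = clustering.fit_predict(vps)

    print("ARI vs. reference speakers:", adjusted_rand_score(speakers, predicted))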