tombagby committed
Commit 7271056 · 1 Parent(s): ce7adc1
Add dataset script.

svq.py  ADDED
@@ -0,0 +1,199 @@
"""SVQ data reading."""

import io
import os
from array_record.python import array_record_module as array_record
import datasets
import librosa
import numpy as np
import pandas as pd
from scipy.io import wavfile


def read_wav_bytes_to_normalized_float(
    wav_bytes, resample_hz: float | None = None
):
  """Reads a WAV bytes object and returns a normalized float numpy array.

  Args:
    wav_bytes: WAV bytes object.
    resample_hz: Optional resample rate.

  Returns:
    (waveform, original sample rate before any resample)
  """
  rate, data = wavfile.read(io.BytesIO(wav_bytes))

  if data.ndim > 1 and data.shape[1] > 1:
    raise ValueError("Only mono WAV files are supported.")

  # Convert data to float and normalize.
  if data.dtype == np.int16:
    x = data.astype(np.float32) / np.iinfo(np.int16).max
  elif data.dtype == np.int32:
    x = data.astype(np.float32) / np.iinfo(np.int32).max
  elif data.dtype == np.float32:
    x = data
  else:
    raise TypeError(f"Unsupported data type: {data.dtype}")
  if resample_hz is not None and resample_hz != rate:
    x = librosa.resample(x, orig_sr=rate, target_sr=resample_hz)
  return x, rate


def read_utt_index(basepath):
  """Reads utt_index.jsonl into a dict of {utt_id: "path:index"}."""
  df = pd.read_json(os.path.join(basepath, "utt_index.jsonl"), lines=True)
  return dict(zip(df["utt_id"], df["index"]))


class UttLookup:
  """Looks up utterances by utt_id with optional resampling.

  Usage:
    utt_lookup = UttLookup(basepath)
    waveform = utt_lookup(utt_id)
  """

  def __init__(self, basepath, resample_hz: float | None = None):
    self.basepath = basepath
    self.resample_hz = resample_hz
    self.utt_id_to_path_idx = read_utt_index(basepath)
    self.readers = {}
    self.orig_sample_rate_ = None

  @property
  def orig_sample_rate(self):
    if self.orig_sample_rate_ is None:
      utt_id = next(iter(self.utt_id_to_path_idx))
      self(utt_id)
    return self.orig_sample_rate_

  def __call__(self, utt_id: str):
    path, idx = self.utt_id_to_path_idx[utt_id].split(":")
    if path not in self.readers:
      array_record_path = os.path.join(self.basepath, f"{path}.array_record")
      self.readers[path] = array_record.ArrayRecordReader(array_record_path)
    b = self.readers[path].read([int(idx)])
    waveform, sample_rate = read_wav_bytes_to_normalized_float(
        b[0], resample_hz=self.resample_hz
    )
    if self.orig_sample_rate_ is None:
      self.orig_sample_rate_ = sample_rate
    if sample_rate != self.orig_sample_rate_:
      raise ValueError(
          f"Sample rate mismatch: {sample_rate} != {self.orig_sample_rate_}"
      )
    return waveform


def generate_examples(filepath, resample_hz: float | None = None):
  """Generates examples from a jsonl task file."""
  basepath = os.path.dirname(filepath)
  utt_lookup = UttLookup(basepath, resample_hz=resample_hz)
  task = pd.read_json(filepath, lines=True)
  for ex in task.to_dict(orient="records"):
    utt = utt_lookup(ex["utt_id"])
    ex["waveform"] = utt
    yield ex
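
# Standalone usage (a sketch, not executed at import time): assuming a local
# SVQ data directory containing utt_index.jsonl, the referenced *.array_record
# shards, and a task file such as span_reasoning_in_lang.jsonl, the helpers
# above can be used directly:
#
#   utt_lookup = UttLookup("/path/to/svq", resample_hz=16000)
#   waveform = utt_lookup(next(iter(utt_lookup.utt_id_to_path_idx)))
#   for ex in generate_examples(
#       "/path/to/svq/span_reasoning_in_lang.jsonl", resample_hz=16000):
#     print(ex["utt_id"], len(ex["waveform"]))
#     break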


_CITATION = """\
@InProceedings{mseb,
  title = {Massive Sound Embedding Benchmark (MSEB)},
  author = {Georg Heigold, Ehsan Variani, Tom Bagby, Ji Ma, Cyril Allauzen, Shankar Kumar, Michael Riley},
  year = {2025}
}
"""

_NUM_SHARDS = 128  # Internal sharding for parallel data loading.


class SvqDataset(datasets.GeneratorBasedBuilder):
  """SVQ dataset."""

  VERSION = datasets.Version("1.1.0")

  BUILDER_CONFIGS = [
      datasets.BuilderConfig(name=name, description=desc)
      for name, desc in [
          ("span_reasoning_in_lang", "Span reasoning in language."),
          ("span_retrieval_in_lang", "Span retrieval in language."),
          ("span_reasoning_cross_lang", "Span reasoning cross language."),
          ("span_retrieval_cross_lang", "Span retrieval cross language."),
          ("passage_retrieval_in_lang", "Passage retrieval in language."),
          ("passage_retrieval_cross_lang", "Passage retrieval cross language."),
          ("document_retrieval_in_lang", "Document retrieval in language."),
          (
              "document_retrieval_cross_lang",
              "Document retrieval cross language.",
          ),
      ]
  ]

  DEFAULT_WRITER_BATCH_SIZE = 64

  def _info(self):
    task = self.config.name
    features = {
        "utt_id": datasets.Value("string"),
        "waveform": datasets.Sequence(datasets.Value("float32")),
        "text": datasets.Value("string"),
        "locale": datasets.Value("string"),
        "environment": datasets.Value("string"),
        "speaker_id": datasets.Value("string"),
        "speaker_age": datasets.Value("int32"),
        "speaker_gender": datasets.Value("string"),
        "page_id": datasets.Value("string"),
        "page_title": datasets.Value("string"),
        "passage_id": datasets.Value("string"),
        "passage_text": datasets.Value("string"),
    }
    if "span" in task:
      features["span"] = datasets.Value("string")
    return datasets.DatasetInfo(
        description=(
            "Simple Voice Queries (SVQ) dataset. Task:"
            f" {self.config.description}"
        ),
        features=datasets.Features(**features),
        homepage="https://huggingface.co/datasets/google/svq",
        license="Apache 2.0",
        citation=_CITATION,
    )

  def _split_generators(self, dl_manager):
    basepath = os.getcwd()
    task = self.config.name
    return [
        datasets.SplitGenerator(
            name="eval",
            gen_kwargs={
                "filepath": os.path.join(basepath, f"{task}.jsonl"),
                "shards": list(range(_NUM_SHARDS)),
                "resample_hz": 16000,
                "task_name": task,
            },
        ),
    ]

  def _generate_examples(
      self, filepath=None, shards=None, resample_hz=None, task_name=None
  ):
    basepath = os.path.dirname(filepath)
    utt_lookup = UttLookup(basepath, resample_hz=resample_hz)
    task = pd.read_json(filepath, lines=True)
    task = np.array_split(task, _NUM_SHARDS)
    task_shards = [task[idx].to_dict(orient="records") for idx in shards]
    del task
    for shard in task_shards:
      for ex in shard:
        utt = utt_lookup(ex["utt_id"])
        ex["waveform"] = utt
        del ex["task"]
        if "span" not in task_name:
          del ex["span"]
        yield "_".join([ex["utt_id"], ex["passage_id"]]), ex
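
Once this script is in the dataset repo, consuming it might look like the sketch below. This assumes the `google/svq` repo id from the builder's `homepage` field and a `datasets` release that still supports script-based builders via `trust_remote_code`:

```python
import datasets

# Each BUILDER_CONFIGS name maps to one task jsonl; the builder exposes a
# single "eval" split and decodes audio to 16 kHz float32 waveforms.
ds = datasets.load_dataset(
    "google/svq",                  # or a local path containing svq.py
    "span_reasoning_in_lang",      # any config name listed above
    split="eval",
    trust_remote_code=True,
)

ex = ds[0]
print(ex["utt_id"], ex["locale"], len(ex["waveform"]))
```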