changed tokenization
ref_seg_ger.py  (+20 -20)
@@ -275,13 +275,13 @@ class RefSeg(datasets.GeneratorBasedBuilder):
                 clean_input_ids.append(input)
                 clean_labels.append(labels[i])
                 clean_refs.append(refs[i])
-            n_chunks = int(len(clean_input_ids) / self.CHUNK_SIZE) if len(clean_input_ids) % self.CHUNK_SIZE == 0 \
-                else int(len(clean_input_ids) / self.CHUNK_SIZE) + 1
-            split_ids = np.array_split(clean_input_ids, n_chunks)
-            split_labels = np.array_split(clean_labels, n_chunks)
-            split_refs = np.array_split(clean_refs, n_chunks)
-            print(clean_input_ids)
-            for chunk_ids, chunk_labels, chunk_refs in zip(clean_input_ids, clean_labels, clean_refs):
+            # n_chunks = int(len(clean_input_ids) / self.CHUNK_SIZE) if len(clean_input_ids) % self.CHUNK_SIZE == 0 \
+            #     else int(len(clean_input_ids) / self.CHUNK_SIZE) + 1
+            # split_ids = np.array_split(clean_input_ids, n_chunks)
+            # split_labels = np.array_split(clean_labels, n_chunks)
+            # split_refs = np.array_split(clean_refs, n_chunks)
+            # print(clean_input_ids)
+            # for chunk_ids, chunk_labels, chunk_refs in zip(clean_input_ids, clean_labels, clean_refs):
             # for chunk_id, index in enumerate(range(0, len(clean_input_ids), self.CHUNK_SIZE)):
             #     split_ids = clean_input_ids[index:max(len(clean_input_ids), index + self.CHUNK_SIZE)]
             #     split_bboxes = bboxes[index:index + self.CHUNK_SIZE]
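
For context, a minimal standalone sketch of the chunking the removed lines performed, assuming CHUNK_SIZE = 512 (the real value is an attribute of RefSeg not shown in this diff):

    # Sketch only: reproduces the minus-side chunking, not the committed code.
    import numpy as np

    CHUNK_SIZE = 512  # assumption; ref_seg_ger.py reads self.CHUNK_SIZE

    def n_chunks(length: int) -> int:
        # The same ceiling division the original ternary spelled out.
        return length // CHUNK_SIZE if length % CHUNK_SIZE == 0 else length // CHUNK_SIZE + 1

    tokens = list(range(1100))
    parts = np.array_split(tokens, n_chunks(len(tokens)))
    print([len(p) for p in parts])  # balanced chunks: [367, 367, 366]

Note that np.array_split balances chunk lengths rather than cutting fixed windows; the commented-out enumerate(range(...)) loop retained below was the fixed-window variant that slices index:index + CHUNK_SIZE.
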
@@ -302,16 +302,16 @@ class RefSeg(datasets.GeneratorBasedBuilder):
             # print(split_labels, len(split_labels))
             # print(split_ids, len(split_ids))
 
-                yield key, {
-                    # "id": f"{os.path.basename(f)}_{chunk_id}",
-                    'tokens': chunk_ids,
-                    # 'attention_mask': [1] * len(chunk_ids),
-                    # "bbox": split_bboxes,
-                    # "RGBs": split_rgbs,
-                    # "fonts": split_fonts,
-                    # "image": image,
-                    # "original_image": original_image,
-                    "labels": chunk_labels,
-                    "labels_ref": chunk_refs
-                }
-                key += 1
+            yield key, {
+                # "id": f"{os.path.basename(f)}_{chunk_id}",
+                'tokens': clean_input_ids,
+                # 'attention_mask': [1] * len(chunk_ids),
+                # "bbox": split_bboxes,
+                # "RGBs": split_rgbs,
+                # "fonts": split_fonts,
+                # "image": image,
+                # "original_image": original_image,
+                "labels": clean_labels,
+                "labels_ref": clean_refs
+            }
+            key += 1
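
After this commit each document is yielded once, with the full 'tokens', 'labels' and 'labels_ref' lists instead of CHUNK_SIZE-long pieces. Below is a hedged sketch of a datasets.Features schema compatible with the new dict; the value types and label strings are assumptions, since the diff does not show the builder's _info():

    # Sketch only: feature types are assumed; ref_seg_ger.py defines the real schema.
    import datasets

    features = datasets.Features(
        {
            "tokens": datasets.Sequence(datasets.Value("string")),
            "labels": datasets.Sequence(datasets.Value("string")),
            "labels_ref": datasets.Sequence(datasets.Value("string")),
        }
    )

    # Shape of one example as the builder's generator now yields it;
    # the token and label strings here are illustrative only.
    example = {
        "tokens": ["Müller", "1997"],
        "labels": ["B-author", "B-year"],
        "labels_ref": ["B-ref", "I-ref"],
    }
    features.encode_example(example)  # validates the dict against the schema

With the chunking gone, splitting long sequences to a model's maximum length presumably has to happen downstream, e.g. in the tokenizer, which fits the commit title "changed tokenization".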