Datasets:
Sub-tasks:
semantic-similarity-classification
Languages:
English
Size:
100K<n<1M
Tags:
text segmentation
document segmentation
topic segmentation
topic shift detection
semantic chunking
chunking
License:
File size: 3,246 Bytes
Revision history (viewer residue): commits 77c18d1, 5e2dfcd, 1ba69f4, 209335e. File spans 104 lines.
import os
def _parse_article(text: str, id: str, drop_titles: bool = False, prepend_title_stack: bool = False):
def non_empty(s):
return s and not s.isspace()
### Split the text into sections
raw_sections = text.strip().split("========,")
sections = []
for raw_section in raw_sections[1:]: # Skip the first split as it will be empty
lines = raw_section.split("\n")
header = lines[0].split(',')
level, title = header[0].strip(), header[1].strip()
sentences = [stripped_sent for sent in lines[1:] if (stripped_sent := sent.strip())]
sections.append({
'level': int(level),
'title': title,
'sentences': sentences
})
### Parse the sections into sentences
doc_id = id
doc_ids = []
doc_sentences = []
doc_levels = []
doc_titles_mask = []
doc_labels = []
titles = []
for sec_idx, section in enumerate(sections):
level = section['level']
title = section['title']
sentences = [sent for sent in section['sentences'] if non_empty(sent)]
if prepend_title_stack:
# Remove irrelevant titles history
while titles and (last_level := titles[-1][0]) >= level:
titles.pop()
# Add current title
titles.append((level, title))
title_str = ' '.join([t for l, t in titles if non_empty(t)])
# Don't keep 'preface' in the titles history
if title.lower() == 'preface.' and level == 1:
titles.pop()
else:
title_str = title
# If section is empty, continue
if not sentences:
continue
# Add the title as a single sentence
if not drop_titles and non_empty(title_str):
# doc_ids.append(f'{doc_id}_sec{sec_idx}_title')
doc_ids.append(f'{sec_idx}')
doc_sentences.append(title_str)
doc_titles_mask.append(1)
doc_levels.append(level)
doc_labels.append(0)
# Add the sentences
for sent_idx, sent in enumerate(sentences):
doc_ids.append(f'{sec_idx}_{sent_idx}')
doc_sentences.append(sent)
doc_titles_mask.append(0)
doc_levels.append(level)
doc_labels.append(1 if sent_idx == len(sentences) - 1 else 0)
out = {
'id': doc_id,
'ids': doc_ids,
'sentences': doc_sentences,
'titles_mask': doc_titles_mask,
'levels': doc_levels,
'labels': doc_labels
}
if drop_titles:
out.pop('titles_mask')
out.pop('levels')
return out
def parse_split_files(split_path: str, drop_titles: bool = False, prepend_title_stack: bool = False):
    """Recursively walk *split_path* and yield one parsed article per file.

    Each regular file found under the directory tree is read as UTF-8 text
    and handed to ``_parse_article``; the file's basename is used as the
    article id. Options are forwarded unchanged.
    """
    for dirpath, _subdirs, filenames in os.walk(split_path):
        for filename in filenames:
            full_path = os.path.join(dirpath, filename)
            with open(full_path, 'r', encoding='utf-8') as fh:
                contents = fh.read()
            yield _parse_article(
                text=contents,
                id=filename,
                drop_titles=drop_titles,
                prepend_title_stack=prepend_title_stack,
            )