Datasets:
Sub-tasks:
semantic-similarity-classification
Languages:
English
Size:
100K<n<1M
Tags:
text segmentation
document segmentation
topic segmentation
topic shift detection
semantic chunking
chunking
License:
import os | |
def _parse_article(text: str, id: str, drop_titles: bool = False, prepend_title_stack: bool = False): | |
def non_empty(s): | |
return s and not s.isspace() | |
### Split the text into sections | |
raw_sections = text.strip().split("========,") | |
sections = [] | |
for raw_section in raw_sections[1:]: # Skip the first split as it will be empty | |
lines = raw_section.split("\n") | |
header = lines[0].split(',') | |
level, title = header[0].strip(), header[1].strip() | |
sentences = [stripped_sent for sent in lines[1:] if (stripped_sent := sent.strip())] | |
sections.append({ | |
'level': int(level), | |
'title': title, | |
'sentences': sentences | |
}) | |
### Parse the sections into sentences | |
doc_id = id | |
doc_ids = [] | |
doc_sentences = [] | |
doc_levels = [] | |
doc_titles_mask = [] | |
doc_labels = [] | |
titles = [] | |
for sec_idx, section in enumerate(sections): | |
level = section['level'] | |
title = section['title'] | |
sentences = [sent for sent in section['sentences'] if non_empty(sent)] | |
if prepend_title_stack: | |
# Remove irrelevant titles history | |
while titles and (last_level := titles[-1][0]) >= level: | |
titles.pop() | |
# Add current title | |
titles.append((level, title)) | |
title_str = ' '.join([t for l, t in titles if non_empty(t)]) | |
# Don't keep 'preface' in the titles history | |
if title.lower() == 'preface.' and level == 1: | |
titles.pop() | |
else: | |
title_str = title | |
# If section is empty, continue | |
if not sentences: | |
continue | |
# Add the title as a single sentence | |
if not drop_titles and non_empty(title_str): | |
# doc_ids.append(f'{doc_id}_sec{sec_idx}_title') | |
doc_ids.append(f'{sec_idx}') | |
doc_sentences.append(title_str) | |
doc_titles_mask.append(1) | |
doc_levels.append(level) | |
doc_labels.append(0) | |
# Add the sentences | |
for sent_idx, sent in enumerate(sentences): | |
doc_ids.append(f'{sec_idx}_{sent_idx}') | |
doc_sentences.append(sent) | |
doc_titles_mask.append(0) | |
doc_levels.append(level) | |
doc_labels.append(1 if sent_idx == len(sentences) - 1 else 0) | |
out = { | |
'id': doc_id, | |
'ids': doc_ids, | |
'sentences': doc_sentences, | |
'titles_mask': doc_titles_mask, | |
'levels': doc_levels, | |
'labels': doc_labels | |
} | |
if drop_titles: | |
out.pop('titles_mask') | |
out.pop('levels') | |
return out | |
def parse_split_files(split_path: str, drop_titles: bool = False, prepend_title_stack: bool = False):
    """Recursively walk *split_path* and yield one parsed article per file.

    Each file's name becomes the document id; the parsing flags are forwarded
    unchanged to `_parse_article`.
    """
    for dirpath, _subdirs, filenames in os.walk(split_path):
        for filename in filenames:
            full_path = os.path.join(dirpath, filename)
            with open(full_path, 'r', encoding='utf-8') as handle:
                contents = handle.read()
                yield _parse_article(
                    text=contents,
                    id=filename,
                    drop_titles=drop_titles,
                    prepend_title_stack=prepend_title_stack
                )