---
task_categories:
  - text-generation
---

Comma v0.1 Training Dataset (1 Billion Token Sample)

This is a 1 billion token subset of the Comma v0.1 Training Set intended as a convenience for small deep learning experiments. It is similar in spirit to the 1 billion token RedPajama sample, which no longer loads with current versions of the HuggingFace datasets library because it relies on executing arbitrary code (a dataset loading script) at load time.
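Because this sample is stored as plain gzipped JSON Lines shards (produced by the sharding script below), it can be read with the built-in json builder and needs no loading script. A minimal loading sketch, assuming the shards have been downloaded into a local sharded_dataset/ directory:

from datasets import load_dataset

# Load the gzipped JSONL shards with the standard "json" builder;
# no dataset loading script is required. The local path is an assumption.
dataset = load_dataset(
    "json",
    data_files={"train": "sharded_dataset/train-*.jsonl.gz"},
)
print(dataset["train"][0]["text"][:200])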

Method

The subset was created using a single-item-batch version of the following script, which I no longer have:

import os
import json
import random
from argparse import ArgumentParser

from datasets import load_dataset
from transformers import AutoTokenizer

parser = ArgumentParser()
parser.add_argument("--tokens", default=10**9, type=int,
                    help="The number of tokens to subset.")
args = parser.parse_args()

tokenizer = AutoTokenizer.from_pretrained("common-pile/comma-v0.1-1t")
dataset = load_dataset("common-pile/comma_v0.1_training_dataset")

used = set()
token_count = 0
split = {"train": []}
# Resume from a previous checkpoint if one exists
if os.path.exists("subset_resume.json"):
    with open("subset_resume.json") as infile:
        data = json.load(infile)
        used = set(data["used"])
        token_count = data["token_count"]
        split = data["split"]

milestone = 10 ** 6
while token_count < args.tokens:
    # Sample a batch of 64 documents that haven't been selected yet
    choices = set()
    for i in range(64):
        choice = random.randrange(dataset["train"].num_rows)
        while choice in used:
            choice = random.randrange(dataset["train"].num_rows)
        used.add(choice)
        choices.add(choice)
    assert len(choices) == 64
    items = [dataset["train"][choice] for choice in choices]
    texts = [item["text"] for item in items]
    token_count += sum(len(ids) for ids in tokenizer(texts)["input_ids"])
    split["train"].extend(items)
    # Checkpoint roughly every million tokens so the run can be resumed
    if token_count > milestone:
        with open("subset_resume.json", "w") as outfile:
            json.dump({"used": list(used), "token_count": token_count, "split": split},
                      outfile)
        while milestone <= token_count:
            milestone += 10 ** 6
        print(token_count, f"{(token_count / args.tokens) * 100:.2f}%")

with open(f"subset_{args.tokens}.json", "w") as outfile:
    json.dump(split, outfile)

Feel free to modify and use this script to create subsets of other datasets.
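Because whole 64-document batches are added until the budget is crossed, the saved subset slightly overshoots the requested token count. One way to sanity-check the result is to re-tokenize the saved file and print the total; a sketch assuming the default --tokens value of 10**9 (this retokenizes everything, so it takes a while):

import json
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("common-pile/comma-v0.1-1t")

# The filename assumes the default --tokens value of 10**9.
with open("subset_1000000000.json") as infile:
    split = json.load(infile)

# Re-tokenize each document and report the final token count.
total = sum(len(tokenizer(item["text"])["input_ids"]) for item in split["train"])
print(f"{total} tokens across {len(split['train'])} documents")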

The dataset was sharded using the following script:

import json
import gzip
import math
from pathlib import Path

def shard_dataset(input_file, output_dir, num_shards=4):
    """
    Shard a JSON dataset into multiple gzipped JSON lines files.
    
    Args:
        input_file (str): Path to the input JSON file
        output_dir (str): Directory where shards will be saved
        num_shards (int): Number of shards to create
    """
    # Create output directory if it doesn't exist
    Path(output_dir).mkdir(parents=True, exist_ok=True)
    
    # Load the dataset
    print(f"Loading dataset from {input_file}...")
    with open(input_file, 'r') as f:
        data = json.load(f)
    
    # Extract the training examples
    train_examples = data["train"]
    total_examples = len(train_examples)
    examples_per_shard = math.ceil(total_examples / num_shards)
    
    print(f"Found {total_examples} examples, splitting into {num_shards} shards")
    
    # Create each shard
    for shard_idx in range(num_shards):
        # Calculate start and end indices for this shard
        start_idx = shard_idx * examples_per_shard
        end_idx = min((shard_idx + 1) * examples_per_shard, total_examples)
        
        # Format the filename with zero-padding
        filename = f"train-{shard_idx:05d}-of-{num_shards:05d}.jsonl.gz"
        filepath = Path(output_dir) / filename
        
        print(f"Creating shard {shard_idx+1}/{num_shards}: {filename}")
        
        # Write the shard as gzipped JSON lines
        with gzip.open(filepath, 'wt', encoding='utf-8') as f:
            for i in range(start_idx, end_idx):
                # Write each example as a JSON line
                json_line = json.dumps(train_examples[i])
                f.write(json_line + '\n')
    
    print(f"Finished creating {num_shards} shards in {output_dir}")

if __name__ == "__main__":
    # Configuration - update these paths as needed
    input_json_file = "1B_sample/train.json"
    output_directory = "1B_sample/sharded_dataset"
    
    # Shard the dataset into 4 parts
    shard_dataset(input_json_file, output_directory, num_shards=4)
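
After sharding, it is cheap to verify that the shards decompress and parse cleanly. A short round-trip sketch using the same output directory as the script above:

import gzip
import json
from pathlib import Path

# Stream each shard back in and confirm every line parses as JSON.
shard_dir = Path("1B_sample/sharded_dataset")
shards = sorted(shard_dir.glob("train-*.jsonl.gz"))
count = 0
for shard in shards:
    with gzip.open(shard, "rt", encoding="utf-8") as f:
        for line in f:
            json.loads(line)  # raises if a line is corrupt
            count += 1
print(f"{count} examples across {len(shards)} shards")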