---
task_categories:
- text-generation
---
# Comma v0.1 Training Dataset (1 Billion Token Sample)
This is a 1 billion token subset of the [Comma v0.1 Training Set](https://huggingface.co/datasets/common-pile/comma_v0.1_training_dataset), intended
as a convenience for small deep learning experiments. It is similar in spirit to the [1 billion token RedPajama sample](https://huggingface.co/datasets/togethercomputer/RedPajama-Data-1T-Sample),
which no longer loads with recent versions of the Hugging Face `datasets` library because it relies on executing arbitrary code (a dataset loading script) at load time.
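## Usage
The sample can be loaded like any other dataset on the Hub. A minimal sketch; the repository ID below is a placeholder, substitute this dataset's actual Hub ID:
```python
from datasets import load_dataset

# Placeholder repository ID; replace with this dataset's actual Hub ID.
dataset = load_dataset("<this-repo-id>", split="train")

# Each row is a document with a "text" field.
print(dataset.num_rows)
print(dataset[0]["text"][:200])
```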
## Method
The subset was created using a single-item-batch version (which I no longer have) of the following script:
```python
import os
import json
import random
from datasets import load_dataset
from transformers import AutoTokenizer
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument("--tokens", default=10**9, type=int,
                    help="The number of tokens to subset.")
args = parser.parse_args()
tokenizer = AutoTokenizer.from_pretrained("common-pile/comma-v0.1-1t")
dataset = load_dataset("common-pile/comma_v0.1_training_dataset")
# Rows already sampled, tokens counted so far, and the subset being built
used = set()
token_count = 0
split = {"train":[]}
# Resume from a previous checkpoint if one exists
if os.path.exists("subset_resume.json"):
    with open("subset_resume.json") as infile:
        data = json.load(infile)
        used = set(data["used"])
        token_count = data["token_count"]
        split = data["split"]
# Write a checkpoint roughly every million tokens (starting past any resumed count)
milestone = (token_count // 10 ** 6 + 1) * 10 ** 6
while token_count < args.tokens:
    # Draw a batch of 64 distinct, previously unused row indices
    choices = set()
    for i in range(64):
        choice = random.randrange(dataset["train"].num_rows)
        while choice in used:
            choice = random.randrange(dataset["train"].num_rows)
        used.add(choice)
        choices.add(choice)
    assert len(choices) == 64
    items = []
    for choice in choices:
        items.append(dataset["train"][choice])
    # Count the batch's tokens and append it to the subset
    texts = [item["text"] for item in items]
    token_count += sum([len(i) for i in tokenizer(texts)["input_ids"]])
    split["train"].extend(items)
    if token_count > milestone:
        with open("subset_resume.json", "w") as outfile:
            serial_used = list(used)
            json.dump({"used":serial_used, "token_count":token_count, "split":split}, outfile)
        milestone += 10 ** 6
    print(token_count, f"{(token_count / args.tokens) * 100}%")
with open(f"subset_{args.tokens}.json", "w") as outfile:
    json.dump(split, outfile)
```
Feel free to modify and use this script to create subsets of other datasets.
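For instance, here is an untested sketch (not part of the original method) of how the hard-coded dataset and tokenizer IDs could be lifted into command-line arguments so the same sampling loop can subset other corpora:
```python
from argparse import ArgumentParser
from datasets import load_dataset
from transformers import AutoTokenizer

# Hypothetical replacement for the hard-coded IDs in the script above.
parser = ArgumentParser()
parser.add_argument("--tokens", default=10**9, type=int,
                    help="The number of tokens to subset.")
parser.add_argument("--dataset", default="common-pile/comma_v0.1_training_dataset",
                    help="Hub ID of the dataset to subsample.")
parser.add_argument("--tokenizer", default="common-pile/comma-v0.1-1t",
                    help="Tokenizer used to count tokens.")
args = parser.parse_args()

tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
dataset = load_dataset(args.dataset)
```
The rest of the loop would stay the same, provided the target dataset also has a `train` split whose rows carry a `text` field.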
The resulting subset was then sharded into gzipped JSON Lines files using the following script:
```python
import json
import gzip
import math
from pathlib import Path
def shard_dataset(input_file, output_dir, num_shards=4):
"""
Shard a JSON dataset into multiple gzipped JSON lines files.
Args:
input_file (str): Path to the input JSON file
output_dir (str): Directory where shards will be saved
num_shards (int): Number of shards to create
"""
# Create output directory if it doesn't exist
Path(output_dir).mkdir(parents=True, exist_ok=True)
# Load the dataset
print(f"Loading dataset from {input_file}...")
with open(input_file, 'r') as f:
data = json.load(f)
# Extract the training examples
train_examples = data["train"]
total_examples = len(train_examples)
examples_per_shard = math.ceil(total_examples / num_shards)
print(f"Found {total_examples} examples, splitting into {num_shards} shards")
# Create each shard
for shard_idx in range(num_shards):
# Calculate start and end indices for this shard
start_idx = shard_idx * examples_per_shard
end_idx = min((shard_idx + 1) * examples_per_shard, total_examples)
# Format the filename with zero-padding
filename = f"train-{shard_idx:05d}-of-{num_shards:05d}.jsonl.gz"
filepath = Path(output_dir) / filename
print(f"Creating shard {shard_idx+1}/{num_shards}: {filename}")
# Write the shard as gzipped JSON lines
with gzip.open(filepath, 'wt', encoding='utf-8') as f:
for i in range(start_idx, end_idx):
# Write each example as a JSON line
json_line = json.dumps(train_examples[i])
f.write(json_line + '\n')
print(f"Finished creating {num_shards} shards in {output_dir}")
if __name__ == "__main__":
# Configuration - update these paths as needed
input_json_file = "1B_sample/train.json" # Update this path
output_directory = "1B_sample/sharded_dataset" # Update this if needed
# Shard the dataset into 4 parts
shard_dataset(input_json_file, output_directory, num_shards=4)
```
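The resulting `train-*-of-00004.jsonl.gz` shards can be read back with the `datasets` JSON loader, which is a quick way to sanity-check them. A sketch, assuming the default output directory used above:
```python
from datasets import load_dataset

# Read the gzipped JSON Lines shards back; `datasets` decompresses .gz transparently.
shards = "1B_sample/sharded_dataset/train-*-of-00004.jsonl.gz"
dataset = load_dataset("json", data_files={"train": shards}, split="train")
print(f"{dataset.num_rows} examples reloaded")
```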