jdpressman committed
Commit 6e2c2d6 · verified · 1 Parent(s): 83dbb3f

Create README.md

Files changed (1): README.md ADDED (+87, -0)
---
task_categories:
- text-generation
---

# Comma v0.1 Training Dataset (10 Billion Token Sample)

This is a 10 billion token subset of the [Comma v0.1 Training Set](https://huggingface.co/datasets/common-pile/comma_v0.1_training_dataset), intended as a convenience for small deep learning experiments. It is similar in spirit to the [1 billion token RedPajama sample](https://huggingface.co/datasets/togethercomputer/RedPajama-Data-1T-Sample), which no longer loads with the Hugging Face `datasets` library because it relies on executing arbitrary code at load time.

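If you just want to consume the sample, it loads like any other collection of JSONL shards on the Hub. A minimal sketch, assuming the placeholder repository id below is replaced with this dataset's actual Hub id:

```python
from datasets import load_dataset

# Placeholder repository id: substitute this dataset's actual Hub id.
dataset = load_dataset("<user>/comma_v0.1_10b_sample", split="train", streaming=True)

# Records mirror the parent Comma v0.1 training set, including the "text" field.
for example in dataset.take(3):
    print(example["text"][:200])
```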
## Method

The data was subsetted with the script below, which samples documents uniformly at random without replacement from the full training set and writes them to gzipped JSONL shards until the requested token budget, counted with the Comma v0.1 tokenizer, is reached:

```python
import os
import json
import gzip
import math
import random
from pathlib import Path
from tqdm import tqdm
from datasets import load_dataset
from transformers import AutoTokenizer
from argparse import ArgumentParser

parser = ArgumentParser()
parser.add_argument("--output-dir", type=Path, default="shards")
parser.add_argument("--tokens", default=10**9, type=int,
                    help="The number of tokens to subset.")
parser.add_argument("--shard-size", type=int, default=(250 * (10 ** 6)))
args = parser.parse_args()

tokenizer = AutoTokenizer.from_pretrained("common-pile/comma-v0.1-1t")
dataset = load_dataset("common-pile/comma_v0.1_training_dataset")

if not os.path.exists(args.output_dir):
    os.mkdir(args.output_dir)

# Sampling state: document indices already drawn, tokens emitted so far,
# and the index of the shard currently being written.
used = set()
token_count = 0
shard_index = 0
if os.path.exists("subset_resume.json"):
    # Resume from the checkpoint written after each completed shard.
    with open("subset_resume.json") as infile:
        data = json.load(infile)
        used = set(data["used"])
        token_count = data["token_count"]
        shard_index = data["shard_index"] + 1

num_shards = math.ceil(args.tokens / args.shard_size)
# Cumulative token count at which the current shard is complete.
milestone = args.shard_size * (shard_index + 1)
progress = tqdm(total=args.tokens)
while token_count < args.tokens:
    progress.set_description(f"Tokens Processed (Shard {shard_index})")
    filename = f"train-{shard_index:05d}-of-{num_shards:05d}.jsonl.gz"
    filepath = Path(args.output_dir) / filename
    with gzip.open(filepath, 'wt', encoding='utf-8') as outfile:
        while token_count < milestone:
            # Draw a batch of 64 document indices that have not been used yet.
            choices = set()
            for i in range(64):
                choice = random.randrange(dataset["train"].num_rows)
                while choice in used:
                    choice = random.randrange(dataset["train"].num_rows)
                used.add(choice)
                choices.add(choice)
            assert len(choices) == 64
            items = []
            for choice in choices:
                items.append(dataset["train"][choice])
            # Count the batch's tokens toward the budget, then write the documents out.
            texts = [item["text"] for item in items]
            new_tokens = sum([len(i) for i in tokenizer(texts)["input_ids"]])
            token_count += new_tokens
            progress.update(new_tokens)
            for item in items:
                json_line = json.dumps(item)
                outfile.write(json_line + "\n")
    if token_count > milestone:
        # Checkpoint progress so an interrupted run can resume with the next shard.
        with open("subset_resume.json", "w") as outfile:
            serial_used = list(used)
            json.dump({"used": serial_used, "token_count": token_count,
                       "shard_index": shard_index}, outfile)
    milestone += args.shard_size
    shard_index += 1
```
Feel free to adapt and use this script to make other subsets.
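The shards the script writes are plain gzipped JSONL, so a subset produced this way can be loaded back without any custom loading code. A minimal sketch, assuming the script was run with the default `--output-dir` of `shards`:

```python
import glob
from datasets import load_dataset

# Assumes the script's default --output-dir ("shards").
files = sorted(glob.glob("shards/train-*.jsonl.gz"))
subset = load_dataset("json", data_files=files, split="train")

print(f"{len(files)} shards, {subset.num_rows} documents")
print(subset[0]["text"][:200])
```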