---
task_categories:
- text-generation
---

# Comma v0.1 Training Dataset (1 Billion Token Sample)

This is a 1 billion token subset of the [Comma v0.1 Training Set](https://huggingface.co/datasets/common-pile/comma_v0.1_training_dataset) intended as a convenience for small deep learning experiments. It is similar in spirit to the [1 billion token RedPajama sample](https://huggingface.co/datasets/togethercomputer/RedPajama-Data-1T-Sample), which no longer works with HuggingFace transformers because it relies on executing arbitrary code at load time.
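
To use the sample in an experiment, the standard `datasets` loader should be enough. A minimal sketch is below; the repository ID is written as a placeholder since it depends on where this copy of the dataset is hosted, and it assumes the data is stored in a format the generic loader understands:

```python
from datasets import load_dataset

# Placeholder repo ID: substitute this dataset's actual path on the Hub.
dataset = load_dataset("your-username/comma-v0.1-1b-sample", split="train")

# Each row carries its document under the "text" field, as in the source training set.
print(dataset[0]["text"][:200])
```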

## Method

The subset was created with a version of the following script that processed one item per batch rather than 64; I no longer have that original version:

```python
import os
import json
import random
from datasets import load_dataset
from transformers import AutoTokenizer
from argparse import ArgumentParser

parser = ArgumentParser()
parser.add_argument("--tokens", default=10**9, type=int,
                    help="The number of tokens to subset.")
args = parser.parse_args()

tokenizer = AutoTokenizer.from_pretrained("common-pile/comma-v0.1-1t")
dataset = load_dataset("common-pile/comma_v0.1_training_dataset")

used = set()
token_count = 0
split = {"train": []}
# Resume from a checkpoint left by a previous partial run, if one exists.
if os.path.exists("subset_resume.json"):
    with open("subset_resume.json") as infile:
        data = json.load(infile)
    used = set(data["used"])
    token_count = data["token_count"]
    split = data["split"]

milestone = 10 ** 6
while token_count < args.tokens:
    # Draw a batch of 64 row indices that haven't been sampled before.
    choices = set()
    for i in range(64):
        choice = random.randrange(dataset["train"].num_rows)
        while choice in used:
            choice = random.randrange(dataset["train"].num_rows)
        used.add(choice)
        choices.add(choice)
    assert len(choices) == 64
    items = []
    for choice in choices:
        items.append(dataset["train"][choice])
    # Tokenize the batch to count its tokens, then add it to the subset.
    texts = [item["text"] for item in items]
    token_count += sum([len(i) for i in tokenizer(texts)["input_ids"]])
    split["train"].extend(items)
    # Checkpoint roughly every million tokens so the run can be resumed.
    if token_count > milestone:
        with open("subset_resume.json", "w") as outfile:
            serial_used = list(used)
            json.dump({"used": serial_used, "token_count": token_count, "split": split}, outfile)
        milestone += 10 ** 6
        print(token_count, f"{(token_count / args.tokens) * 100}%")

with open(f"subset_{args.tokens}.json", "w") as outfile:
    json.dump(split, outfile)
```

Feel free to modify and use this script to create subsets of other datasets.
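
The script writes its output to `subset_<tokens>.json` as a JSON object holding a single `train` list. A minimal sketch of loading that file back into a HuggingFace dataset, assuming the default `10**9` token target (so the file is named `subset_1000000000.json`):

```python
from datasets import load_dataset

# The JSON builder's `field` argument selects the records stored under the top-level "train" key.
subset = load_dataset("json", data_files="subset_1000000000.json",
                      field="train", split="train")
print(subset.num_rows, subset[0]["text"][:100])
```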