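#!/bin/bash
# Extract the RedPajamaArxiv split of a local SlimPajama-627B-DC Hugging Face
# snapshot: copy the symlinked chunk files, decompress the .jsonl.zst chunks,
# merge the train/test splits, and tokenize the result with Megatron-LLaMA's
# preprocess_data.py.
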
DIR="/sml1/datasets/slimpj/hub/datasets--MBZUAI-LLM--SlimPajama-627B-DC/snapshots/fe5ace6d3edb8568b6a4f608a460d3f7aef7bc0b"
DATASET_NAME="RedPajamaArxiv"
TRAIN_DIR="$DIR/train/$DATASET_NAME"
TEST_DIR="$DIR/test/$DATASET_NAME"
OUTPUT_TRAIN_DIR="$DIR/train/$DATASET_NAME-copy"
OUTPUT_TEST_DIR="$DIR/test/$DATASET_NAME-copy"

mkdir -p $OUTPUT_TEST_DIR
mkdir -p $OUTPUT_TRAIN_DIR

# Files in the snapshot are symlinks into the hub blob store: in `ls -l` output
# field 9 is the chunk name and field 11 the link target.  Copy each resolved
# target into the -copy directory under its chunk name, skipping lines without
# a link target (e.g. the leading "total" line).
cd "$TRAIN_DIR"
ls -lrt . | awk 'NF >= 11 {print $9, $11}' | while read -r a b; do cp "$b" "$OUTPUT_TRAIN_DIR/$a"; done

cd "$TEST_DIR"
ls -lrt . | awk 'NF >= 11 {print $9, $11}' | while read -r a b; do cp "$b" "$OUTPUT_TEST_DIR/$a"; done
cd -

# Staging area for the decompressed corpus.
FINAL_DIR="/sml1/datasets/$DATASET_NAME"
mkdir -p $FINAL_DIR/train $FINAL_DIR/test

# Decompress the train chunks in numeric order.  Chunks are named chunk_1,
# chunk_2, ... chunk_N; pass m expands a glob over all m-digit chunk numbers,
# so p_1.jsonl holds chunk_1-9, p_2.jsonl holds chunk_10-99, and so on.
# mx is the number of digits in the chunk count (the p_[1-$mx] glob below
# assumes mx <= 9).
max_files=$(ls "$TRAIN_DIR" | wc -l)
mx=${#max_files}
GLOB="[0-9]"
for m in $(seq 1 $mx)
do
    unzstd --stdout $OUTPUT_TRAIN_DIR/chunk_$GLOB.jsonl.zst > $FINAL_DIR/train/p_$m.jsonl
    GLOB="[1-9]$GLOB"
done
cat $FINAL_DIR/train/p_[1-$mx].jsonl > $FINAL_DIR/train/final.jsonl

# Repeat the same decompression/merge pass for the test split.
max_files=$(ls "$TEST_DIR" | wc -l)
mx=${#max_files}
GLOB="[0-9]"
for m in $(seq 1 $mx)
do
    unzstd --stdout $OUTPUT_TEST_DIR/chunk_$GLOB.jsonl.zst > $FINAL_DIR/test/p_$m.jsonl
    GLOB="[1-9]$GLOB"
done

cat $FINAL_DIR/test/p_[1-$mx].jsonl > $FINAL_DIR/test/final.jsonl

# Concatenate the train and test splits into a single corpus for tokenization.
cat $FINAL_DIR/*/final.jsonl > $FINAL_DIR/final.jsonl

# Tokenize the merged corpus with the GPT-2 BPE tokenizer; the preprocessed
# dataset is written under $FINAL_DIR/tokenizer/ and the logs go to
# tokenizer.out / tokenizer.err in the current directory.
mkdir -p $FINAL_DIR/tokenizer/
python3 /sml1/Megatron-LLaMA/tools/preprocess_data.py \
    --input $FINAL_DIR/final.jsonl \
    --output-prefix $FINAL_DIR/tokenizer/ \
    --vocab-file /sml1/datasets/gpt2/vocab.json \
    --merge-file /sml1/datasets/gpt2/merges.txt \
    --dataset-impl mmap --tokenizer-type GPT2BPETokenizer \
    --append-eod --workers 8 --chunk-size 50 > tokenizer.out 2> tokenizer.err
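
# The preprocessed dataset (a .bin/.idx pair under $FINAL_DIR/tokenizer/) is
# what Megatron-style training scripts consume via their --data-path argument.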