applied-ai-018 committed
Commit 0387b0f · verified · 1 parent: cf5659c

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes. See the raw diff for the complete set of changes.
Files changed (50)
  1. evaluation/results/tr3/tr3d-1B3-oscar-checkpoints_agg.json +0 -0
  2. jz/configs/dec_only_t5/decoder_only_t5-large.json +22 -0
  3. jz/configs/dec_only_t5/decoder_only_t5-medium.json +22 -0
  4. jz/configs/dec_only_t5/decoder_only_t5-small.json +22 -0
  5. jz/configs/dec_only_t5/decoder_only_t5-tiny.json +22 -0
  6. jz/configs/dec_only_t5/decoder_only_t5-xl.json +22 -0
  7. jz/configs/deepspeed/README.md +1 -0
  8. jz/configs/deepspeed/ds_zero0.json +33 -0
  9. jz/configs/deepspeed/ds_zero2.json +48 -0
  10. jz/configs/deepspeed/ds_zero3.json +56 -0
  11. jz/configs/lm_t5/lm_t5-large.json +23 -0
  12. jz/configs/lm_t5/lm_t5-medium.json +23 -0
  13. jz/configs/lm_t5/lm_t5-small.json +23 -0
  14. jz/configs/lm_t5/lm_t5-tiny.json +23 -0
  15. jz/configs/lm_t5/lm_t5-xl.json +23 -0
  16. jz/crontab/README.md +84 -0
  17. jz/crontab/cron-daily.slurm +23 -0
  18. jz/crontab/cron-hourly.slurm +23 -0
  19. jz/envs/start-prod +60 -0
  20. jz/envs/start-user +59 -0
  21. jz/envs/workarounds.md +8 -0
  22. jz/model_storage/move_checkpoints_to_store_tr11b.slurm +44 -0
  23. jz/model_storage/move_checkpoints_to_store_tr11e.slurm +43 -0
  24. jz/model_storage/move_checkpoints_to_store_tr11f.slurm +44 -0
  25. jz/scripts/custom_callbacks.py +95 -0
  26. jz/scripts/run_clm.py +520 -0
  27. jz/scripts/run_clm_prompted.py +534 -0
  28. jz/scripts/run_text2text.py +514 -0
  29. jz/slurms_scripts/cpu.slurm +40 -0
  30. jz/slurms_scripts/eval.slurm +37 -0
  31. jz/slurms_scripts/lmt5.slurm +51 -0
  32. train/arch-and-scaling-template.slurm +186 -0
  33. train/fixes.md +70 -0
  34. train/lessons-learned.md +88 -0
  35. train/tflops_optimization.md +33 -0
  36. train/tr10-13B-ml/chronicles.md +0 -0
  37. train/tr13-mtf/smaller_models/tr13-6b3-mtf-xp3zhmt.slurm +211 -0
  38. train/tr13-mtf/smaller_models/tr13e-350m-mtf-xp3capmixnewcodelonglossseq-a100.slurm +212 -0
  39. train/tr13-mtf/smaller_models/tr13e-350m-mtf-xp3capmixnewcodelonglossseq.slurm +211 -0
  40. train/tr13-mtf/smaller_models/tr13e-760m-mtf-xp3capmixnewcodelonglossseq-a100.slurm +211 -0
  41. train/tr13-mtf/smaller_models/tr13e-760m-mtf-xp3capmixnewcodelonglossseq.slurm +211 -0
  42. train/tr13-mtf/smaller_models/tr13f-6B3-mtf-eos.slurm +209 -0
  43. train/tr13-mtf/smaller_models/tr13f-6b3-mtf-p31.slurm +210 -0
  44. train/tr13-mtf/smaller_models/tr13f-6b3-mtf-xp3capmix.slurm +210 -0
  45. train/tr13-mtf/smaller_models/tr13f-6b3-mtf-xp3capmixfixlong.slurm +210 -0
  46. train/tr13-mtf/smaller_models/tr13f-6b3-mtf-xp3capmixfixlonglossseq.slurm +211 -0
  47. train/tr13-mtf/smaller_models/tr13f-6b3-mtf-xp3capmixfixlonglossseq2.slurm +212 -0
  48. train/tr13-mtf/smaller_models/tr13f-6b3-mtf-xp3capmixlonglossseq.slurm +211 -0
  49. train/tr13-mtf/smaller_models/tr13f-6b3-mtf-xp3capmixlossseqeos.slurm +211 -0
  50. train/tr13-mtf/smaller_models/tr13f-6b3-mtf-xp3capmixnewcodelonglossseq-val.slurm +212 -0
evaluation/results/tr3/tr3d-1B3-oscar-checkpoints_agg.json ADDED
The diff for this file is too large to render. See raw diff
 
jz/configs/dec_only_t5/decoder_only_t5-large.json ADDED
@@ -0,0 +1,22 @@
1
+ {
2
+ "architectures": [
3
+ "DecoderOnlyT5LMHeadModel"
4
+ ],
5
+ "d_ff": 5120,
6
+ "d_kv": 64,
7
+ "d_model": 1280,
8
+ "dropout_rate": 0.1,
9
+ "eos_token_id": 1,
10
+ "initializer_factor": 1.0,
11
+ "is_encoder_decoder": true,
12
+ "layer_norm_epsilon": 1e-06,
13
+ "model_type": "decoder_only_t5",
14
+ "num_heads": 20,
15
+ "num_layers": 36,
16
+ "output_past": true,
17
+ "pad_token_id": 0,
18
+ "relative_attention_num_buckets": 64,
19
+ "task_specific_params": {
20
+ },
21
+ "vocab_size": 32128
22
+ }
jz/configs/dec_only_t5/decoder_only_t5-medium.json ADDED
@@ -0,0 +1,22 @@
1
+ {
2
+ "architectures": [
3
+ "DecoderOnlyT5LMHeadModel"
4
+ ],
5
+ "d_ff": 4096,
6
+ "d_kv": 64,
7
+ "d_model": 1024,
8
+ "dropout_rate": 0.1,
9
+ "eos_token_id": 1,
10
+ "initializer_factor": 1.0,
11
+ "is_encoder_decoder": true,
12
+ "layer_norm_epsilon": 1e-06,
13
+ "model_type": "decoder_only_t5",
14
+ "num_heads": 16,
15
+ "num_layers": 24,
16
+ "output_past": true,
17
+ "pad_token_id": 0,
18
+ "relative_attention_num_buckets": 64,
19
+ "task_specific_params": {
20
+ },
21
+ "vocab_size": 32128
22
+ }
jz/configs/dec_only_t5/decoder_only_t5-small.json ADDED
@@ -0,0 +1,22 @@
1
+ {
2
+ "architectures": [
3
+ "DecoderOnlyT5LMHeadModel"
4
+ ],
5
+ "d_ff": 3072,
6
+ "d_kv": 64,
7
+ "d_model": 768,
8
+ "dropout_rate": 0.1,
9
+ "eos_token_id": 1,
10
+ "initializer_factor": 1.0,
11
+ "is_encoder_decoder": false,
12
+ "layer_norm_epsilon": 1e-06,
13
+ "model_type": "decoder_only_t5",
14
+ "num_heads": 12,
15
+ "num_layers": 12,
16
+ "output_past": true,
17
+ "pad_token_id": 0,
18
+ "relative_attention_num_buckets": 64,
19
+ "task_specific_params": {
20
+ },
21
+ "vocab_size": 32128
22
+ }
jz/configs/dec_only_t5/decoder_only_t5-tiny.json ADDED
@@ -0,0 +1,22 @@
1
+ {
2
+ "architectures": [
3
+ "DecoderOnlyT5LMHeadModel"
4
+ ],
5
+ "d_ff": 2048,
6
+ "d_kv": 64,
7
+ "d_model": 512,
8
+ "dropout_rate": 0.1,
9
+ "eos_token_id": 1,
10
+ "initializer_factor": 1.0,
11
+ "is_encoder_decoder": false,
12
+ "layer_norm_epsilon": 1e-06,
13
+ "model_type": "decoder_only_t5",
14
+ "num_heads": 8,
15
+ "num_layers": 6,
16
+ "output_past": true,
17
+ "pad_token_id": 0,
18
+ "relative_attention_num_buckets": 64,
19
+ "task_specific_params": {
20
+ },
21
+ "vocab_size": 32128
22
+ }
jz/configs/dec_only_t5/decoder_only_t5-xl.json ADDED
@@ -0,0 +1,22 @@
1
+ {
2
+ "architectures": [
3
+ "DecoderOnlyT5LMHeadModel"
4
+ ],
5
+ "d_ff": 6400,
6
+ "d_kv": 64,
7
+ "d_model": 1600,
8
+ "dropout_rate": 0.1,
9
+ "eos_token_id": 1,
10
+ "initializer_factor": 1.0,
11
+ "is_encoder_decoder": true,
12
+ "layer_norm_epsilon": 1e-06,
13
+ "model_type": "decoder_only_t5",
14
+ "num_heads": 25,
15
+ "num_layers": 48,
16
+ "output_past": true,
17
+ "pad_token_id": 0,
18
+ "relative_attention_num_buckets": 64,
19
+ "task_specific_params": {
20
+ },
21
+ "vocab_size": 32128
22
+ }
jz/configs/deepspeed/README.md ADDED
@@ -0,0 +1 @@
1
+ # Deepspeed configs
jz/configs/deepspeed/ds_zero0.json ADDED
@@ -0,0 +1,33 @@
1
+ {
2
+ "fp16": {
3
+ "enabled": "auto",
4
+ "loss_scale": 0,
5
+ "loss_scale_window": 1000,
6
+ "initial_scale_power": 16,
7
+ "hysteresis": 2,
8
+ "min_loss_scale": 1
9
+ },
10
+ "optimizer": {
11
+ "type": "AdamW",
12
+ "params": {
13
+ "lr": "auto",
14
+ "betas": "auto",
15
+ "eps": "auto",
16
+ "weight_decay": "auto"
17
+ }
18
+ },
19
+ "scheduler": {
20
+ "type": "WarmupLR",
21
+ "params": {
22
+ "warmup_min_lr": "auto",
23
+ "warmup_max_lr": "auto",
24
+ "warmup_num_steps": "auto"
25
+ }
26
+ },
27
+ "gradient_accumulation_steps": "auto",
28
+ "gradient_clipping": "auto",
29
+ "steps_per_print": 2000,
30
+ "train_batch_size": "auto",
31
+ "train_micro_batch_size_per_gpu": "auto",
32
+ "wall_clock_breakdown": false
33
+ }
jz/configs/deepspeed/ds_zero2.json ADDED
@@ -0,0 +1,48 @@
1
+ {
2
+ "fp16": {
3
+ "enabled": true,
4
+ "loss_scale": 0,
5
+ "loss_scale_window": 1000,
6
+ "initial_scale_power": 32,
7
+ "hysteresis": 2,
8
+ "min_loss_scale": 1
9
+ },
10
+
11
+ "optimizer": {
12
+ "type": "AdamW",
13
+ "params": {
14
+ "lr": "auto",
15
+ "betas": "auto",
16
+ "eps": "auto",
17
+ "weight_decay": "auto"
18
+ }
19
+ },
20
+
21
+ "scheduler": {
22
+ "type": "WarmupDecayLR",
23
+ "params": {
24
+ "warmup_min_lr": "auto",
25
+ "warmup_max_lr": "auto",
26
+ "warmup_num_steps": "auto",
27
+ "total_num_steps": "auto"
28
+ }
29
+ },
30
+
31
+ "zero_optimization": {
32
+ "stage": 2,
33
+ "allgather_partitions": true,
34
+ "allgather_bucket_size": 3e8,
35
+ "overlap_comm": true,
36
+ "reduce_scatter": true,
37
+ "reduce_bucket_size": 3e8,
38
+ "contiguous_gradients": true,
39
+ "cpu_offload": true
40
+ },
41
+
42
+ "gradient_accumulation_steps": "auto",
43
+ "gradient_clipping": "auto",
44
+ "steps_per_print": 5000,
45
+ "train_batch_size": "auto",
46
+ "train_micro_batch_size_per_gpu": "auto",
47
+ "wall_clock_breakdown": false
48
+ }
jz/configs/deepspeed/ds_zero3.json ADDED
@@ -0,0 +1,56 @@
1
+ {
2
+ "fp16": {
3
+ "enabled": true,
4
+ "loss_scale": 0,
5
+ "loss_scale_window": 1000,
6
+ "initial_scale_power": 16,
7
+ "hysteresis": 2,
8
+ "min_loss_scale": 1
9
+ },
10
+
11
+ "optimizer": {
12
+ "type": "AdamW",
13
+ "params": {
14
+ "lr": "auto",
15
+ "betas": "auto",
16
+ "eps": "auto",
17
+ "weight_decay": "auto"
18
+ }
19
+ },
20
+
21
+ "scheduler": {
22
+ "type": "WarmupDecayLR",
23
+ "params": {
24
+ "warmup_min_lr": "auto",
25
+ "warmup_max_lr": "auto",
26
+ "warmup_num_steps": "auto",
27
+ "total_num_steps": "auto"
28
+ }
29
+ },
30
+ "zero_optimization": {
31
+ "stage": 3,
32
+ "offload_optimizer": {
33
+ "device": "cpu",
34
+ "pin_memory": true
35
+ },
36
+ "offload_param": {
37
+ "device": "cpu",
38
+ "pin_memory": true
39
+ },
40
+ "overlap_comm": true,
41
+ "contiguous_gradients": true,
42
+ "sub_group_size": 1e14,
43
+ "reduce_bucket_size": "auto",
44
+ "stage3_prefetch_bucket_size": "auto",
45
+ "stage3_param_persistence_threshold": "auto",
46
+ "stage3_max_live_parameters": 1e9,
47
+ "stage3_max_reuse_distance": 1e9,
48
+ "stage3_gather_fp16_weights_on_model_save": true
49
+ },
50
+ "gradient_accumulation_steps": "auto",
51
+ "gradient_clipping": "auto",
52
+ "steps_per_print": 5000,
53
+ "train_batch_size": "auto",
54
+ "train_micro_batch_size_per_gpu": "auto",
55
+ "wall_clock_breakdown": false
56
+ }
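These DeepSpeed configs leave most values on `"auto"` so the HF `Trainer` can fill them in from its own command-line arguments. As a rough sketch of how such a file is consumed (the model, dataset and output path below are illustrative placeholders, not values taken from this commit), the config is passed through the Trainer's `--deepspeed` flag via the `deepspeed` launcher:

```bash
# Sketch only: model, dataset and paths are illustrative placeholders.
deepspeed --num_gpus=4 jz/scripts/run_clm.py \
    --model_name_or_path gpt2 \
    --dataset_name oscar --dataset_config_name unshuffled_deduplicated_fr \
    --do_train --do_eval \
    --output_dir output \
    --deepspeed jz/configs/deepspeed/ds_zero2.json
```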
jz/configs/lm_t5/lm_t5-large.json ADDED
@@ -0,0 +1,23 @@
1
+ {
2
+ "architectures": [
3
+ "T5WithLMHeadModel"
4
+ ],
5
+ "d_ff": 5120,
6
+ "d_kv": 64,
7
+ "d_model": 1280,
8
+ "decoder_start_token_id": 0,
9
+ "dropout_rate": 0.1,
10
+ "eos_token_id": 1,
11
+ "initializer_factor": 1.0,
12
+ "is_encoder_decoder": true,
13
+ "layer_norm_epsilon": 1e-06,
14
+ "model_type": "t5",
15
+ "num_heads": 20,
16
+ "num_layers": 36,
17
+ "output_past": true,
18
+ "pad_token_id": 0,
19
+ "relative_attention_num_buckets": 64,
20
+ "task_specific_params": {
21
+ },
22
+ "vocab_size": 32128
23
+ }
jz/configs/lm_t5/lm_t5-medium.json ADDED
@@ -0,0 +1,23 @@
1
+ {
2
+ "architectures": [
3
+ "T5WithLMHeadModel"
4
+ ],
5
+ "d_ff": 4096,
6
+ "d_kv": 64,
7
+ "d_model": 1024,
8
+ "decoder_start_token_id": 0,
9
+ "dropout_rate": 0.1,
10
+ "eos_token_id": 1,
11
+ "initializer_factor": 1.0,
12
+ "is_encoder_decoder": true,
13
+ "layer_norm_epsilon": 1e-06,
14
+ "model_type": "t5",
15
+ "num_heads": 16,
16
+ "num_layers": 24,
17
+ "output_past": true,
18
+ "pad_token_id": 0,
19
+ "relative_attention_num_buckets": 64,
20
+ "task_specific_params": {
21
+ },
22
+ "vocab_size": 32128
23
+ }
jz/configs/lm_t5/lm_t5-small.json ADDED
@@ -0,0 +1,23 @@
1
+ {
2
+ "architectures": [
3
+ "T5WithLMHeadModel"
4
+ ],
5
+ "d_ff": 3072,
6
+ "d_kv": 64,
7
+ "d_model": 768,
8
+ "decoder_start_token_id": 0,
9
+ "dropout_rate": 0.1,
10
+ "eos_token_id": 1,
11
+ "initializer_factor": 1.0,
12
+ "is_encoder_decoder": true,
13
+ "layer_norm_epsilon": 1e-06,
14
+ "model_type": "t5",
15
+ "num_heads": 12,
16
+ "num_layers": 12,
17
+ "output_past": true,
18
+ "pad_token_id": 0,
19
+ "relative_attention_num_buckets": 64,
20
+ "task_specific_params": {
21
+ },
22
+ "vocab_size": 32128
23
+ }
jz/configs/lm_t5/lm_t5-tiny.json ADDED
@@ -0,0 +1,23 @@
1
+ {
2
+ "architectures": [
3
+ "T5WithLMHeadModel"
4
+ ],
5
+ "d_ff": 2048,
6
+ "d_kv": 64,
7
+ "d_model": 512,
8
+ "decoder_start_token_id": 0,
9
+ "dropout_rate": 0.1,
10
+ "eos_token_id": 1,
11
+ "initializer_factor": 1.0,
12
+ "is_encoder_decoder": true,
13
+ "layer_norm_epsilon": 1e-06,
14
+ "model_type": "t5",
15
+ "num_heads": 8,
16
+ "num_layers": 6,
17
+ "output_past": true,
18
+ "pad_token_id": 0,
19
+ "relative_attention_num_buckets": 64,
20
+ "task_specific_params": {
21
+ },
22
+ "vocab_size": 32128
23
+ }
jz/configs/lm_t5/lm_t5-xl.json ADDED
@@ -0,0 +1,23 @@
1
+ {
2
+ "architectures": [
3
+ "T5WithLMHeadModel"
4
+ ],
5
+ "d_ff": 6400,
6
+ "d_kv": 64,
7
+ "d_model": 1600,
8
+ "decoder_start_token_id": 0,
9
+ "dropout_rate": 0.1,
10
+ "eos_token_id": 1,
11
+ "initializer_factor": 1.0,
12
+ "is_encoder_decoder": true,
13
+ "layer_norm_epsilon": 1e-06,
14
+ "model_type": "t5",
15
+ "num_heads": 25,
16
+ "num_layers": 48,
17
+ "output_past": true,
18
+ "pad_token_id": 0,
19
+ "relative_attention_num_buckets": 64,
20
+ "task_specific_params": {
21
+ },
22
+ "vocab_size": 32128
23
+ }
jz/crontab/README.md ADDED
@@ -0,0 +1,84 @@
1
+ # Crontab Jobs
2
+
3
+ JZ has no crontab so we have to emulate it.
4
+
5
+ Put your slurm scripts into either:
6
+ ```
7
+ $six_ALL_CCFRWORK/cron/cron.hourly
8
+ $six_ALL_CCFRWORK/cron/cron.daily
9
+ ```
10
+ depending on whether you want to run those approximately once an hour or once a day.
11
+
12
+ Any script found in these dirs with a `.slurm` extension will be run as `sbatch scriptname`.
13
+
14
+ ## The scheduler
15
+
16
+ The scheduler isn't run automatically; we have to launch it ourselves and make sure it gets restarted manually if SLURM
17
+ is restarted (not sure whether queued jobs get preserved):
18
+
19
+ * [cron-hourly.slurm](./cron-hourly.slurm)
20
+ * [cron-daily.slurm](./cron-daily.slurm)
21
+
22
+ If these two jobs aren't listed when you run:
23
+
24
+ ```
25
+ squeue --user=$(getent group six | cut -d: -f4) | grep cron
26
+ ```
27
+ then re-launch the missing one(s) with:
28
+ ```
29
+ cd $six_ALL_CCFRWORK/cron/scheduler
30
+ sbatch cron-hourly.slurm
31
+ sbatch cron-daily.slurm
32
+ ```
33
+
34
+ If these scripts aren't there, copy them from the folder in the repo where this README.md is located.
35
+
36
+ XXX: need some kind of a watchdog to ensure the 2 cron scheduler jobs don't disappear.
37
+
38
+ quick alias to test:
39
+ ```
40
+ alias cron-check="squeue --user=$(getent group six | cut -d: -f4) | grep cron"
41
+ ```
42
+
43
+ ## Example daily entry
44
+
45
+ Here is an example of a job that runs daily.
46
+ ```
47
+ $ cat $six_ALL_CCFRWORK/cron/cron.daily/mlocate-update.slurm
48
+ #!/bin/bash
49
+ #SBATCH --job-name=mlocate-update # job name
50
+ #SBATCH --ntasks=1 # number of MP tasks
51
+ #SBATCH --nodes=1
52
+ #SBATCH --hint=nomultithread # we get physical cores not logical
53
+ #SBATCH --time=1:00:00 # maximum execution time (HH:MM:SS)
54
+ #SBATCH --output=%x-%j.out # output file name
55
+ #SBATCH --partition=compil
56
+ #SBATCH --account=six@cpu
57
+
58
+ set -e
59
+ date
60
+ echo "updating mlocate db"
61
+ # "--require-visibility 0" is required when launching this command as a regular user
62
+ /usr/bin/updatedb -o $ALL_CCFRWORK/lib/mlocate/work.db -U $ALL_CCFRWORK --require-visibility 0
63
+ /usr/bin/updatedb -o $ALL_CCFRWORK/lib/mlocate/worksf.db -U /gpfsssd/worksf/projects/rech/six/commun --require-visibility 0
64
+ ```
65
+
66
+ This builds an index of the files under WORK which you can then quickly query with:
67
+ ```
68
+ /usr/bin/locate -d /gpfswork/rech/six/commun/lib/mlocate/mlocate.db pattern
69
+ ```
70
+
71
+ The slurm script `mlocate-update.slurm` has been placed inside `$six_ALL_CCFRWORK/cron/cron.daily`. To stop running it, just move it elsewhere.
72
+
73
+ Another approach to adding/removing is to keep the slurm scripts elsewhere and symlink to them from either
74
+ `$six_ALL_CCFRWORK/cron/cron.daily` or `$six_ALL_CCFRWORK/cron/cron.hourly` as needed.
75
+
76
+
77
+ ## Permissions
78
+
79
+ The scheduler runs with the Unix permissions of the person who launched the SLURM cron scheduler job, and so do all the SLURM scripts launched by that cron job.
80
+
81
+
82
+ ## TODO
83
+
84
+ XXX: we need a facility to report failures. This is tricky because the job has to run on a SLURM partition with Internet access, and only `--partition=prepost` and `--partition=compil` provide that.
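To complement the daily example above, here is a minimal skeleton for an hourly entry; the job name and body are purely illustrative and not part of this commit:

```bash
$ cat $six_ALL_CCFRWORK/cron/cron.hourly/my-hourly-job.slurm
#!/bin/bash
#SBATCH --job-name=my-hourly-job     # job name (illustrative)
#SBATCH --ntasks=1                   # number of MP tasks
#SBATCH --nodes=1
#SBATCH --hint=nomultithread         # we get physical cores not logical
#SBATCH --time=0:10:00               # maximum execution time (HH:MM:SS)
#SBATCH --output=%x-%j.out           # output file name
#SBATCH --partition=compil
#SBATCH --account=six@cpu

set -e
date
echo "hourly housekeeping goes here"
```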
jz/crontab/cron-daily.slurm ADDED
@@ -0,0 +1,23 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=cron-daily # job name
3
+ #SBATCH --ntasks=1 # number of MP tasks
4
+ #SBATCH --nodes=1
5
+ #SBATCH --hint=nomultithread # we get physical cores not logical
6
+ #SBATCH --time=2:00:00 # maximum execution time (HH:MM:SS)
7
+ #SBATCH --output=%x-%j.out # output file name
8
+ #SBATCH --partition=compil
9
+ #SBATCH --account=six@cpu
10
+
11
+ # do not set -e - we must run all of it
12
+ # set -x -e
13
+
14
+ cd $six_ALL_CCFRWORK/cron/scheduler
15
+
16
+ # ensure to restart self first
17
+ sbatch --begin=now+24hour cron-daily.slurm
18
+
19
+ # now launch any slurm scripts in cron.daily
20
+ cd $six_ALL_CCFRWORK/cron/cron.daily
21
+ for f in *.slurm; do
22
+ sbatch "$f"
23
+ done
jz/crontab/cron-hourly.slurm ADDED
@@ -0,0 +1,23 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=cron-hourly # job name
3
+ #SBATCH --ntasks=1 # number of MP tasks
4
+ #SBATCH --nodes=1
5
+ #SBATCH --hint=nomultithread # we get physical cores not logical
6
+ #SBATCH --time=0:30:00 # maximum execution time (HH:MM:SS)
7
+ #SBATCH --output=%x-%j.out # output file name
8
+ #SBATCH --partition=compil
9
+ #SBATCH --account=six@cpu
10
+
11
+ # do not set -e - we must run all of it
12
+ # set -x -e
13
+
14
+ cd $six_ALL_CCFRWORK/cron/scheduler
15
+
16
+ # ensure to restart self first
17
+ sbatch --begin=now+1hour cron-hourly.slurm
18
+
19
+ # now launch any slurm scripts in cron.hourly
20
+ cd $six_ALL_CCFRWORK/cron/cron.hourly
21
+ for f in *.slurm; do
22
+ sbatch "$f"
23
+ done
jz/envs/start-prod ADDED
@@ -0,0 +1,60 @@
1
+ # This is a python production script for JZ
2
+ #
3
+ # Activate with:
4
+ #
5
+ # source ./start-prod
6
+ #
7
+ #
8
+
9
+ # if this session isn't run via a login shell, which is the case when running a
10
+ # command which is not shell via ssh, the bash function `module` will be missing.
11
+ # so work around it by emulating part of the login shell that loads modules environment
12
+ # if [ -z $(type -t module) ]
13
+ # then
14
+ # . /etc/profile.d/z_modules.sh
15
+ # fi
16
+ module purge
17
+ module load pytorch-gpu/py3/1.8.1
18
+ module load nvtop git git-lfs github-cli mc
19
+
20
+ # git prompt
21
+ export GIT_PROMPT_ONLY_IN_REPO=0;
22
+ export GIT_PROMPT_THEME="JZPRod"
23
+ source $six_ALL_CCFRWORK/envs/.bash-git-prompt/gitprompt.sh
24
+
25
+ # We are using common disk spaces for datasets, caches, and experiment dumps:
26
+ #
27
+ #- Code, cache and datasets -> `$six_ALL_CCFRWORK/cache_dir` and `$six_ALL_CCFRWORK/datasets`
28
+ #- Experiment dumps -> `$six_ALL_CCFRWORK/experiments`
29
+
30
+ # specific caches
31
+
32
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
33
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
34
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
35
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
36
+
37
+ #export PYTHONPATH=$WORK/hf/transformers-master/src
38
+
39
+ export DATASETS_CUSTOM=$six_ALL_CCFRWORK/datasets-custom
40
+
41
+ ### CONDA ###
42
+
43
+ # >>> conda initialize >>>
44
+ # !! Contents within this block are managed by 'conda init' !!
45
+ __conda_setup="$('/gpfslocalsup/pub/anaconda-py3/2020.02/bin/conda' 'shell.bash' 'hook' 2> /dev/null)"
46
+ if [ $? -eq 0 ]; then
47
+ eval "$__conda_setup"
48
+ else
49
+ if [ -f "/gpfslocalsup/pub/anaconda-py3/2020.02/etc/profile.d/conda.sh" ]; then
50
+ . "/gpfslocalsup/pub/anaconda-py3/2020.02/etc/profile.d/conda.sh"
51
+ else
52
+ export PATH="/gpfslocalsup/pub/anaconda-py3/2020.02/bin:$PATH"
53
+ fi
54
+ fi
55
+ unset __conda_setup
56
+ # <<< conda initialize <<<
57
+
58
+ export CONDA_ENVS_PATH=$six_ALL_CCFRWORK/conda
59
+ conda activate base
60
+ conda activate hf-prod
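A hedged usage note: this file is meant to be sourced, not executed, either from an interactive shell or near the top of a SLURM script. The install location below is an assumption for illustration, not taken from this commit:

```bash
# assuming start-prod was copied or symlinked into the shared envs dir
source $six_ALL_CCFRWORK/envs/start-prod
python -c "import transformers; print(transformers.__version__)"
```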
jz/envs/start-user ADDED
@@ -0,0 +1,59 @@
1
+ # user env start script
2
+
3
+ # replace stas with the name of your conda env in this script
4
+
5
+ # if this session isn't run via a login shell, which is the case when running a
6
+ # command which is not shell via ssh, the bash function `module` will be missing.
7
+ # so work around it by emulating part of the login shell that loads modules environment
8
+ #if [ -z $(type -t module) ]
9
+ #then
10
+ # . /etc/profile.d/z_modules.sh
11
+ #fi
12
+ module purge
13
+ module load pytorch-gpu/py3/1.8.1
14
+ module load nvtop git git-lfs github-cli mc
15
+
16
+ # git prompt
17
+ export GIT_PROMPT_ONLY_IN_REPO=0;
18
+ export GIT_PROMPT_THEME="JZPRod"
19
+ source $six_ALL_CCFRWORK/envs/.bash-git-prompt/gitprompt.sh
20
+
21
+ # We are using common disk spaces for datasets, caches, and experiment dumps:
22
+ #
23
+ #- Code, cache and datasets -> `$six_ALL_CCFRWORK/cache_dir` and `$ALL_CCFRWORK/datasets`
24
+ #- Experiment dumps -> `$six_ALL_CCFRWORK/EXPERIMENTS`
25
+
26
+ # specific caches
27
+
28
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
29
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
30
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
31
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
32
+
33
+ #export PYTHONPATH=$WORK/hf/transformers-master/src
34
+
35
+ export DATASETS_CUSTOM=$six_ALL_CCFRWORK/datasets-custom
36
+
37
+
38
+
39
+
40
+ ### CONDA ###
41
+
42
+ # >>> conda initialize >>>
43
+ # !! Contents within this block are managed by 'conda init' !!
44
+ __conda_setup="$('/gpfslocalsup/pub/anaconda-py3/2020.02/bin/conda' 'shell.bash' 'hook' 2> /dev/null)"
45
+ if [ $? -eq 0 ]; then
46
+ eval "$__conda_setup"
47
+ else
48
+ if [ -f "/gpfslocalsup/pub/anaconda-py3/2020.02/etc/profile.d/conda.sh" ]; then
49
+ . "/gpfslocalsup/pub/anaconda-py3/2020.02/etc/profile.d/conda.sh"
50
+ else
51
+ export PATH="/gpfslocalsup/pub/anaconda-py3/2020.02/bin:$PATH"
52
+ fi
53
+ fi
54
+ unset __conda_setup
55
+ # <<< conda initialize <<<
56
+
57
+ export CONDA_ENVS_PATH=$six_ALL_CCFRWORK/conda
58
+ conda activate base
59
+ conda activate stas
jz/envs/workarounds.md ADDED
@@ -0,0 +1,8 @@
1
+ # Workarounds
2
+
3
+ ## Missing certificates
4
+
5
+ Sometimes certificates are missing. It's possible to point to our own local copies of the certificates instead: simply copy them to `$six_ALL_CCFRWORK/etc/ssl/certs/` (or any other relevant folder) and set:
6
+ ```bash
7
+ export CURL_CA_BUNDLE=$six_ALL_CCFRWORK/etc/ssl/certs/ca-certificates.crt
8
+ ```
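If a client doesn't honour `CURL_CA_BUNDLE`, the equivalent variables for Python tooling can point at the same bundle (same assumed location as above):

```bash
export REQUESTS_CA_BUNDLE=$six_ALL_CCFRWORK/etc/ssl/certs/ca-certificates.crt
export SSL_CERT_FILE=$six_ALL_CCFRWORK/etc/ssl/certs/ca-certificates.crt
```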
jz/model_storage/move_checkpoints_to_store_tr11b.slurm ADDED
@@ -0,0 +1,44 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=tr11b_move_to_tar # job name
3
+ #SBATCH --ntasks=1 # number of MP tasks
4
+ #SBATCH --nodes=1
5
+ #SBATCH --cpus-per-task=4 # number of cores per tasks
6
+ #SBATCH --hint=nomultithread # we get physical cores not logical
7
+ #SBATCH --time=20:00:00 # maximum execution time (HH:MM:SS)
8
+ #SBATCH --output=logs/%x-%j.out # output file name
9
+ #SBATCH --account=six@cpu
10
+ #SBATCH --array=0-1362%1
11
+ #SBATCH --partition=cpu_p1
12
+
13
+ # DEBUG
14
+ # SLURM_ARRAY_TASK_ID=0 # 0-6549
15
+
16
+ pushd $six_ALL_CCFRWORK/checkpoints
17
+ # readarray CHECKPOINTS < <(find . -regex '\./tr11[a-z].*/global_step[0-9]*')
18
+ # DEBUG regex to test out only on tr11e-350
19
+ # readarray CHECKPOINTS < <(find . -regex '\./tr11e-350M-ml/.*/global_step[0-9]*')
20
+ # batch size 512 -> one out of 4 checkpoints for 1B tokens
21
+ readarray CHECKPOINTS < <(find . -regex '\./tr11b-1B3-ml/.*/global_step[0-9]*000')
22
+
23
+ echo "Total number of checkpoints to tar: ${#CHECKPOINTS[@]}"
24
+
25
+ CHECKPOINT_TO_TAR=${CHECKPOINTS[$SLURM_ARRAY_TASK_ID]}
26
+ echo "Checkpoint to tar: $CHECKPOINT_TO_TAR"
27
+
28
+ TEMPNAME=$(dirname $CHECKPOINT_TO_TAR)
29
+ DIRNAME=${TEMPNAME:2}
30
+ BASENAME=$(basename $CHECKPOINT_TO_TAR)
31
+
32
+ CHECKPOINT_TO_TAR=$DIRNAME/$BASENAME
33
+ CHECKPOINT_TAR_TO_FOLDER=$six_ALL_CCFRSTORE/checkpoints/$DIRNAME
34
+ CHECKPOINT_TAR_TO=$CHECKPOINT_TAR_TO_FOLDER/$BASENAME.tar
35
+
36
+ mkdir -p $CHECKPOINT_TAR_TO_FOLDER
37
+ echo $CHECKPOINT_TO_TAR
38
+ echo $CHECKPOINT_TAR_TO_FOLDER
39
+
40
+ # cvfj for bz2 compression; won't change much
41
+ tar cvf $CHECKPOINT_TAR_TO $CHECKPOINT_TO_TAR
42
+
43
+ popd
44
+
jz/model_storage/move_checkpoints_to_store_tr11e.slurm ADDED
@@ -0,0 +1,43 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=move_to_tar # job name
3
+ #SBATCH --ntasks=1 # number of MP tasks
4
+ #SBATCH --nodes=1
5
+ #SBATCH --cpus-per-task=4 # number of cores per tasks
6
+ #SBATCH --hint=nomultithread # we get physical cores not logical
7
+ #SBATCH --time=20:00:00 # maximum execution time (HH:MM:SS)
8
+ #SBATCH --output=logs/%x-%j.out # output file name
9
+ #SBATCH --account=six@cpu
10
+ #SBATCH --array=0-276%1
11
+ #SBATCH --partition=cpu_p1
12
+
13
+ # DEBUG
14
+ # SLURM_ARRAY_TASK_ID=0 # 0-6549
15
+
16
+ pushd $six_ALL_CCFRWORK/checkpoints
17
+ # readarray CHECKPOINTS < <(find . -regex '\./tr11[a-z].*/global_step[0-9]*')
18
+ # DEBUG regex to test out only on tr11e-350
19
+ # readarray CHECKPOINTS < <(find . -regex '\./tr11e-350M-ml/.*/global_step[0-9]*')
20
+ # batch size 256 -> one out of 8 checkpoints for 1B tokens
21
+ readarray CHECKPOINTS < <(find . -regex '\./tr11e-350M-ml/.*/global_step[0-9]*[02468]000')
22
+
23
+ echo "Total number of checkpoints to tar: ${#CHECKPOINTS[@]}"
24
+
25
+ CHECKPOINT_TO_TAR=${CHECKPOINTS[$SLURM_ARRAY_TASK_ID]}
26
+ echo "Checkpoint to tar: $CHECKPOINT_TO_TAR"
27
+
28
+ TEMPNAME=$(dirname $CHECKPOINT_TO_TAR)
29
+ DIRNAME=${TEMPNAME:2}
30
+ BASENAME=$(basename $CHECKPOINT_TO_TAR)
31
+
32
+ CHECKPOINT_TO_TAR=$DIRNAME/$BASENAME
33
+ CHECKPOINT_TAR_TO_FOLDER=$six_ALL_CCFRSTORE/checkpoints/$DIRNAME
34
+ CHECKPOINT_TAR_TO=$CHECKPOINT_TAR_TO_FOLDER/$BASENAME.tar
35
+
36
+ mkdir -p $CHECKPOINT_TAR_TO_FOLDER
37
+ echo $CHECKPOINT_TO_TAR
38
+ echo $CHECKPOINT_TAR_TO
39
+
40
+ # cvfj for bz2 compression; won't change much
41
+ tar cvf $CHECKPOINT_TAR_TO $CHECKPOINT_TO_TAR
42
+
43
+ popd
jz/model_storage/move_checkpoints_to_store_tr11f.slurm ADDED
@@ -0,0 +1,44 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=tr11f_move_to_tar # job name
3
+ #SBATCH --ntasks=1 # number of MP tasks
4
+ #SBATCH --nodes=1
5
+ #SBATCH --cpus-per-task=4 # number of cores per tasks
6
+ #SBATCH --hint=nomultithread # we get physical cores not logical
7
+ #SBATCH --time=20:00:00 # maximum execution time (HH:MM:SS)
8
+ #SBATCH --output=logs/%x-%j.out # output file name
9
+ #SBATCH --account=six@cpu
10
+ #SBATCH --array=0-155%1
11
+ #SBATCH --partition=cpu_p1
12
+
13
+ # DEBUG
14
+ # SLURM_ARRAY_TASK_ID=0 # 0-6549
15
+
16
+ pushd $six_ALL_CCFRWORK/checkpoints
17
+ # readarray CHECKPOINTS < <(find . -regex '\./tr11[a-z].*/global_step[0-9]*')
18
+ # DEBUG regex to test out only on tr11e-350
19
+ # readarray CHECKPOINTS < <(find . -regex '\./tr11e-350M-ml/.*/global_step[0-9]*')
20
+ # batch size 512 -> one out of 4 checkpoints for 1B tokens
21
+ readarray CHECKPOINTS < <(find . -regex '\./tr11f-6B3-ml/.*/global_step[0-9]*000')
22
+
23
+ echo "Total number of checkpoints to tar: ${#CHECKPOINTS[@]}"
24
+
25
+ CHECKPOINT_TO_TAR=${CHECKPOINTS[$SLURM_ARRAY_TASK_ID]}
26
+ echo "Checkpoint to tar: $CHECKPOINT_TO_TAR"
27
+
28
+ TEMPNAME=$(dirname $CHECKPOINT_TO_TAR)
29
+ DIRNAME=${TEMPNAME:2}
30
+ BASENAME=$(basename $CHECKPOINT_TO_TAR)
31
+
32
+ CHECKPOINT_TO_TAR=$DIRNAME/$BASENAME
33
+ CHECKPOINT_TAR_TO_FOLDER=$six_ALL_CCFRSTORE/checkpoints/$DIRNAME
34
+ CHECKPOINT_TAR_TO=$CHECKPOINT_TAR_TO_FOLDER/$BASENAME.tar
35
+
36
+ mkdir -p $CHECKPOINT_TAR_TO_FOLDER
37
+ echo $CHECKPOINT_TO_TAR
38
+ echo $CHECKPOINT_TAR_TO
39
+
40
+ # cvfj for bz2 compression; won't change much
41
+ tar cvf $CHECKPOINT_TAR_TO $CHECKPOINT_TO_TAR
42
+
43
+ popd
44
+
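These three scripts only pack checkpoints off to STORE. As a rough sketch of the reverse operation, a single tarred checkpoint can be restored back under WORK; the checkpoint path below is an illustrative placeholder, and since the tars are created with paths relative to the checkpoints dir, they must be extracted from there:

```bash
# Illustrative: adjust the tar path to the checkpoint you want back.
cd $six_ALL_CCFRWORK/checkpoints
tar xvf $six_ALL_CCFRSTORE/checkpoints/tr11b-1B3-ml/global_step100000.tar
```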
jz/scripts/custom_callbacks.py ADDED
@@ -0,0 +1,95 @@
1
+ import os
2
+
3
+ from transformers import TrainerCallback, is_tensorboard_available
4
+ from transformers.integrations import rewrite_logs
5
+
6
+
7
+ class LogFlosCallback(TrainerCallback):
8
+ """
9
+ A :class:`~transformers.TrainerCallback` that adds current flos to every log.
10
+ """
11
+
12
+ def on_log(self, args, state, control, logs=None, **kwargs):
13
+ logs["total_flos"] = state.total_flos
14
+
15
+
16
+ class TensorBoardFloIndexedCallback(TrainerCallback):
17
+ """
18
+ A :class:`~transformers.TrainerCallback` that sends the logs to `TensorBoard
19
+ <https://www.tensorflow.org/tensorboard>`__.
20
+
21
+ Args:
22
+ tb_writer (:obj:`SummaryWriter`, `optional`):
23
+ The writer to use. Will instantiate one if not set.
24
+ """
25
+
26
+ def __init__(self, tb_writer=None):
27
+ has_tensorboard = is_tensorboard_available()
28
+ assert (
29
+ has_tensorboard
30
+ ), "TensorBoardCallback requires tensorboard to be installed. Either update your PyTorch version or install tensorboardX."
31
+ if has_tensorboard:
32
+ try:
33
+ from torch.utils.tensorboard import SummaryWriter # noqa: F401
34
+
35
+ self._SummaryWriter = SummaryWriter
36
+ except ImportError:
37
+ try:
38
+ from tensorboardX import SummaryWriter
39
+
40
+ self._SummaryWriter = SummaryWriter
41
+ except ImportError:
42
+ self._SummaryWriter = None
43
+ else:
44
+ self._SummaryWriter = None
45
+ self.tb_writer = tb_writer
46
+
47
+ def _init_summary_writer(self, args, log_dir=None):
48
+ log_dir = log_dir or args.logging_dir
49
+ if self._SummaryWriter is not None:
50
+ self.tb_writer = self._SummaryWriter(log_dir=log_dir)
51
+
52
+ def on_train_begin(self, args, state, control, **kwargs):
53
+ if not state.is_world_process_zero:
54
+ return
55
+
56
+ log_dir = None
57
+
58
+ if state.is_hyper_param_search:
59
+ trial_name = state.trial_name
60
+ if trial_name is not None:
61
+ log_dir = os.path.join(args.logging_dir, trial_name)
62
+
63
+ self._init_summary_writer(args, log_dir)
64
+
65
+ if self.tb_writer is not None:
66
+ self.tb_writer.add_text("args", args.to_json_string())
67
+ if "model" in kwargs:
68
+ model = kwargs["model"]
69
+ if hasattr(model, "config") and model.config is not None:
70
+ model_config_json = model.config.to_json_string()
71
+ self.tb_writer.add_text("model_config", model_config_json)
72
+ # Version of TensorBoard coming from tensorboardX does not have this method.
73
+ if hasattr(self.tb_writer, "add_hparams"):
74
+ self.tb_writer.add_hparams(args.to_sanitized_dict(), metric_dict={})
75
+
76
+ def on_log(self, args, state, control, logs=None, **kwargs):
77
+ if not state.is_world_process_zero:
78
+ return
79
+
80
+ if self.tb_writer is None:
81
+ self._init_summary_writer(args)
82
+
83
+ if self.tb_writer is not None:
84
+ logs = rewrite_logs(logs)
85
+ self.tb_writer.add_scalar("Conversion/x steps - y flos", state.total_flos, state.global_step)
86
+ self.tb_writer.add_scalar("Conversion/x flos - y steps", state.global_step, state.total_flos)
87
+ for k, v in logs.items():
88
+ if isinstance(v, (int, float)):
89
+ self.tb_writer.add_scalar(f"Flos/{k}", v, state.total_flos)
90
+ self.tb_writer.add_scalar(f"Steps/{k}", v, state.global_step)
91
+ self.tb_writer.flush()
92
+
93
+ def on_train_end(self, args, state, control, **kwargs):
94
+ if self.tb_writer:
95
+ self.tb_writer.close()
jz/scripts/run_clm.py ADDED
@@ -0,0 +1,520 @@
1
+ #!/usr/bin/env python
2
+ # coding=utf-8
3
+ # Copyright 2020 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """
17
+ Fine-tuning the library models for causal language modeling (GPT, GPT-2, CTRL, ...) on a text file or a dataset.
18
+
19
+ Here is the full list of checkpoints on the hub that can be fine-tuned by this script:
20
+ https://huggingface.co/models?filter=causal-lm
21
+ """
22
+ # You can also adapt this script on your own causal language modeling task. Pointers for this are left as comments.
23
+
24
+ import logging
25
+ import math
26
+ import os
27
+ import sys
28
+ from dataclasses import dataclass, field
29
+ from typing import Optional
30
+
31
+ import torch.distributed
32
+ from datasets import load_dataset
33
+
34
+ import transformers
35
+ from transformers import (
36
+ CONFIG_MAPPING,
37
+ MODEL_FOR_CAUSAL_LM_MAPPING,
38
+ AutoConfig,
39
+ AutoModelForCausalLM,
40
+ AutoTokenizer,
41
+ HfArgumentParser,
42
+ Trainer,
43
+ TrainingArguments,
44
+ default_data_collator,
45
+ set_seed,
46
+ )
47
+ from transformers.testing_utils import CaptureLogger
48
+ from transformers.trainer_utils import get_last_checkpoint, is_main_process
49
+ from transformers.utils import check_min_version
50
+
51
+ ### I very much dislike this solution. `run_clm.py` should probably be at the root, or install as an editable package.
52
+ import os
53
+ currentdir = os.path.dirname(os.path.realpath(__file__))
54
+ parentdir = os.path.dirname(currentdir)
55
+ sys.path.append(parentdir)
56
+ ###
57
+
58
+ from models.decoder_only_t5 import DecoderOnlyT5Config, DecoderOnlyT5LMHeadModel
59
+
60
+ CONFIG_MAPPING["decoder_only_t5"] = DecoderOnlyT5Config
61
+ MODEL_FOR_CAUSAL_LM_MAPPING[DecoderOnlyT5Config] = DecoderOnlyT5LMHeadModel
62
+
63
+ # Will error if the minimal version of Transformers is not installed. Remove at your own risks.
64
+ from custom_callbacks import LogFlosCallback, TensorBoardFloIndexedCallback
65
+
66
+ check_min_version("4.6.0.dev0")
67
+
68
+ logging.basicConfig(
69
+ format="%(asctime)s - %(levelname)s - %(process)d - %(name)s - %(message)s",
70
+ datefmt="%m/%d/%Y %H:%M:%S",
71
+ level=logging.INFO,
72
+ )
73
+ logger = logging.getLogger(__name__)
74
+
75
+
76
+ MODEL_CONFIG_CLASSES = list(MODEL_FOR_CAUSAL_LM_MAPPING.keys())
77
+ MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
78
+
79
+
80
+ @dataclass
81
+ class ModelArguments:
82
+ """
83
+ Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
84
+ """
85
+
86
+ model_name_or_path: Optional[str] = field(
87
+ default=None,
88
+ metadata={
89
+ "help": "The model checkpoint for weights initialization."
90
+ "Don't set if you want to train a model from scratch."
91
+ },
92
+ )
93
+ model_type: Optional[str] = field(
94
+ default=None,
95
+ metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
96
+ )
97
+ config_name: Optional[str] = field(
98
+ default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
99
+ )
100
+ tokenizer_name: Optional[str] = field(
101
+ default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
102
+ )
103
+ cache_dir: Optional[str] = field(
104
+ default=None,
105
+ metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
106
+ )
107
+ use_fast_tokenizer: bool = field(
108
+ default=True,
109
+ metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
110
+ )
111
+ model_revision: str = field(
112
+ default="main",
113
+ metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
114
+ )
115
+ use_auth_token: bool = field(
116
+ default=False,
117
+ metadata={
118
+ "help": "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
119
+ "with private models)."
120
+ },
121
+ )
122
+
123
+
124
+ @dataclass
125
+ class ConfigArguments:
126
+ """
127
+ Arguments defining the new model we're about to train when training from scratch
128
+ """
129
+
130
+ n_ctx: Optional[int] = field(default=1024, metadata={"help": "Dimensionality of the causal mask"})
131
+ n_embd: Optional[int] = field(
132
+ default=768, metadata={"help": "Dimensionality of the embeddings and hidden states."}
133
+ )
134
+ n_layer: Optional[int] = field(default=12, metadata={"help": "Number of hidden layers."})
135
+ n_head: Optional[int] = field(default=12, metadata={"help": "Number of attention heads for each attention layer."})
136
+ n_inner: Optional[int] = field(default=None, metadata={"help": "Dimensionality of the inner feed-forward layers."})
137
+
138
+
139
+ @dataclass
140
+ class DataTrainingArguments:
141
+ """
142
+ Arguments pertaining to what data we are going to input our model for training and eval.
143
+ """
144
+
145
+ sanity: bool = field(
146
+ default=False, metadata={"help": "Only use fraction of the dataset"}
147
+ )
148
+ dataset_name: Optional[str] = field(
149
+ default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
150
+ )
151
+ dataset_config_name: Optional[str] = field(
152
+ default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
153
+ )
154
+ train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
155
+ validation_file: Optional[str] = field(
156
+ default=None,
157
+ metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
158
+ )
159
+ max_train_samples: Optional[int] = field(
160
+ default=None,
161
+ metadata={
162
+ "help": "For debugging purposes or quicker training, truncate the number of training examples to this "
163
+ "value if set."
164
+ },
165
+ )
166
+ max_val_samples: Optional[int] = field(
167
+ default=None,
168
+ metadata={
169
+ "help": "For debugging purposes or quicker training, truncate the number of validation examples to this "
170
+ "value if set."
171
+ },
172
+ )
173
+
174
+ block_size: Optional[int] = field(
175
+ default=None,
176
+ metadata={
177
+ "help": "Optional input sequence length after tokenization. "
178
+ "The training dataset will be truncated in block of this size for training. "
179
+ "Default to the model max input length for single sentence inputs (take into account special tokens)."
180
+ },
181
+ )
182
+ overwrite_cache: bool = field(
183
+ default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
184
+ )
185
+ validation_split_percentage: Optional[int] = field(
186
+ default=5,
187
+ metadata={
188
+ "help": "The percentage of the train set used as validation set in case there's no validation split"
189
+ },
190
+ )
191
+ preprocessing_num_workers: Optional[int] = field(
192
+ default=None,
193
+ metadata={"help": "The number of processes to use for the preprocessing."},
194
+ )
195
+
196
+ def __post_init__(self):
197
+ if self.dataset_name is None and self.train_file is None and self.validation_file is None:
198
+ raise ValueError("Need either a dataset name or a training/validation file.")
199
+ else:
200
+ if self.train_file is not None:
201
+ extension = self.train_file.split(".")[-1]
202
+ assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
203
+ if self.validation_file is not None:
204
+ extension = self.validation_file.split(".")[-1]
205
+ assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
206
+
207
+
208
+ def main():
209
+ # See all possible arguments in src/transformers/training_args.py
210
+ # or by passing the --help flag to this script.
211
+ # We now keep distinct sets of args, for a cleaner separation of concerns.
212
+
213
+ parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments, ConfigArguments))
214
+ if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
215
+ # If we pass only one argument to the script and it's the path to a json file,
216
+ # let's parse it to get our arguments.
217
+ model_args, data_args, training_args, config_args = parser.parse_json_file(
218
+ json_file=os.path.abspath(sys.argv[1])
219
+ )
220
+ else:
221
+ model_args, data_args, training_args, config_args = parser.parse_args_into_dataclasses()
222
+
223
+ # Detecting last checkpoint.
224
+ last_checkpoint = None
225
+ if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
226
+ last_checkpoint = get_last_checkpoint(training_args.output_dir)
227
+ if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
228
+ raise ValueError(
229
+ f"Output directory ({training_args.output_dir}) already exists and is not empty. "
230
+ "Use --overwrite_output_dir to overcome."
231
+ )
232
+ elif last_checkpoint is not None:
233
+ logger.info(
234
+ f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
235
+ "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
236
+ )
237
+
238
+ # Setup logging
239
+ logging.basicConfig(
240
+ format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
241
+ datefmt="%m/%d/%Y %H:%M:%S",
242
+ handlers=[logging.StreamHandler(sys.stdout)],
243
+ )
244
+ logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
245
+
246
+ # Log on each process the small summary:
247
+ logger.warning(
248
+ f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
249
+ + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
250
+ )
251
+ # Set the verbosity to info of the Transformers logger (on main process only):
252
+ if is_main_process(training_args.local_rank):
253
+ transformers.utils.logging.set_verbosity_info()
254
+ transformers.utils.logging.enable_default_handler()
255
+ transformers.utils.logging.enable_explicit_format()
256
+ logger.info(f"Training/evaluation parameters {training_args}")
257
+
258
+ # Set seed before initializing model.
259
+ set_seed(training_args.seed)
260
+
261
+ # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
262
+ # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
263
+ # (the dataset will be downloaded automatically from the datasets Hub).
264
+ #
265
+ # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
266
+ # 'text' is found. You can easily tweak this behavior (see below).
267
+ #
268
+ # In distributed training, the load_dataset function guarantee that only one local process can concurrently
269
+ # download the dataset.
270
+ if data_args.dataset_name is not None:
271
+ # Downloading and loading a dataset from the hub.
272
+ datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, keep_in_memory=False, cache_dir=model_args.cache_dir)
273
+ if "validation" not in datasets.keys():
274
+ datasets["validation"] = load_dataset(
275
+ data_args.dataset_name,
276
+ data_args.dataset_config_name,
277
+ split=f"train[:{data_args.validation_split_percentage}%]",
278
+ keep_in_memory=False,
279
+ cache_dir=model_args.cache_dir
280
+ )
281
+ datasets["train"] = load_dataset(
282
+ data_args.dataset_name,
283
+ data_args.dataset_config_name,
284
+ split=f"train[{data_args.validation_split_percentage}%:]",
285
+ keep_in_memory=False,
286
+ cache_dir=model_args.cache_dir
287
+ )
288
+ else:
289
+ data_files = {}
290
+ if data_args.train_file is not None:
291
+ data_files["train"] = data_args.train_file
292
+ if data_args.validation_file is not None:
293
+ data_files["validation"] = data_args.validation_file
294
+ extension = (
295
+ data_args.train_file.split(".")[-1]
296
+ if data_args.train_file is not None
297
+ else data_args.validation_file.split(".")[-1]
298
+ )
299
+ if extension == "txt":
300
+ extension = "text"
301
+ datasets = load_dataset(extension, data_files=data_files, keep_in_memory=False, cache_dir=model_args.cache_dir)
302
+ if data_args.sanity:
303
+ datasets["train"] = datasets["train"].shard(100, index=0, contiguous=True)
304
+ # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
305
+ # https://huggingface.co/docs/datasets/loading_datasets.html.
306
+
307
+ # Load pretrained model and tokenizer
308
+ #
309
+ # Distributed training:
310
+ # The .from_pretrained methods guarantee that only one local process can concurrently
311
+ # download model & vocab.
312
+
313
+ config_kwargs = {
314
+ "cache_dir": model_args.cache_dir,
315
+ "revision": model_args.model_revision,
316
+ "use_auth_token": True if model_args.use_auth_token else None,
317
+ }
318
+ if model_args.config_name:
319
+ config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
320
+ elif model_args.model_name_or_path:
321
+ config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
322
+ else:
323
+ config = CONFIG_MAPPING[model_args.model_type](**vars(config_args), **config_kwargs)
324
+ logger.warning("You are instantiating a new config instance from scratch.")
325
+
326
+ tokenizer_kwargs = {
327
+ "cache_dir": model_args.cache_dir,
328
+ "use_fast": model_args.use_fast_tokenizer,
329
+ "revision": model_args.model_revision,
330
+ "use_auth_token": True if model_args.use_auth_token else None,
331
+ }
332
+ if model_args.tokenizer_name:
333
+ tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
334
+ elif model_args.model_name_or_path:
335
+ tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
336
+ else:
337
+ raise ValueError(
338
+ "You are instantiating a new tokenizer from scratch. This is not supported by this script."
339
+ "You can do it from another script, save it, and load it from here, using --tokenizer_name."
340
+ )
341
+
342
+ if model_args.model_name_or_path:
343
+ model = AutoModelForCausalLM.from_pretrained(
344
+ model_args.model_name_or_path,
345
+ from_tf=bool(".ckpt" in model_args.model_name_or_path),
346
+ config=config,
347
+ cache_dir=model_args.cache_dir,
348
+ revision=model_args.model_revision,
349
+ use_auth_token=True if model_args.use_auth_token else None,
350
+ )
351
+ else:
352
+ logger.info("Training new model from scratch")
353
+ model = AutoModelForCausalLM.from_config(config)
354
+
355
+ model.resize_token_embeddings(len(tokenizer))
356
+
357
+ # Preprocessing the datasets.
358
+ # First we tokenize all the texts.
359
+ if training_args.do_train:
360
+ column_names = datasets["train"].column_names
361
+ else:
362
+ column_names = datasets["validation"].column_names
363
+ text_column_name = "text" if "text" in column_names else column_names[0]
364
+
365
+ # since this will be pickled to avoid _LazyModule error in Hasher force logger loading before tokenize_function
366
+ tok_logger = transformers.utils.logging.get_logger("transformers.tokenization_utils_base")
367
+
368
+ datasets = datasets.shuffle()
369
+ def tokenize_function(examples):
370
+ with CaptureLogger(tok_logger) as cl:
371
+ output = tokenizer(examples[text_column_name])
372
+ # clm input could be much much longer than block_size
373
+ if "Token indices sequence length is longer than the" in cl.out:
374
+ tok_logger.warning(
375
+ "^^^^^^^^^^^^^^^^ Please ignore the warning above - this long input will be chunked into smaller bits before being passed to the model."
376
+ )
377
+ return output
378
+
379
+ # Ensures only the main process does dataset pre-processing; the other ones will load the `map` cache
380
+ if not is_main_process(training_args.local_rank):
381
+ print("waiting for main process to execute mapping")
382
+ torch.distributed.barrier()
383
+
384
+ logger.info("Mapping dataset to tokenized dataset.",)
385
+ tokenized_datasets = datasets.map(
386
+ tokenize_function,
387
+ batched=True,
388
+ num_proc=data_args.preprocessing_num_workers,
389
+ remove_columns=column_names,
390
+ load_from_cache_file=not data_args.overwrite_cache,
391
+ keep_in_memory=False
392
+ )
393
+
394
+ if data_args.block_size is None:
395
+ block_size = tokenizer.model_max_length
396
+ if block_size > 1024:
397
+ logger.warning(
398
+ f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
399
+ "Picking 1024 instead. You can change that default value by passing --block_size xxx."
400
+ )
401
+ block_size = 1024
402
+ else:
403
+ if data_args.block_size > tokenizer.model_max_length:
404
+ logger.warning(
405
+ f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model"
406
+ f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}."
407
+ )
408
+ # block_size = min(data_args.block_size, tokenizer.model_max_length)
409
+ block_size = data_args.block_size
410
+
411
+ # Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size.
412
+ def group_texts(examples):
413
+ # Concatenate all texts.
414
+ concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
415
+ total_length = len(concatenated_examples[list(examples.keys())[0]])
416
+ # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
417
+ # customize this part to your needs.
418
+ total_length = (total_length // block_size) * block_size
419
+ # Split by chunks of max_len.
420
+ result = {
421
+ k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
422
+ for k, t in concatenated_examples.items()
423
+ }
424
+ result["labels"] = result["input_ids"].copy()
425
+ return result
426
+
427
+ # Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder
428
+ # for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower
429
+ # to preprocess.
430
+ #
431
+ # To speed up this part, we use multiprocessing. See the documentation of the map method for more information:
432
+ # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map
433
+
434
+ logger.info("Chunking tokenized dataset.")
435
+ lm_datasets = tokenized_datasets.map(
436
+ group_texts,
437
+ batched=True,
438
+ num_proc=data_args.preprocessing_num_workers,
439
+ load_from_cache_file=not data_args.overwrite_cache,
440
+ keep_in_memory=False
441
+ )
442
+
443
+ # Now the other ones can catch up.
444
+ if training_args.local_rank != -1 and is_main_process(training_args.local_rank):
445
+ print("loading results from main process")
446
+ torch.distributed.barrier()
447
+
448
+ if training_args.do_train:
449
+ if "train" not in tokenized_datasets:
450
+ raise ValueError("--do_train requires a train dataset")
451
+ train_dataset = lm_datasets["train"]
452
+ if data_args.max_train_samples is not None:
453
+ train_dataset = train_dataset.select(range(data_args.max_train_samples))
454
+
455
+ if training_args.do_eval:
456
+ if "validation" not in tokenized_datasets:
457
+ cutoff = data_args.validation_split_percentage * len(lm_datasets["train"]) // 100
458
+ train_dataset = lm_datasets["train"].select(range(cutoff, len(lm_datasets["train"])))
459
+ eval_dataset = lm_datasets["train"].select(range(cutoff))
460
+ else:
461
+ eval_dataset = lm_datasets["validation"]
462
+ if data_args.max_val_samples is not None:
463
+ eval_dataset = eval_dataset.select(range(data_args.max_val_samples))
464
+
465
+
466
+ # Initialize our Trainer
467
+ trainer = Trainer(
468
+ model=model,
469
+ args=training_args,
470
+ train_dataset=train_dataset if training_args.do_train else None,
471
+ eval_dataset=eval_dataset if training_args.do_eval else None,
472
+ tokenizer=tokenizer,
473
+ # Data collator will default to DataCollatorWithPadding, so we change it.
474
+ data_collator=default_data_collator,
475
+ callbacks=[LogFlosCallback, TensorBoardFloIndexedCallback]
476
+ )
477
+
478
+ # Training
479
+ if training_args.do_train:
480
+ checkpoint = None
481
+ if training_args.resume_from_checkpoint is not None:
482
+ checkpoint = training_args.resume_from_checkpoint
483
+ elif last_checkpoint is not None:
484
+ checkpoint = last_checkpoint
485
+
486
+ train_result = trainer.train(resume_from_checkpoint=checkpoint)
487
+ trainer.save_model() # Saves the tokenizer too for easy upload
488
+
489
+ metrics = train_result.metrics
490
+
491
+ max_train_samples = (
492
+ data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
493
+ )
494
+ metrics["train_samples"] = min(max_train_samples, len(train_dataset))
495
+
496
+ trainer.log_metrics("train", metrics)
497
+ trainer.save_metrics("train", metrics)
498
+ trainer.save_state()
499
+
500
+ # Evaluation
501
+ if training_args.do_eval:
502
+ logger.info("*** Evaluate ***")
503
+
504
+ metrics = trainer.evaluate()
505
+
506
+ metrics["eval_samples"] = len(eval_dataset)
507
+ perplexity = math.exp(metrics["eval_loss"])
508
+ metrics["perplexity"] = perplexity
509
+
510
+ trainer.log_metrics("eval", metrics)
511
+ trainer.save_metrics("eval", metrics)
512
+
513
+
514
+ def _mp_fn(index):
515
+ # For xla_spawn (TPUs)
516
+ main()
517
+
518
+
519
+ if __name__ == "__main__":
520
+ main()
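A hedged sketch of how this script might be launched to train one of the `decoder_only_t5` configs from this commit from scratch; the tokenizer, dataset, GPU count and output path are illustrative placeholders, not values taken from the commit:

```bash
python -m torch.distributed.launch --nproc_per_node=4 jz/scripts/run_clm.py \
    --config_name jz/configs/dec_only_t5/decoder_only_t5-tiny.json \
    --tokenizer_name t5-small \
    --dataset_name oscar --dataset_config_name unshuffled_deduplicated_en \
    --block_size 1024 \
    --do_train --do_eval \
    --per_device_train_batch_size 4 \
    --output_dir $six_ALL_CCFRWORK/experiments/decoder-only-t5-tiny
```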
jz/scripts/run_clm_prompted.py ADDED
@@ -0,0 +1,534 @@
1
+ #!/usr/bin/env python
2
+ # coding=utf-8
3
+ # Copyright 2020 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """
17
+ Prompted version of run_clm.
18
+ """
19
+ # You can also adapt this script on your own causal language modeling task. Pointers for this are left as comments.
20
+
21
+ import logging
22
+ import math
23
+ import os
24
+ import sys
25
+ from dataclasses import dataclass, field
26
+ import torch
27
+ from typing import Optional, Dict, List, Union
28
+
29
+ from datasets import load_dataset, load_from_disk
30
+
31
+ import transformers
32
+ from transformers import (
33
+ CONFIG_MAPPING,
34
+ MODEL_FOR_CAUSAL_LM_MAPPING,
35
+ AutoConfig,
36
+ AutoModelForCausalLM,
37
+ AutoTokenizer,
38
+ HfArgumentParser,
39
+ Trainer,
40
+ TrainingArguments,
41
+ default_data_collator,
42
+ set_seed,
43
+ )
44
+ from transformers.testing_utils import CaptureLogger
45
+ from transformers.trainer_utils import get_last_checkpoint, is_main_process
46
+ from transformers.utils import check_min_version
47
+ from transformers.file_utils import PaddingStrategy
48
+ from transformers.tokenization_utils_base import PreTrainedTokenizerBase
49
+
50
+ # Will error if the minimal version of Transformers is not installed. Remove at your own risk.
51
+ check_min_version("4.6.0.dev0")
52
+
53
+ logging.basicConfig(
54
+ format="%(asctime)s - %(levelname)s - %(process)d - %(name)s - %(message)s",
55
+ datefmt="%m/%d/%Y %H:%M:%S",
56
+ level=logging.INFO,
57
+ )
58
+ logger = logging.getLogger(__name__)
59
+
60
+
61
+ MODEL_CONFIG_CLASSES = list(MODEL_FOR_CAUSAL_LM_MAPPING.keys())
62
+ MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
63
+
64
+ @dataclass
65
+ class MyDataCollatorWithPadding:
66
+ """
67
+ Custom version of `DataCollatorWithPadding`.
68
+ """
69
+
70
+ tokenizer: PreTrainedTokenizerBase
71
+ padding: Union[bool, str, PaddingStrategy] = True
72
+ max_length: Optional[int] = None
73
+ pad_to_multiple_of: Optional[int] = None
74
+
75
+ def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
76
+ batch = self.tokenizer.pad(
77
+ features,
78
+ padding=self.padding,
79
+ max_length=self.max_length,
80
+ pad_to_multiple_of=self.pad_to_multiple_of,
81
+ )
82
+ if "label" in batch:
83
+ batch["labels"] = batch["label"]
84
+ del batch["label"]
85
+ if "label_ids" in batch:
86
+ batch["labels"] = batch["label_ids"]
87
+ del batch["label_ids"]
88
+ # Padding labels
89
+ max_l = len(batch["input_ids"][0])
90
+ result = []
91
+ for i in batch["labels"]:
92
+ result.append(i + [-100]*(max_l - len(i)))
93
+ batch["labels"] = result
94
+ for k, v in batch.items():
95
+ batch[k] = torch.tensor(v)
96
+ return batch
97
+
98
+ @dataclass
99
+ class ModelArguments:
100
+ """
101
+ Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
102
+ """
103
+
104
+ model_name_or_path: Optional[str] = field(
105
+ default=None,
106
+ metadata={
107
+ "help": "The model checkpoint for weights initialization."
108
+ "Don't set if you want to train a model from scratch."
109
+ },
110
+ )
111
+ model_type: Optional[str] = field(
112
+ default=None,
113
+ metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
114
+ )
115
+ config_name: Optional[str] = field(
116
+ default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
117
+ )
118
+ tokenizer_name: Optional[str] = field(
119
+ default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
120
+ )
121
+ cache_dir: Optional[str] = field(
122
+ default=None,
123
+ metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
124
+ )
125
+ use_fast_tokenizer: bool = field(
126
+ default=True,
127
+ metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
128
+ )
129
+ model_revision: str = field(
130
+ default="main",
131
+ metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
132
+ )
133
+ use_auth_token: bool = field(
134
+ default=False,
135
+ metadata={
136
+ "help": "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
137
+ "with private models)."
138
+ },
139
+ )
140
+
141
+
142
+ @dataclass
143
+ class DataTrainingArguments:
144
+ """
145
+ Arguments pertaining to what data we are going to input our model for training and eval.
146
+ """
147
+
148
+ dataset_name: Optional[str] = field(
149
+ default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
150
+ )
151
+ dataset_config_name: Optional[str] = field(
152
+ default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
153
+ )
154
+ train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
155
+ validation_file: Optional[str] = field(
156
+ default=None,
157
+ metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
158
+ )
159
+ max_train_samples: Optional[int] = field(
160
+ default=None,
161
+ metadata={
162
+ "help": "For debugging purposes or quicker training, truncate the number of training examples to this "
163
+ "value if set."
164
+ },
165
+ )
166
+ max_val_samples: Optional[int] = field(
167
+ default=None,
168
+ metadata={
169
+ "help": "For debugging purposes or quicker training, truncate the number of validation examples to this "
170
+ "value if set."
171
+ },
172
+ )
173
+
174
+ block_size: Optional[int] = field(
175
+ default=None,
176
+ metadata={
177
+ "help": "Optional input sequence length after tokenization. "
178
+ "The training dataset will be truncated in block of this size for training. "
179
+ "Default to the model max input length for single sentence inputs (take into account special tokens)."
180
+ },
181
+ )
182
+ overwrite_cache: bool = field(
183
+ default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
184
+ )
185
+ validation_split_percentage: Optional[int] = field(
186
+ default=5,
187
+ metadata={
188
+ "help": "The percentage of the train set used as validation set in case there's no validation split"
189
+ },
190
+ )
191
+ preprocessing_num_workers: Optional[int] = field(
192
+ default=None,
193
+ metadata={"help": "The number of processes to use for the preprocessing."},
194
+ )
195
+
196
+ def __post_init__(self):
197
+ if self.dataset_name is None and self.train_file is None and self.validation_file is None:
198
+ raise ValueError("Need either a dataset name or a training/validation file.")
199
+ else:
200
+ if self.train_file is not None:
201
+ extension = self.train_file.split(".")[-1]
202
+ assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
203
+ if self.validation_file is not None:
204
+ extension = self.validation_file.split(".")[-1]
205
+ assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
206
+
207
+
208
+ def main():
209
+ # See all possible arguments in src/transformers/training_args.py
210
+ # or by passing the --help flag to this script.
211
+ # We now keep distinct sets of args, for a cleaner separation of concerns.
212
+
213
+ parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
214
+ if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
215
+ # If we pass only one argument to the script and it's the path to a json file,
216
+ # let's parse it to get our arguments.
217
+ model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
218
+ else:
219
+ model_args, data_args, training_args = parser.parse_args_into_dataclasses()
220
+
221
+ # Detecting last checkpoint.
222
+ last_checkpoint = None
223
+ if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
224
+ last_checkpoint = get_last_checkpoint(training_args.output_dir)
225
+ if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
226
+ raise ValueError(
227
+ f"Output directory ({training_args.output_dir}) already exists and is not empty. "
228
+ "Use --overwrite_output_dir to overcome."
229
+ )
230
+ elif last_checkpoint is not None:
231
+ logger.info(
232
+ f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
233
+ "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
234
+ )
235
+
236
+ # Setup logging
237
+ logging.basicConfig(
238
+ format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
239
+ datefmt="%m/%d/%Y %H:%M:%S",
240
+ handlers=[logging.StreamHandler(sys.stdout)],
241
+ )
242
+ logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
243
+
244
+ # Log on each process the small summary:
245
+ logger.warning(
246
+ f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
247
+ + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
248
+ )
249
+ # Set the verbosity to info of the Transformers logger (on main process only):
250
+ if is_main_process(training_args.local_rank):
251
+ transformers.utils.logging.set_verbosity_info()
252
+ transformers.utils.logging.enable_default_handler()
253
+ transformers.utils.logging.enable_explicit_format()
254
+ logger.info(f"Training/evaluation parameters {training_args}")
255
+
256
+ # Set seed before initializing model.
257
+ set_seed(training_args.seed)
258
+
259
+ # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
260
+ # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
261
+ # (the dataset will be downloaded automatically from the datasets Hub).
262
+ #
263
+ # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
264
+ # 'text' is found. You can easily tweak this behavior (see below).
265
+ #
266
+ # In distributed training, the load_dataset function guarantees that only one local process can concurrently
267
+ # download the dataset.
268
+ # if data_args.dataset_name is not None:
269
+ # # Downloading and loading a dataset from the hub.
270
+ # datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
271
+ # if "validation" not in datasets.keys():
272
+ # datasets["validation"] = load_dataset(
273
+ # data_args.dataset_name,
274
+ # data_args.dataset_config_name,
275
+ # split=f"train[:{data_args.validation_split_percentage}%]",
276
+ # )
277
+ # datasets["train"] = load_dataset(
278
+ # data_args.dataset_name,
279
+ # data_args.dataset_config_name,
280
+ # split=f"train[{data_args.validation_split_percentage}%:]",
281
+ # )
282
+ # else:
283
+ # data_files = {}
284
+ # if data_args.train_file is not None:
285
+ # data_files["train"] = data_args.train_file
286
+ # if data_args.validation_file is not None:
287
+ # data_files["validation"] = data_args.validation_file
288
+ # extension = (
289
+ # data_args.train_file.split(".")[-1]
290
+ # if data_args.train_file is not None
291
+ # else data_args.validation_file.split(".")[-1]
292
+ # )
293
+ # if extension == "txt":
294
+ # extension = "text"
295
+ # datasets = load_dataset(extension, data_files=data_files)
296
+ datasets = load_from_disk(dataset_path=data_args.dataset_name)
297
+ # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
298
+ # https://huggingface.co/docs/datasets/loading_datasets.html.
299
+
300
+ # Load pretrained model and tokenizer
301
+ #
302
+ # Distributed training:
303
+ # The .from_pretrained methods guarantee that only one local process can concurrently
304
+ # download model & vocab.
305
+
306
+ config_kwargs = {
307
+ "cache_dir": model_args.cache_dir,
308
+ "revision": model_args.model_revision,
309
+ "use_auth_token": True if model_args.use_auth_token else None,
310
+ }
311
+ if model_args.config_name:
312
+ config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
313
+ elif model_args.model_name_or_path:
314
+ config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
315
+ else:
316
+ config = CONFIG_MAPPING[model_args.model_type]()
317
+ logger.warning("You are instantiating a new config instance from scratch.")
318
+
319
+ tokenizer_kwargs = {
320
+ "cache_dir": model_args.cache_dir,
321
+ "use_fast": model_args.use_fast_tokenizer,
322
+ "revision": model_args.model_revision,
323
+ "use_auth_token": True if model_args.use_auth_token else None,
324
+ }
325
+ if model_args.tokenizer_name:
326
+ tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
327
+ elif model_args.model_name_or_path:
328
+ tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
329
+ else:
330
+ raise ValueError(
331
+ "You are instantiating a new tokenizer from scratch. This is not supported by this script."
332
+ "You can do it from another script, save it, and load it from here, using --tokenizer_name."
333
+ )
334
+ if tokenizer.pad_token_id is None and tokenizer.eos_token_id is not None:
335
+ logger.warning(f"Setting `pad_token_id` to `eos_token_id`:{tokenizer.eos_token_id}.")
336
+ tokenizer.pad_token = tokenizer.eos_token
337
+
338
+ if model_args.model_name_or_path:
339
+ model = AutoModelForCausalLM.from_pretrained(
340
+ model_args.model_name_or_path,
341
+ from_tf=bool(".ckpt" in model_args.model_name_or_path),
342
+ config=config,
343
+ cache_dir=model_args.cache_dir,
344
+ revision=model_args.model_revision,
345
+ use_auth_token=True if model_args.use_auth_token else None,
346
+ )
347
+ else:
348
+ logger.info("Training new model from scratch")
349
+ model = AutoModelForCausalLM.from_config(config)
350
+
351
+ model.resize_token_embeddings(len(tokenizer))
352
+
353
+ # Preprocessing the datasets.
354
+ # First we tokenize all the texts.
355
+ if training_args.do_train:
356
+ column_names = datasets["train"].column_names
357
+ else:
358
+ column_names = datasets["validation"].column_names
359
+ text_column_name = "text" if "text" in column_names else column_names[0]
360
+
361
+ def tokenize_function(examples):
362
+ def tok_f_ids(string):
363
+ return tokenizer(string, return_attention_mask=False)["input_ids"]
364
+
365
+ texts, texts_a, texts_b = [], [], []
366
+
367
+ unprompted_texts = examples["text"]
368
+ prompting_instances = examples["prompting_instances"]
369
+
370
+ for ump_text, ppt_instances in zip(unprompted_texts, prompting_instances):
371
+ if ppt_instances:
372
+ for i, p, o in zip(ppt_instances["input"], ppt_instances["prompt"], ppt_instances["output"]):
373
+ texts.append([])
374
+ texts_a.append(
375
+ tok_f_ids(i) \
376
+ + [tokenizer.eos_token_id] \
377
+ + tok_f_ids(p) \
378
+ + [tokenizer.eos_token_id]
379
+ )
380
+ texts_b.append(tok_f_ids(o))
381
+ else:
382
+ texts.append(tok_f_ids(ump_text))
383
+ texts_a.append([])
384
+ texts_b.append([])
385
+ return {
386
+ "text_full": texts,
387
+ "text_a": texts_a,
388
+ "text_b": texts_b,
389
+ }
390
+
391
+ datasets = datasets.shuffle()
392
+ logger.info("Mapping dataset to tokenized dataset.",)
393
+ tokenized_datasets = datasets.map(
394
+ tokenize_function,
395
+ batched=True,
396
+ num_proc=data_args.preprocessing_num_workers,
397
+ remove_columns=column_names,
398
+ load_from_cache_file=not data_args.overwrite_cache,
399
+ )
400
+
401
+ if data_args.block_size is None:
402
+ block_size = tokenizer.model_max_length
403
+ if block_size > 1024:
404
+ logger.warning(
405
+ f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
406
+ "Picking 1024 instead. You can change that default value by passing --block_size xxx."
407
+ )
408
+ block_size = 1024
409
+ else:
410
+ if data_args.block_size > tokenizer.model_max_length:
411
+ logger.warning(
412
+ f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model"
413
+ f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}."
414
+ )
415
+ block_size = min(data_args.block_size, tokenizer.model_max_length)
416
+
417
+ # Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size.
418
+ def group_texts(examples):
419
+ texts = examples["text_full"]
420
+ texts_a = examples["text_a"]
421
+ texts_b = examples["text_b"]
422
+
423
+ result = {
424
+ "input_ids": [],
425
+ "labels": [],
426
+ "attention_mask": [],
427
+ "length": [],
428
+ }
429
+ n = int(block_size/2)
430
+ for t, t_a, t_b in zip(texts, texts_a, texts_b):
431
+ if t == []:
432
+ cut_t_a = t_a[-n:]
433
+ cut_t_b = t_b[:n]
434
+ if len(cut_t_b) < 20:
435
+ continue
436
+ result["input_ids"].append(cut_t_a + cut_t_b)
437
+ result["labels"].append([-100]*len(cut_t_a) + cut_t_b)
438
+ else:
439
+ total_length = len(t)
440
+ total_length = (total_length // block_size) * block_size
441
+ for i in range (0, total_length, block_size):
442
+ sub_seq = t[i : i + block_size]
443
+ result["input_ids"].append(sub_seq)
444
+ result["labels"].append(sub_seq)
445
+ for i in result["labels"]:
446
+ result["attention_mask"].append([1]*len(i))
447
+ result["length"].append(len(i))
448
+ return result
449
+
450
+ # Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder
451
+ # for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower
452
+ # to preprocess.
453
+ #
454
+ # To speed up this part, we use multiprocessing. See the documentation of the map method for more information:
455
+ # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map
456
+
457
+ logger.info("Chunking tokenized dataset.")
458
+ lm_datasets = tokenized_datasets.map(
459
+ group_texts,
460
+ batched=True,
461
+ num_proc=data_args.preprocessing_num_workers,
462
+ remove_columns=tokenized_datasets["train"].column_names,
463
+ load_from_cache_file=not data_args.overwrite_cache,
464
+ )
465
+
466
+ if training_args.do_train:
467
+ if "train" not in tokenized_datasets:
468
+ raise ValueError("--do_train requires a train dataset")
469
+ train_dataset = lm_datasets["train"]
470
+ if data_args.max_train_samples is not None:
471
+ train_dataset = train_dataset.select(range(data_args.max_train_samples))
472
+
473
+ if training_args.do_eval:
474
+ if "validation" not in tokenized_datasets:
475
+ raise ValueError("--do_eval requires a validation dataset")
476
+ eval_dataset = lm_datasets["validation"]
477
+ if data_args.max_val_samples is not None:
478
+ eval_dataset = eval_dataset.select(range(data_args.max_val_samples))
479
+
480
+ # Initialize our Trainer
481
+ trainer = Trainer(
482
+ model=model,
483
+ args=training_args,
484
+ train_dataset=train_dataset if training_args.do_train else None,
485
+ eval_dataset=eval_dataset if training_args.do_eval else None,
486
+ tokenizer=tokenizer,
487
+ # Data collator will default to DataCollatorWithPadding, so we change it.
488
+ data_collator=MyDataCollatorWithPadding(tokenizer=tokenizer, padding=True),
489
+ )
490
+
491
+ # Training
492
+ if training_args.do_train:
493
+ if last_checkpoint is not None:
494
+ checkpoint = last_checkpoint
495
+ elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
496
+ checkpoint = model_args.model_name_or_path
497
+ else:
498
+ checkpoint = None
499
+ train_result = trainer.train(resume_from_checkpoint=checkpoint)
500
+ trainer.save_model() # Saves the tokenizer too for easy upload
501
+
502
+ metrics = train_result.metrics
503
+
504
+ max_train_samples = (
505
+ data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
506
+ )
507
+ metrics["train_samples"] = min(max_train_samples, len(train_dataset))
508
+
509
+ trainer.log_metrics("train", metrics)
510
+ trainer.save_metrics("train", metrics)
511
+ trainer.save_state()
512
+
513
+ # Evaluation
514
+ if training_args.do_eval:
515
+ logger.info("*** Evaluate ***")
516
+
517
+ metrics = trainer.evaluate()
518
+
519
+ max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
520
+ metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))
521
+ perplexity = math.exp(metrics["eval_loss"])
522
+ metrics["perplexity"] = perplexity
523
+
524
+ trainer.log_metrics("eval", metrics)
525
+ trainer.save_metrics("eval", metrics)
526
+
527
+
528
+ def _mp_fn(index):
529
+ # For xla_spawn (TPUs)
530
+ main()
531
+
532
+
533
+ if __name__ == "__main__":
534
+ main()
jz/scripts/run_text2text.py ADDED
@@ -0,0 +1,514 @@
1
+ #!/usr/bin/env python
2
+ # coding=utf-8
3
+ # Copyright 2020 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """
17
+ Fine-tune a text-to-text model (T5, BART, ...) on a text file or dataset.
18
+ """
19
+
20
+ import logging
21
+ import math
22
+ import os
23
+ import sys
24
+ from dataclasses import dataclass, field
25
+ from typing import Optional
26
+
27
+ import torch.distributed
28
+ from datasets import load_dataset
29
+
30
+ import transformers
31
+ from transformers import (
32
+ CONFIG_MAPPING,
33
+ MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
34
+ AutoConfig,
35
+ AutoModelForSeq2SeqLM,
36
+ AutoTokenizer,
37
+ HfArgumentParser,
38
+ Trainer,
39
+ TrainingArguments,
40
+ default_data_collator,
41
+ set_seed,
42
+ )
43
+ from transformers.testing_utils import CaptureLogger
44
+ from transformers.trainer_utils import get_last_checkpoint, is_main_process
45
+ from transformers.utils import check_min_version
46
+
47
+ ### I very much dislike this solution. `run_clm.py` should probably be at the root, or install as an editable package.
48
+ import os
49
+ currentdir = os.path.dirname(os.path.realpath(__file__))
50
+ parentdir = os.path.dirname(currentdir)
51
+ sys.path.append(parentdir)
52
+ ###
53
+
54
+ # Will error if the minimal version of Transformers is not installed. Remove at your own risks.
55
+ from custom_callbacks import LogFlosCallback, TensorBoardFloIndexedCallback
56
+
57
+ check_min_version("4.6.0.dev0")
58
+
59
+ logging.basicConfig(
60
+ format="%(asctime)s - %(levelname)s - %(process)d - %(name)s - %(message)s",
61
+ datefmt="%m/%d/%Y %H:%M:%S",
62
+ level=logging.INFO,
63
+ )
64
+ logger = logging.getLogger(__name__)
65
+
66
+
67
+ MODEL_CONFIG_CLASSES = list(MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.keys())
68
+ MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
69
+
70
+
71
+ @dataclass
72
+ class ModelArguments:
73
+ """
74
+ Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
75
+ """
76
+
77
+ model_name_or_path: Optional[str] = field(
78
+ default=None,
79
+ metadata={
80
+ "help": "The model checkpoint for weights initialization."
81
+ "Don't set if you want to train a model from scratch."
82
+ },
83
+ )
84
+ model_type: Optional[str] = field(
85
+ default=None,
86
+ metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
87
+ )
88
+ config_name: Optional[str] = field(
89
+ default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
90
+ )
91
+ tokenizer_name: Optional[str] = field(
92
+ default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
93
+ )
94
+ cache_dir: Optional[str] = field(
95
+ default=None,
96
+ metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
97
+ )
98
+ use_fast_tokenizer: bool = field(
99
+ default=True,
100
+ metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
101
+ )
102
+ model_revision: str = field(
103
+ default="main",
104
+ metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
105
+ )
106
+ use_auth_token: bool = field(
107
+ default=False,
108
+ metadata={
109
+ "help": "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
110
+ "with private models)."
111
+ },
112
+ )
113
+
114
+
115
+ @dataclass
116
+ class ConfigArguments:
117
+ """
118
+ Arguments defining the new model we're about to train when training from scratch
119
+ """
120
+
121
+ n_ctx: Optional[int] = field(default=1024, metadata={"help": "Dimensionality of the causal mask"})
122
+ n_embd: Optional[int] = field(
123
+ default=768, metadata={"help": "Dimensionality of the embeddings and hidden states."}
124
+ )
125
+ n_layer: Optional[int] = field(default=12, metadata={"help": "Number of hidden layers."})
126
+ n_head: Optional[int] = field(default=12, metadata={"help": "Number of attention heads for each attention layer."})
127
+ n_inner: Optional[int] = field(default=None, metadata={"help": "Dimensionality of the inner feed-forward layers."})
128
+
129
+
130
+ @dataclass
131
+ class DataTrainingArguments:
132
+ """
133
+ Arguments pertaining to what data we are going to input our model for training and eval.
134
+ """
135
+
136
+ sanity: bool = field(
137
+ default=False, metadata={"help": "Only use fraction of the dataset"}
138
+ )
139
+ dataset_name: Optional[str] = field(
140
+ default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
141
+ )
142
+ dataset_config_name: Optional[str] = field(
143
+ default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
144
+ )
145
+ train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
146
+ validation_file: Optional[str] = field(
147
+ default=None,
148
+ metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
149
+ )
150
+ max_train_samples: Optional[int] = field(
151
+ default=None,
152
+ metadata={
153
+ "help": "For debugging purposes or quicker training, truncate the number of training examples to this "
154
+ "value if set."
155
+ },
156
+ )
157
+ max_val_samples: Optional[int] = field(
158
+ default=None,
159
+ metadata={
160
+ "help": "For debugging purposes or quicker training, truncate the number of validation examples to this "
161
+ "value if set."
162
+ },
163
+ )
164
+
165
+ block_size: Optional[int] = field(
166
+ default=None,
167
+ metadata={
168
+ "help": "Optional input sequence length after tokenization. "
169
+ "The training dataset will be truncated in block of this size for training. "
170
+ "Default to the model max input length for single sentence inputs (take into account special tokens)."
171
+ },
172
+ )
173
+ overwrite_cache: bool = field(
174
+ default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
175
+ )
176
+ validation_split_percentage: Optional[int] = field(
177
+ default=5,
178
+ metadata={
179
+ "help": "The percentage of the train set used as validation set in case there's no validation split"
180
+ },
181
+ )
182
+ preprocessing_num_workers: Optional[int] = field(
183
+ default=None,
184
+ metadata={"help": "The number of processes to use for the preprocessing."},
185
+ )
186
+
187
+ def __post_init__(self):
188
+ if self.dataset_name is None and self.train_file is None and self.validation_file is None:
189
+ raise ValueError("Need either a dataset name or a training/validation file.")
190
+ else:
191
+ if self.train_file is not None:
192
+ extension = self.train_file.split(".")[-1]
193
+ assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
194
+ if self.validation_file is not None:
195
+ extension = self.validation_file.split(".")[-1]
196
+ assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
197
+
198
+
199
+ def main():
200
+ # See all possible arguments in src/transformers/training_args.py
201
+ # or by passing the --help flag to this script.
202
+ # We now keep distinct sets of args, for a cleaner separation of concerns.
203
+
204
+ parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments, ConfigArguments))
205
+ if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
206
+ # If we pass only one argument to the script and it's the path to a json file,
207
+ # let's parse it to get our arguments.
208
+ model_args, data_args, training_args, config_args = parser.parse_json_file(
209
+ json_file=os.path.abspath(sys.argv[1])
210
+ )
211
+ else:
212
+ model_args, data_args, training_args, config_args = parser.parse_args_into_dataclasses()
213
+
214
+ # Detecting last checkpoint.
215
+ last_checkpoint = None
216
+ if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
217
+ last_checkpoint = get_last_checkpoint(training_args.output_dir)
218
+ if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
219
+ raise ValueError(
220
+ f"Output directory ({training_args.output_dir}) already exists and is not empty. "
221
+ "Use --overwrite_output_dir to overcome."
222
+ )
223
+ elif last_checkpoint is not None:
224
+ logger.info(
225
+ f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
226
+ "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
227
+ )
228
+
229
+ # Setup logging
230
+ logging.basicConfig(
231
+ format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
232
+ datefmt="%m/%d/%Y %H:%M:%S",
233
+ handlers=[logging.StreamHandler(sys.stdout)],
234
+ )
235
+ logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
236
+
237
+ # Log on each process the small summary:
238
+ logger.warning(
239
+ f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
240
+ + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
241
+ )
242
+ # Set the verbosity to info of the Transformers logger (on main process only):
243
+ if is_main_process(training_args.local_rank):
244
+ transformers.utils.logging.set_verbosity_info()
245
+ transformers.utils.logging.enable_default_handler()
246
+ transformers.utils.logging.enable_explicit_format()
247
+ logger.info(f"Training/evaluation parameters {training_args}")
248
+
249
+ # Set seed before initializing model.
250
+ set_seed(training_args.seed)
251
+
252
+ # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
253
+ # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
254
+ # (the dataset will be downloaded automatically from the datasets Hub).
255
+ #
256
+ # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
257
+ # 'text' is found. You can easily tweak this behavior (see below).
258
+ #
259
+ # In distributed training, the load_dataset function guarantee that only one local process can concurrently
260
+ # download the dataset.
261
+ if data_args.dataset_name is not None:
262
+ # Downloading and loading a dataset from the hub.
263
+ datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, keep_in_memory=False, cache_dir=model_args.cache_dir)
264
+ if "validation" not in datasets.keys():
265
+ datasets["validation"] = load_dataset(
266
+ data_args.dataset_name,
267
+ data_args.dataset_config_name,
268
+ split=f"train[:{data_args.validation_split_percentage}%]",
269
+ keep_in_memory=False,
270
+ cache_dir=model_args.cache_dir
271
+ )
272
+ datasets["train"] = load_dataset(
273
+ data_args.dataset_name,
274
+ data_args.dataset_config_name,
275
+ split=f"train[{data_args.validation_split_percentage}%:]",
276
+ keep_in_memory=False,
277
+ cache_dir=model_args.cache_dir
278
+ )
279
+ else:
280
+ data_files = {}
281
+ if data_args.train_file is not None:
282
+ data_files["train"] = data_args.train_file
283
+ if data_args.validation_file is not None:
284
+ data_files["validation"] = data_args.validation_file
285
+ extension = (
286
+ data_args.train_file.split(".")[-1]
287
+ if data_args.train_file is not None
288
+ else data_args.validation_file.split(".")[-1]
289
+ )
290
+ if extension == "txt":
291
+ extension = "text"
292
+ datasets = load_dataset(extension, data_files=data_files, keep_in_memory=False, cache_dir=model_args.cache_dir)
293
+ if data_args.sanity:
294
+ datasets["train"] = datasets["train"].shard(100, index=0, contiguous=True)
295
+ # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
296
+ # https://huggingface.co/docs/datasets/loading_datasets.html.
297
+
298
+ # Load pretrained model and tokenizer
299
+ #
300
+ # Distributed training:
301
+ # The .from_pretrained methods guarantee that only one local process can concurrently
302
+ # download model & vocab.
303
+
304
+ config_kwargs = {
305
+ "cache_dir": model_args.cache_dir,
306
+ "revision": model_args.model_revision,
307
+ "use_auth_token": True if model_args.use_auth_token else None,
308
+ }
309
+ if model_args.config_name:
310
+ config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
311
+ elif model_args.model_name_or_path:
312
+ config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
313
+ else:
314
+ config = CONFIG_MAPPING[model_args.model_type](**vars(config_args), **config_kwargs)
315
+ logger.warning("You are instantiating a new config instance from scratch.")
316
+
317
+ tokenizer_kwargs = {
318
+ "cache_dir": model_args.cache_dir,
319
+ "use_fast": model_args.use_fast_tokenizer,
320
+ "revision": model_args.model_revision,
321
+ "use_auth_token": True if model_args.use_auth_token else None,
322
+ }
323
+ if model_args.tokenizer_name:
324
+ tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
325
+ elif model_args.model_name_or_path:
326
+ tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
327
+ else:
328
+ raise ValueError(
329
+ "You are instantiating a new tokenizer from scratch. This is not supported by this script."
330
+ "You can do it from another script, save it, and load it from here, using --tokenizer_name."
331
+ )
332
+
333
+ if model_args.model_name_or_path:
334
+ model = AutoModelForSeq2SeqLM.from_pretrained(
335
+ model_args.model_name_or_path,
336
+ from_tf=bool(".ckpt" in model_args.model_name_or_path),
337
+ config=config,
338
+ cache_dir=model_args.cache_dir,
339
+ revision=model_args.model_revision,
340
+ use_auth_token=True if model_args.use_auth_token else None,
341
+ )
342
+ else:
343
+ logger.info("Training new model from scratch")
344
+ model = AutoModelForSeq2SeqLM.from_config(config)
345
+
346
+ model.resize_token_embeddings(len(tokenizer))
347
+
348
+ # Preprocessing the datasets.
349
+ # First we tokenize all the texts.
350
+ if training_args.do_train:
351
+ column_names = datasets["train"].column_names
352
+ else:
353
+ column_names = datasets["validation"].column_names
354
+ text_column_name = "text" if "text" in column_names else column_names[0]
355
+
356
+ # since this will be pickled to avoid _LazyModule error in Hasher force logger loading before tokenize_function
357
+ tok_logger = transformers.utils.logging.get_logger("transformers.tokenization_utils_base")
358
+
359
+ datasets = datasets.shuffle()
360
+ def tokenize_function(examples):
361
+ with CaptureLogger(tok_logger) as cl:
362
+ output = tokenizer(examples[text_column_name])
363
+ # clm input could be much much longer than block_size
364
+ if "Token indices sequence length is longer than the" in cl.out:
365
+ tok_logger.warning(
366
+ "^^^^^^^^^^^^^^^^ Please ignore the warning above - this long input will be chunked into smaller bits before being passed to the model."
367
+ )
368
+ return output
369
+
370
+ # Ensures only the main process does dataset pre-processing; the other ones will load the `map` cache
371
+ if not is_main_process(training_args.local_rank):
372
+ print("waiting for main process to execute mapping")
373
+ torch.distributed.barrier()
374
+
375
+ logger.info("Mapping dataset to tokenized dataset.",)
376
+ tokenized_datasets = datasets.map(
377
+ tokenize_function,
378
+ batched=True,
379
+ num_proc=data_args.preprocessing_num_workers,
380
+ remove_columns=column_names,
381
+ load_from_cache_file=not data_args.overwrite_cache,
382
+ keep_in_memory=False
383
+ )
384
+
385
+ if data_args.block_size is None:
386
+ block_size = tokenizer.model_max_length
387
+ if block_size > 1024:
388
+ logger.warning(
389
+ f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
390
+ "Picking 1024 instead. You can change that default value by passing --block_size xxx."
391
+ )
392
+ block_size = 1024
393
+ else:
394
+ if data_args.block_size > tokenizer.model_max_length:
395
+ logger.warning(
396
+ f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model"
397
+ f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}."
398
+ )
399
+ block_size = min(data_args.block_size, tokenizer.model_max_length)
400
+
401
+ # Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size.
402
+ def group_texts(examples):
403
+ # Concatenate all texts.
404
+ concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
405
+ total_length = len(concatenated_examples[list(examples.keys())[0]])
406
+ # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
407
+ # customize this part to your needs.
408
+ total_length = (total_length // (2 * block_size)) * 2 * block_size
409
+ # Split by chunks of max_len.
410
+ result = {
411
+ k: [t[i : i + block_size] for i in range(0, total_length, 2*block_size)]
412
+ for k, t in concatenated_examples.items()
413
+ }
414
+ result["labels"] = [
415
+ concatenated_examples['input_ids'][i : i + block_size]
416
+ for i in range(block_size, total_length, 2*block_size)
417
+ ]
418
+ return result
419
+
420
+ # Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder
421
+ # for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower
422
+ # to preprocess.
423
+ #
424
+ # To speed up this part, we use multiprocessing. See the documentation of the map method for more information:
425
+ # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map
426
+
427
+ logger.info("Chunking tokenized dataset.")
428
+ lm_datasets = tokenized_datasets.map(
429
+ group_texts,
430
+ batched=True,
431
+ num_proc=data_args.preprocessing_num_workers,
432
+ load_from_cache_file=not data_args.overwrite_cache,
433
+ keep_in_memory=False
434
+ )
435
+
436
+ # Now the other ones can catch up.
437
+ if training_args.local_rank != -1 and is_main_process(training_args.local_rank):
438
+ print("loading results from main process")
439
+ torch.distributed.barrier()
440
+
441
+ if training_args.do_train:
442
+ if "train" not in tokenized_datasets:
443
+ raise ValueError("--do_train requires a train dataset")
444
+ train_dataset = lm_datasets["train"]
445
+ if data_args.max_train_samples is not None:
446
+ train_dataset = train_dataset.select(range(data_args.max_train_samples))
447
+
448
+ if training_args.do_eval:
449
+ if "validation" not in tokenized_datasets:
450
+ cutoff = data_args.validation_split_percentage * len(lm_datasets["train"]) // 100
451
+ train_dataset = lm_datasets["train"].select(range(cutoff, len(lm_datasets["train"])))
452
+ eval_dataset = lm_datasets["train"].select(range(cutoff))
453
+ else:
454
+ eval_dataset = lm_datasets["validation"]
455
+ if data_args.max_val_samples is not None:
456
+ eval_dataset = eval_dataset.select(range(data_args.max_val_samples))
457
+
458
+
459
+ # Initialize our Trainer
460
+ trainer = Trainer(
461
+ model=model,
462
+ args=training_args,
463
+ train_dataset=train_dataset if training_args.do_train else None,
464
+ eval_dataset=eval_dataset if training_args.do_eval else None,
465
+ tokenizer=tokenizer,
466
+ # Data collator will default to DataCollatorWithPadding, so we change it.
467
+ data_collator=default_data_collator,
468
+ callbacks=[LogFlosCallback, TensorBoardFloIndexedCallback]
469
+ )
470
+
471
+ # Training
472
+ if training_args.do_train:
473
+ checkpoint = None
474
+ if training_args.resume_from_checkpoint is not None:
475
+ checkpoint = training_args.resume_from_checkpoint
476
+ elif last_checkpoint is not None:
477
+ checkpoint = last_checkpoint
478
+
479
+ train_result = trainer.train(resume_from_checkpoint=checkpoint)
480
+ trainer.save_model() # Saves the tokenizer too for easy upload
481
+
482
+ metrics = train_result.metrics
483
+
484
+ max_train_samples = (
485
+ data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
486
+ )
487
+ metrics["train_samples"] = min(max_train_samples, len(train_dataset))
488
+
489
+ trainer.log_metrics("train", metrics)
490
+ trainer.save_metrics("train", metrics)
491
+ trainer.save_state()
492
+
493
+ # Evaluation
494
+ if training_args.do_eval:
495
+ logger.info("*** Evaluate ***")
496
+
497
+ metrics = trainer.evaluate()
498
+
499
+ max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
500
+ metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))
501
+ perplexity = math.exp(metrics["eval_loss"])
502
+ metrics["perplexity"] = perplexity
503
+
504
+ trainer.log_metrics("eval", metrics)
505
+ trainer.save_metrics("eval", metrics)
506
+
507
+
508
+ def _mp_fn(index):
509
+ # For xla_spawn (TPUs)
510
+ main()
511
+
512
+
513
+ if __name__ == "__main__":
514
+ main()
jz/slurms_scripts/cpu.slurm ADDED
@@ -0,0 +1,40 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=second_lm_balanced_prompted # job name
3
+ #SBATCH --ntasks=1 # number of MP task
4
+ #SBATCH --cpus-per-task=32 # number of cores per tasks
5
+ #SBATCH --hint=nomultithread # we get physical cores not logical
6
+ #SBATCH --time=20:00:00 # maximum execution time (HH:MM:SS)
7
+ #SBATCH --output=%x-%j.out # output file name
8
+ #SBATCH --error=%x-%j.err # error file name
9
+ #SBATCH --account=ajs@cpu
10
+ #SBATCH --mail-type=ALL
11
+
12
+ set -x -e
13
+
14
+ DATASET=wiki_bk_prompted
15
+ SERIALIZATION_DIR=${ALL_CCFRSCRATCH}/experiments/preprocess_data
16
+
17
+ source ~/.bashrc
18
+ conda activate smallexps
19
+ export TOKENIZERS_PARALLELISM=false
20
+ export PYTHONUNBUFFERED=true
21
+ export HF_DATASETS_OFFLINE=1
22
+ export TRANSFORMERS_OFFLINE=1
23
+
24
+ python ${WORK}/jay-z/scripts/run_clm_prompted.py \
25
+ --model_name_or_path gpt2-medium \
26
+ --tokenizer_name gpt2 \
27
+ --dataset_name ${ALL_CCFRSCRATCH}/datasets/${DATASET} --block_size 1024 \
28
+ --preprocessing_num_workers 31 \
29
+ --group_by_length --length_column_name length \
30
+ --cache_dir ${CACHE_DIR} \
31
+ --do_train --do_eval \
32
+ --max_steps 15000 \
33
+ --max_train_samples 10000000 \
34
+ --per_device_train_batch_size 4 --gradient_accumulation_steps 16 \
35
+ --per_device_eval_batch_size 8 \
36
+ --output_dir ${SERIALIZATION_DIR} --overwrite_output_dir \
37
+ --report_to tensorboard \
38
+ --logging_strategy steps --logging_first_step --logging_dir tb --logging_steps 20 \
39
+ --eval_steps 250 --evaluation_strategy steps \
40
+ --save_strategy steps --save_steps 500 --save_total_limit 31
jz/slurms_scripts/eval.slurm ADDED
@@ -0,0 +1,37 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=eval-array # job name
3
+ #SBATCH --qos=qos_gpu-t3 # t3 enables 20h jobs but on 512 GPUs
4
+ #SBATCH --ntasks=1 # number of MP tasks
5
+ #SBATCH --gres=gpu:4 # number of GPUs per node
6
+ #SBATCH --cpus-per-task=40 # number of cores per tasks
7
+ #SBATCH -C v100-16g
8
+ #SBATCH --array=500-17000:1000%26 # array of values
9
+ #SBATCH --hint=nomultithread # we get physical cores not logical
10
+ #SBATCH --time=04:00:00 # maximum execution time (HH:MM:SS)
11
+ #SBATCH --output=std-eval-%A_%a.out # output file name
12
+ #SBATCH --error=std-eval-%A_%a.out # error file name
13
+ #SBATCH --account=six@gpu
14
+ #SBATCH --mail-type=ALL
15
+
16
+ set -x -e
17
+
18
+ source $six_ALL_CCFRWORK/start-prod
19
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
20
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
21
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
22
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
23
+ export HF_DATASETS_OFFLINE=1
24
+ export TRANSFORMERS_OFFLINE=1
25
+
26
+ DATASET=openwebtext
27
+ SERIALIZATION_DIR=${eha_ALL_CCFRSCRATCH}/experiments/dec_only_t5-tiny
28
+
29
+ python -m torch.distributed.launch --nproc_per_node 4 ${six_ALL_CCFRWORK}/code/bigscience/jz/scripts/run_clm.py \
30
+ --model_name_or_path ${SERIALIZATION_DIR}/checkpoint-${SLURM_ARRAY_TASK_ID} \
31
+ --tokenizer_name t5-small \
32
+ --dataset_name ${DATASET} --block_size 1024 \
33
+ --preprocessing_num_workers 76 \
34
+ --do_eval \
35
+ --per_device_eval_batch_size 16 \
36
+ --output_dir ${SERIALIZATION_DIR}/checkpoint-${SLURM_ARRAY_TASK_ID} \
37
+ --report_to tensorboard --logging_dir ${SERIALIZATION_DIR}/checkpoint-${SLURM_ARRAY_TASK_ID}
jz/slurms_scripts/lmt5.slurm ADDED
@@ -0,0 +1,51 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=lmt5medium
3
+ #SBATCH --partition=gpu_p2
4
+ #SBATCH --qos=qos_gpu-t4 # t4 enables 100H trainings
5
+ #SBATCH --ntasks=1 # number of MP tasks
6
+ #SBATCH --gres=gpu:8 # number of GPUs per node
7
+ #SBATCH --cpus-per-task=24 # number of cores per tasks
8
+ #SBATCH --hint=nomultithread # we get physical cores not logical
9
+ #SBATCH --time=100:00:00 # maximum execution time (HH:MM:SS)
10
+ #SBATCH --output=%x-%j.out # output file name
11
+ #SBATCH --error=%x-%j.out # error file name (same to watch just one file)
12
+ #SBATCH --account=six@gpu
13
+ #SBATCH --mail-type=ALL
14
+
15
+ set -x -e
16
+
17
+ source $six_ALL_CCFRWORK/start-prod
18
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
19
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
20
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
21
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
22
+ export HF_DATASETS_OFFLINE=1
23
+ export TRANSFORMERS_OFFLINE=1
24
+
25
+ DATASET=openwebtext
26
+ LOGG_FREQUENCY=125
27
+ SAVE_FREQUENCY=250
28
+ EVAL_FREQUENCY=1000
29
+ SERIALIZATION_DIR=${eha_ALL_CCFRSCRATCH}/experiments/lm_t5-medium
30
+ LOGGING_DIR=${eha_ALL_CCFRSCRATCH}/tensorboard/lm_t5-medium
31
+
32
+ deepspeed ${six_ALL_CCFRWORK}/code/bigscience/jz/scripts/run_text2text.py \
33
+ --deepspeed ${six_ALL_CCFRWORK}/code/bigscience/jz/configs/deepspeed/ds_zero3.json \
34
+ --model_type t5 \
35
+ --tokenizer_name t5-small \
36
+ --config_name ${six_ALL_CCFRWORK}/code/bigscience/jz/configs/lm_t5/lm_t5-medium.json \
37
+ --dataset_name ${DATASET} --block_size 512 \
38
+ --preprocessing_num_workers 76 \
39
+ --do_train --do_eval \
40
+ --max_steps 34000 \
41
+ --per_device_train_batch_size 4 --gradient_accumulation_steps 8 \
42
+ --per_device_eval_batch_size 4 \
43
+ --learning_rate 3e-4 \
44
+ --adam_beta1 0.9 --adam_beta2 0.95 --weight_decay 0.1 \
45
+ --warmup_steps 800 \
46
+ --max_grad_norm 1.0 \
47
+ --output_dir ${SERIALIZATION_DIR} --overwrite_output_dir \
48
+ --report_to tensorboard \
49
+ --logging_strategy steps --logging_first_step --logging_dir ${LOGGING_DIR} --logging_steps ${LOGG_FREQUENCY} \
50
+ --eval_steps ${EVAL_FREQUENCY} --evaluation_strategy steps --max_val_samples 10000 \
51
+ --save_strategy steps --save_steps ${SAVE_FREQUENCY} --save_total_limit 200
train/arch-and-scaling-template.slurm ADDED
@@ -0,0 +1,186 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=1B3.slurm
3
+ #SBATCH --qos=qos_gpu-t3
4
+ #SBATCH --nodes=16
5
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
6
+ #SBATCH --cpus-per-task=40 # number of cores per tasks
7
+ #SBATCH --hint=nomultithread # we get physical cores not logical
8
+ #SBATCH --gres=gpu:4 # number of gpus
9
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
10
+ #SBATCH --output=%x-%j.out # output file name
11
+ #SBATCH --error=%x-%j.out # error file name (same to watch just one file)
12
+ #SBATCH --account=six@v100
13
+
14
+ set -x -e
15
+
16
+
17
+ # TODO: modify these for your training setup, just Ctrl-F replace <YOUR_TRAINING_NAME>
18
+ DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/synched_exps/<YOUR_TRAINING_NAME>
19
+ CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints
20
+ REPO_PATH=$DATA_OUTPUT_PATH/<YOUR_TRAINING_NAME>-logs
21
+ TENSORBOARD_PATH=$REPO_PATH/tensorboard
22
+ CODECARBON_PATH=$REPO_PATH/codecarbon
23
+ LOGS_PATH=$REPO_PATH/logs
24
+ # You need to git clone the Megatron-DeepSpeed
25
+ MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRWORK/code/Megatron-DeepSpeed
26
+
27
+ # TODO: you may change the dataset, some examples are at tr3-1B3-baseline (tr3 = c4 + t5-tokenizer, tr3m = the Pile)
28
+ VOCAB_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-vocab.json
29
+ MERGE_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-merges.txt
30
+ DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/oscar-en/meg-gpt2_text_document
31
+
32
+ # defining the right environment variables
33
+ source $six_ALL_CCFRWORK/start-prod
34
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
35
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
36
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
37
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
38
+ export HF_DATASETS_OFFLINE=1
39
+ export TRANSFORMERS_OFFLINE=1
40
+ cd $MEGATRON_DEEPSPEED_REPO
41
+
42
+ # testing for potential faulty nodes
43
+ srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"'
44
+
45
+ # so processes know who to talk to
46
+ MASTER_ADDR=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1`
47
+ MASTER_PORT=6000
48
+
49
+ # TODO: this is our base config for 1B3, edit PP/TP/batch size/model config if smaller or bigger
50
+ GPUS_PER_NODE=4
51
+ NNODES=16
52
+ PP_SIZE=2 # NLAYERS must be a multiple of PP_SIZE here
53
+ TP_SIZE=1 # always fixed to the size of a single node
54
+ DP_SIZE=$((NNODES*GPUS_PER_NODE/(PP_SIZE*TP_SIZE))) # will get derived automatically by trainer
55
+
56
+ MICRO_BATCH_SIZE=1
57
+ GLOBAL_BATCH_SIZE=512
58
+ TRAIN_ITER=73_242_187
59
+
60
+ NLAYERS=24
61
+ NHIDDEN=2048
62
+ NHEADS=16
63
+ FFN_HIDDEN_SIZE=8192
64
+ SEQ_LEN=2048
65
+
66
+ SAVE_INTERVAL=1500
67
+
68
+ OPTIMIZER_ARGS=" \
69
+ --optimizer adam \
70
+ --adam-beta1 0.9 \
71
+ --adam-beta2 0.999 \
72
+ --adam-eps 1e-8 \
73
+ --lr 2e-4 \
74
+ --min-lr 1e-5 \
75
+ --lr-decay-style cosine \
76
+ --lr-warmup-samples 183_105 \
77
+ --clip-grad 1.0 \
78
+ --weight-decay 1e-1 \
79
+ "
80
+
81
+ EXIT_OPTS=" \
82
+ --exit-duration-in-mins 1190 \
83
+ "
84
+
85
+ GPT_ARGS=" \
86
+ --num-layers $NLAYERS \
87
+ --hidden-size $NHIDDEN \
88
+ --num-attention-heads $NHEADS \
89
+ --ffn-hidden-size $FFN_HIDDEN_SIZE \
90
+ --seq-length $SEQ_LEN \
91
+ --max-position-embeddings $SEQ_LEN \
92
+ --micro-batch-size $MICRO_BATCH_SIZE \
93
+ --global-batch-size $GLOBAL_BATCH_SIZE \
94
+ --rampup-batch-size 32 32 2_000_000 \
95
+ --train-samples $TRAIN_ITER \
96
+ --vocab-file $VOCAB_FILE \
97
+ --merge-file $MERGE_FILE \
98
+ --loss-scale 12 \
99
+ --clip-grad 1.0 \
100
+ --fp16 \
101
+ --checkpoint-activations \
102
+ $OPTIMIZER_ARGS \
103
+ $EXIT_OPTS \
104
+ "
105
+
106
+ OUTPUT_ARGS=" \
107
+ --log-interval 200 \
108
+ --save-interval $SAVE_INTERVAL \
109
+ --eval-interval 1000 \
110
+ --eval-iters 100 \
111
+ --tensorboard-dir $TENSORBOARD_PATH \
112
+ --tensorboard-queue-size 5 \
113
+ --log-timers-to-tensorboard \
114
+ --log-batch-size-to-tensorboard \
115
+ --log-validation-ppl-to-tensorboard \
116
+ "
117
+ # TODO: Add --codecarbon-dir $CODECARBON_PATH \ if you want to use codecarbon, not adding it for now to make the current
118
+ # series of experiments consistent, especially speed-wise. Adding it once Tr6 and Tr7 are done
119
+
120
+ ZERO_STAGE=1
121
+
122
+ config_json="./ds_config.$SLURM_JOBID.json"
123
+
124
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
125
+ cat <<EOT > $config_json
126
+ {
127
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
128
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
129
+ "gradient_clipping": 1.0,
130
+ "zero_optimization": {
131
+ "stage": $ZERO_STAGE
132
+ },
133
+ "fp16": {
134
+ "enabled": true,
135
+ "loss_scale": 0,
136
+ "loss_scale_window": 500,
137
+ "hysteresis": 2,
138
+ "min_loss_scale": 1,
139
+ "initial_scale_power": 12
140
+ },
141
+ "steps_per_print": 2000,
142
+ "wall_clock_breakdown": false
143
+ }
144
+ EOT
145
+
146
+
147
+ DEEPSPEED_ARGS=" \
148
+ --deepspeed \
149
+ --deepspeed_config ${config_json} \
150
+ --zero-stage ${ZERO_STAGE} \
151
+ --deepspeed-activation-checkpointing \
152
+ "
153
+
154
+ export LAUNCHER="python -u -m torch.distributed.launch \
155
+ --nproc_per_node $GPUS_PER_NODE \
156
+ --nnodes $NNODES \
157
+ --master_addr $MASTER_ADDR \
158
+ --master_port $MASTER_PORT \
159
+ "
160
+
161
+ export CMD=" \
162
+ `pwd`/pretrain_gpt.py \
163
+ --tensor-model-parallel-size $TP_SIZE \
164
+ --pipeline-model-parallel-size $PP_SIZE \
165
+ $GPT_ARGS \
166
+ $OUTPUT_ARGS \
167
+ --save $CHECKPOINT_PATH \
168
+ --load $CHECKPOINT_PATH \
169
+ --data-path $DATA_PATH \
170
+ --data-impl mmap \
171
+ --split 949,50,1 \
172
+ --distributed-backend nccl \
173
+ $DEEPSPEED_ARGS \
174
+ "
175
+
176
+
177
+ # # clear old checkpoint as it'd mismatch while we sort things out
178
+ # rm -rf $SAVE_CHECKPOINT_PATH
179
+
180
+
181
+ echo $CMD
182
+
183
+ # We create the folder where the logs and codecarbon will be stored.
184
+ mkdir -p $LOGS_PATH
185
+ # to debug - add echo (it exits and prints what it would have launched)
186
+ srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' 2>&1 | tee -a $LOGS_PATH/main_log.txt
train/fixes.md ADDED
@@ -0,0 +1,70 @@
1
+ # Fixing things
2
+
3
+ ## Fix multiple checkpoints per branch on hub
4
+
5
+ Update all `config.json` files:
6
+
7
+ ```
8
+ cd /gpfsssd/scratch/rech/six/commun/experiments/fix-config/
9
+ export GIT_LFS_SKIP_SMUDGE=1
10
+ git clone https://huggingface.co/bigscience/tr3e-1B3-c4-checkpoints
11
+ cd tr3e-1B3-c4-checkpoints
12
+ ~/prod/code/bigscience/tools/hub-sync.py --repo-path . --patterns '*bogus*'
13
+ set +H
14
+ git branch -a | sort -V | perl -lne 'm|(global_step\d+)| && print qx[git checkout $1; perl -pi -e "s/gelu(?!_)/gelu_fast/" $1/config.json; git commit -m "gelu_fast is the correct activation_function" .; git push --set-upstream origin $1]'
15
+ export GIT_LFS_SKIP_SMUDGE=0
16
+ ```
17
+
18
+ ```
19
+ cd /gpfsssd/scratch/rech/six/commun/experiments/fix-config/
20
+ export GIT_LFS_SKIP_SMUDGE=1
21
+ git clone https://huggingface.co/bigscience/tr3d-1B3-oscar-checkpoints
22
+ cd tr3d-1B3-oscar-checkpoints
23
+ ~/prod/code/bigscience/tools/hub-sync.py --repo-path . --patterns '*bogus*'
24
+ set +H
25
+ git branch -a | sort -V | perl -lne 'm|(global_step\d+)| && print qx[git checkout $1; perl -pi -e "s/gelu(?!_)/gelu_fast/" $1/config.json; git commit -m "gelu_fast is the correct activation_function" .; git push --set-upstream origin $1]'
26
+ export GIT_LFS_SKIP_SMUDGE=0
27
+ ```
28
+
29
+
30
+ ```
31
+ cd /gpfsssd/scratch/rech/six/commun/experiments/fix-config/
32
+ export GIT_LFS_SKIP_SMUDGE=1
33
+ git clone https://huggingface.co/bigscience/tr3m-1B3-pile-checkpoints
34
+ cd tr3m-1B3-pile-checkpoints
35
+ set +H
36
+ ~/prod/code/bigscience/tools/hub-sync.py --repo-path . --patterns '*bogus*'
37
+ git branch -a | sort -V | perl -lne 'm|(global_step\d+)| && print qx[git checkout $1; perl -pi -e "s/gelu(?!_)/gelu_fast/" $1/config.json; git commit -m "gelu_fast is the correct activation_function" .; git push --set-upstream origin $1]'
38
+ export GIT_LFS_SKIP_SMUDGE=0
39
+ ```
40
+
41
+ ## Fix corrupted git
42
+
43
+
44
+ Quite a few times now we had an odd git corruption for the logging repos:
45
+
46
+
47
+ ```
48
+ OSError: error: invalid object 100644 e69f03783ce2b0af675405f22b49ebeb56d907e5 for '.gitattributes'
49
+ error: invalid object 100644 e69f03783ce2b0af675405f22b49ebeb56d907e5 for '.gitattributes'
50
+ error: Error building trees
51
+ ```
52
+
53
+ Of course, the error can be different.
54
+
55
+ Perhaps slurm somehow occasionally kills the syncing process while git is doing something internally and thus corrupts it. It's hard to tell.
56
+
57
+ You can fix these easily by making a new clone and swapping in just the `.git` dir.
58
+
59
+ Here is the full process using `tr8b-104B-logs` as an example:
60
+
61
+ ```
62
+ cd checkpoints/tr8b-104B/
63
+ git clone https://huggingface.co/bigscience/tr8b-104B-logs/ tr8b-104B-logs-new
64
+ mkdir trash
65
+ mv tr8b-104B-logs/.git trash
66
+ cp -r tr8b-104B-logs-new/.git tr8b-104B-logs/.git
67
+ # check that it is no longer broken
68
+ cd tr8b-104B-logs
69
+ git gc
70
+ ```
train/lessons-learned.md ADDED
@@ -0,0 +1,88 @@
1
+ # Lessons learned
2
+
3
+ The following are super-brief summary notes. If you want the details with graphs and full notes, see:
4
+
5
+ 13B:
6
+ * [chronicles](./tr1-13B-base/chronicles.md)
7
+
8
+ 104B:
9
+ * [chronicles a](./tr8-104B-wide/chronicles.md)
10
+ * [chronicles b](./tr8b-104B/chronicles.md)
11
+
12
+ ## How training divergences were overcome
13
+
14
+ The following are techniques that need to be applied before the training starts.
15
+
16
+ ### Using a formulaic std init
17
+
18
+ Setting `--init-method-std` to `sqrt(2/(NHIDDEN*5))` has made a huge difference to the training stability.
19
+
20
+ e.g. for `NHIDDEN=11600` we used `--init-method-std 0.006`
21
+
22
+ We derived this from:
23
+
24
+ `0.00587220219514703 = sqrt(2/(11600*5))` (from the "Transformers without Tears" paper https://arxiv.org/abs/1910.05895)
25
+
26
+ If you are wondering why the depth of the model is not included in this formula, it is used by the framework internally via a [second std init function](https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/40e8b2a086f98920de692ebc4081bf4229bfa81a/megatron/model/utils.py#L33-L40) which rescales the std of the second layer in the MLP and the output layer of the attention with:
27
+ ```
28
+ std = sigma / math.sqrt(2.0 * num_layers)
29
+ ```
30
+ where `sigma` is the `--init-method-std` argument.
31
+
32
+ Note that the Megatron-DeepSpeed 530B training used an even smaller init of `sqrt(1/(NHIDDEN*3))`. [Reference](https://arxiv.org/abs/2201.11990). (So their coefficient under the `sqrt` is 0.333 and ours is 0.4.)
33
+
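+ For reference, a minimal Python sketch of how the two values relate (the `NLAYERS=64` depth below is purely illustrative and not from any of our trainings):
+
+ ```
+ import math
+
+ NHIDDEN = 11600
+ NLAYERS = 64  # hypothetical depth, only used by the internal rescaling
+
+ sigma = math.sqrt(2 / (NHIDDEN * 5))          # what we pass as --init-method-std (~0.0059)
+ rescaled = sigma / math.sqrt(2.0 * NLAYERS)   # what the framework applies to the 2nd MLP layer and the attention output layer
+ print(f"--init-method-std {sigma:.4f}  internal rescale: {rescaled:.5f}")
+ ```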
34
+
35
+ ### Adding embed layernorm
36
+
37
+ Embedding LayerNorm has been shown to help a lot with spikes that the training can't recover from. This insight came from experimenting with https://github.com/facebookresearch/bitsandbytes, whose `StableEmbedding` is a normal Embedding followed by a LayerNorm, initialized with xavier uniform.
38
+
39
+ To activate it, add `--embed-layernorm`.
40
+
41
+ Note: since this layer has its own weights, it can only be added at the beginning of the training.
42
+
43
+ Note: since this is not part of the normal HF GPT2, this will require a new arch or a config that adds a layer-norm to the GPT2 model.
44
+
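+ Conceptually it is just an embedding followed by a LayerNorm; a hedged PyTorch sketch of the idea (not the actual Megatron-DeepSpeed code, and `EmbeddingWithLayerNorm` is a made-up name):
+
+ ```
+ import torch.nn as nn
+
+ class EmbeddingWithLayerNorm(nn.Module):
+     # same idea as bitsandbytes' StableEmbedding: token embedding followed by a LayerNorm
+     def __init__(self, vocab_size, hidden_size):
+         super().__init__()
+         self.embed = nn.Embedding(vocab_size, hidden_size)
+         nn.init.xavier_uniform_(self.embed.weight)  # StableEmbedding uses xavier uniform init
+         self.norm = nn.LayerNorm(hidden_size)
+
+     def forward(self, input_ids):
+         return self.norm(self.embed(input_ids))
+ ```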
45
+
46
+ ### Using a Lower LR
47
+
48
+ - Halving the lr from 6e-5 to 3e-5 also proved fruitful, but the training went through a huge spike at iteration 11.5k and took ~2k iterations to recover (exp 11), at which point it was put on hold and other approaches were experimented with.
49
+
50
+
51
+ ### Patience
52
+
53
+ In some cases of a huge spike it took ~2k iterations for the training to return to the lm loss it spiked from, and then it would continue training as if nothing had happened.
54
+
55
+ But more often than not the training won't recover from a spike.
56
+
57
+ Yet in other situations the training diverged slowly without any spikes.
58
+
59
+
60
+ ## How to deal with ongoing instabilities
61
+
62
+ How to recover from an instability without a full restart.
63
+
64
+ ### Data skipping
65
+
66
+ 1. Roll back to the last checkpoint before the instability
67
+ 2. Skip the data samples from the instability window: `--skip-train-iteration-range 8401-8800`
68
+
69
+ ### LR Changing
70
+
71
+ Normally LR-related params can't be changed once training has started (Megatron asserts), but with `--override-lr-scheduler` we can completely rewrite them and it just works. That is, Megatron recalculates everything based on the cmd line args and sets the LR to the right setting, which can be very different from what it would have normally been.
72
+
73
+ So, for example, we can now roll back a bit and change the LR if we need to, to try to overcome a rough patch of data or some other instability.
74
+
75
+
76
+ ## What was tried and didn't work
77
+
78
+ - changing the seed - the problem would usually just shift elsewhere - but it might work in some situations where data skipping worked
79
+
80
+ - a more numerically stable self-attention version by multiplying the two matrices passed to `torch.baddbmm` by `1.0/math.sqrt(self.norm_factor)` and then using `alpha=1.0`
81
+
82
+ - lowering `beta2` to 0.95 (from 0.999)
83
+
84
+ - changing width/depth ratio
85
+
86
+ - longer lr warmup
87
+
88
+ - tried Curriculum Learning
train/tflops_optimization.md ADDED
@@ -0,0 +1,33 @@
1
+ ## Rule of thumb for optimizing TFLOPS
2
+
3
+ Given n gpus, we're interested in finding the configuration that allows us to run the model the fastest:
4
+
5
+ When to use DP:
6
+ - Whenever you can. Use as much DP as you can.
7
+ - It does have a negative impact if `$GBS / $MBS` is close to DP, since the number of micro-batches per pipeline (the gradient accumulation steps) becomes small and you lose pipeline efficiency
8
+
9
+ When to use TP:
10
+ - When the largest layer does not fit into a single gpu (along with all the activation, optimizer states and gradient memory).
11
+ - TP is communication heavy, so you should never go beyond the number of gpus available in a single node
12
+
13
+ When to use PP:
14
+ - When the entire model doesn't fit in a single gpu.
15
+
16
+ The recipe goes as follows:
17
+ 1) Determine TP*PP (we'll refer to this value as MP later on):
18
+    1) Try and compare with some existing similar architecture (the 13B GPT needed 8 GPUs for one replica, i.e. TP*PP = 8)
19
+       1) The scaling factor in model size should be roughly the same as the scaling factor in gpus
20
+    2) Empiric rule: model_size*18 < 75% of gpu memory (to take into account the additional activation memory); see the sketch after this list
21
+       1) If that is `True` then you don't need any model parallelism
22
+    3) Test different configurations starting from TP=1/PP=1 with a single replica (DP=1) until you don't get OOM errors
23
+ 2) You usually want PP=$MP unless a single layer doesn't fit in a single gpu, in which case TP is necessary:
24
+    1) You can use the empiric rule in 1.ii on single layers to get an idea of whether you need TP or not.
25
+
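+ A rough Python sketch of the empiric rule in 1.ii; the 18 bytes/param roughly covers fp16 weights and gradients plus fp32 Adam states, and the helper name and example numbers are illustrative only:
+
+ ```
+ def fits_without_model_parallelism(n_params_billion, gpu_mem_gb):
+     bytes_per_param = 18                             # weights + grads + optimizer states, roughly
+     needed_gb = n_params_billion * bytes_per_param   # 1e9 params * 18 bytes ~= 18 GB
+     return needed_gb < 0.75 * gpu_mem_gb             # keep ~25% headroom for activations
+
+ print(fits_without_model_parallelism(1.3, 80))   # 1B3 model on an 80GB A100 -> True
+ print(fits_without_model_parallelism(13, 32))    # 13B model on a 32GB V100 -> False, needs MP
+ ```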
26
+ Bear in mind that these are just guidelines to help you quickly narrow down the configuration options. You should still try a few different configurations and see which one gives the best throughput.
27
+
28
+ Also watch the gpu memory usage by logging into one of the nodes: you don't want it too close to the max, but you also don't want a lot of free gpu memory sitting idle. If there is free memory, you can tune things further to squeeze out higher gpu utilization, e.g. benchmark raising MBS to use it up. But measure the impact, since it doesn't always make things faster.
29
+
30
+ Additionally, be aware of [the different constraints](https://github.com/bigscience-workshop/bigscience/blob/master/train/sanity-checks.md).
31
+
32
+ Here is [how to calculate TFLOPs](https://github.com/bigscience-workshop/bigscience/tree/master/math#calculate-tflops).
33
+
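+ As a convenience, a minimal Python sketch of a commonly used estimate (see the link above for the full derivation); it assumes activation recomputation, hence the factor 4 = forward + 2x backward + recompute, with 2 for multiply-accumulate, and the function name and example numbers are illustrative:
+
+ ```
+ def tflops_per_gpu(model_size_in_B, seq_len, global_batch_size, secs_per_iter, n_gpus):
+     flops_per_iter = model_size_in_B * 1e9 * 4 * 2 * seq_len * global_batch_size
+     return flops_per_iter / (secs_per_iter * n_gpus * 1e12)
+
+ # illustrative numbers only: 1.3B model, 2048 seq len, GBS 512, 4s/iteration on 64 GPUs
+ print(round(tflops_per_gpu(1.3, 2048, 512, 4, 64), 1))  # ~42.6
+ ```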
train/tr10-13B-ml/chronicles.md ADDED
File without changes
train/tr13-mtf/smaller_models/tr13-6b3-mtf-xp3zhmt.slurm ADDED
@@ -0,0 +1,211 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=xp3zhmt
3
+ #SBATCH --partition=gpu_p5
4
+ #SBATCH --constraint=a100
5
+ #SBATCH --reservation=hug
6
+ #SBATCH --qos=qos_gpu-gc # up to 100h
7
+ #SBATCH --nodes=8
8
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
9
+ #SBATCH --cpus-per-task=64 # number of cores per tasks
10
+ #SBATCH --hint=nomultithread # we get physical cores not logical
11
+ #SBATCH --gres=gpu:8 # number of gpus
12
+ #SBATCH --time 80:00:00 # maximum execution time (HH:MM:SS)
13
+ #SBATCH --output=%x-%j.out # output file name
14
+ #SBATCH --account=six@a100
15
+
16
+ set -x -e
17
+
18
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
19
+ echo "START TIME: $(date)"
20
+
21
+ variant=xp3zhmt
22
+
23
+ DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13f-6B3-ml-t0
24
+ CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant
25
+ REPO_PATH=$DATA_OUTPUT_PATH/tr13f-6B3-ml-t0-logs
26
+ TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant
27
+ LOGS_PATH=$REPO_PATH/logs/$variant
28
+ mkdir -p $LOGS_PATH
29
+ mkdir -p $TENSORBOARD_PATH
30
+
31
+ MEGATRON_DEEPSPEED_REPO=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0/megdslossseqnew/Megatron-DeepSpeed
32
+ cd $MEGATRON_DEEPSPEED_REPO
33
+
34
+ KILL_SWITCH_PATH=$MEGATRON_DEEPSPEED_REPO/kill-switch-tr13f-6B3-mtf
35
+
36
+ TRAIN_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3zhmt_train.txt
37
+ VALID_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3zhmt_validation_pretr.txt
38
+ TOKENIZER_NAME_OR_PATH=bigscience/tokenizer
39
+
40
+ # defining the right environment variables
41
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
42
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
43
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
44
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
45
+ export HF_DATASETS_OFFLINE=1
46
+ export TRANSFORMERS_OFFLINE=1
47
+
48
+ # testing for potential faulty nodes
49
+ # srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"'
50
+
51
+ # so processes know who to talk to
52
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
53
+ MASTER_PORT=6001
54
+
55
+ GPUS_PER_NODE=8
56
+ NNODES=$SLURM_NNODES
57
+
58
+ PP_SIZE=1
59
+ TP_SIZE=1
60
+
61
+ # T0 paper:
62
+ # ...truncate input and target sequences to 1024 and 256 tokens...
63
+ # ...use a batch size of 1024 sequences ... 2^20 total input tokens per batch...
64
+ # We use 2048 total tokens and 512 batch size = 2**20
65
+ MICRO_BATCH_SIZE=4
66
+ GLOBAL_BATCH_SIZE=2048
67
+
68
+ NLAYERS=30
69
+ NHIDDEN=4096
70
+ NHEADS=32
71
+ SEQ_LEN=2048
72
+ # 250
73
+ SAVE_INTERVAL=2
74
+
75
+ TRAIN_SAMPLES=6_348_800
76
+
77
+ # T0 paper:
78
+ # "...we use a learning rate of 1e-3..."
79
+ # However, they use Adafactor, which adapts the LR
80
+ # For Adam we likely want a lower one
81
+ # FLAN:
82
+ # "...decay of 1e-4..""
83
+
84
+ # Uncomment for the first step
85
+ # --no-load-optim \
86
+ OPTIMIZER_ARGS=" \
87
+ --optimizer adam \
88
+ --adam-beta1 0.9 \
89
+ --adam-beta2 0.95 \
90
+ --adam-eps 1e-8 \
91
+ --lr 2e-5 \
92
+ --lr-decay-style constant \
93
+ --lr-warmup-samples 0 \
94
+ --clip-grad 1.0 \
95
+ --weight-decay 1e-4 \
96
+ --no-load-optim \
97
+ --norm-target-loss \
98
+ "
99
+ # for 20h 1190, for 100h 5990
100
+ # --exit-duration-in-mins 1190 \
101
+ EXIT_OPTS=" \
102
+ --exit-duration-in-mins 5990 \
103
+ "
104
+
105
+ GPT_ARGS=" \
106
+ --pp-partition-method 'type:transformer|embedding' \
107
+ --num-layers $NLAYERS \
108
+ --hidden-size $NHIDDEN \
109
+ --num-attention-heads $NHEADS \
110
+ --seq-length $SEQ_LEN \
111
+ --max-position-embeddings $SEQ_LEN \
112
+ --micro-batch-size $MICRO_BATCH_SIZE \
113
+ --global-batch-size $GLOBAL_BATCH_SIZE \
114
+ --train-samples $TRAIN_SAMPLES \
115
+ --tokenizer-type PretrainedFromHF \
116
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
117
+ --init-method-std 0.0048 \
118
+ --embed-layernorm \
119
+ --fp16 \
120
+ --seed 42 \
121
+ --position-embedding-type alibi \
122
+ --checkpoint-activations \
123
+ --abort-on-unmet-fused-kernel-constraints \
124
+ --kill-switch-path $KILL_SWITCH_PATH \
125
+ --pad-vocab-size-to 250880 \
126
+ $OPTIMIZER_ARGS \
127
+ $EXIT_OPTS \
128
+ "
129
+
130
+ OUTPUT_ARGS=" \
131
+ --log-interval 1 \
132
+ --save-interval $SAVE_INTERVAL \
133
+ --eval-interval 125 \
134
+ --eval-iters 10 \
135
+ --tensorboard-dir $TENSORBOARD_PATH \
136
+ --tensorboard-queue-size 5 \
137
+ --log-timers-to-tensorboard \
138
+ --log-batch-size-to-tensorboard \
139
+ --log-validation-ppl-to-tensorboard \
140
+ "
141
+
142
+ ZERO_STAGE=1
143
+
144
+ config_json="./ds_config.$SLURM_JOBID.json"
145
+
146
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
147
+ cat <<EOT > $config_json
148
+ {
149
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
150
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
151
+ "gradient_clipping": 1.0,
152
+ "zero_optimization": {
153
+ "stage": $ZERO_STAGE
154
+ },
155
+ "fp16": {
156
+ "enabled": true,
157
+ "loss_scale": 0,
158
+ "loss_scale_window": 500,
159
+ "hysteresis": 2,
160
+ "min_loss_scale": 1,
161
+ "initial_scale_power": 12
162
+ },
163
+ "steps_per_print": 2000,
164
+ "wall_clock_breakdown": false
165
+ }
166
+ EOT
167
+
168
+
169
+ DEEPSPEED_ARGS=" \
170
+ --deepspeed \
171
+ --deepspeed_config ${config_json} \
172
+ --zero-stage ${ZERO_STAGE} \
173
+ --deepspeed-activation-checkpointing \
174
+ "
175
+
176
+ export LAUNCHER="python -u -m torch.distributed.run \
177
+ --nproc_per_node $GPUS_PER_NODE \
178
+ --nnodes $NNODES \
179
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
180
+ --rdzv_backend c10d \
181
+ --max_restarts 0 \
182
+ --tee 3 \
183
+ "
184
+
185
+ export CMD=" \
186
+ `pwd`/finetune_t0.py \
187
+ --tensor-model-parallel-size $TP_SIZE \
188
+ --pipeline-model-parallel-size $PP_SIZE \
189
+ $GPT_ARGS \
190
+ $OUTPUT_ARGS \
191
+ --save $CHECKPOINT_PATH \
192
+ --load $CHECKPOINT_PATH \
193
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
194
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
195
+ --dataloader-type single \
196
+ --data-impl mmap \
197
+ --distributed-backend nccl \
198
+ $DEEPSPEED_ARGS \
199
+ "
200
+
201
+ echo $CMD
202
+
203
+ # do not remove or the training will hang and nodes will be lost w/o this workaround
204
+ export CUDA_LAUNCH_BLOCKING=1
205
+
206
+ # hide duplicated errors using this hack - will be properly fixed in pt-1.12
207
+ export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json
208
+
209
+ clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt
210
+
211
+ echo "END TIME: $(date)"
train/tr13-mtf/smaller_models/tr13e-350m-mtf-xp3capmixnewcodelonglossseq-a100.slurm ADDED
@@ -0,0 +1,212 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=xp3capmixnewcodelong350m
3
+ #SBATCH --partition=gpu_p5
4
+ #SBATCH --constraint=a100
5
+ #SBATCH --qos=qos_gpu-gc # up to 100h
6
+ #SBATCH --nodes=4
7
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
8
+ #SBATCH --cpus-per-task=64 # number of cores per tasks
9
+ #SBATCH --hint=nomultithread # we get physical cores not logical
10
+ #SBATCH --gres=gpu:8 # number of gpus
11
+ #SBATCH --time 100:00:00 # maximum execution time (HH:MM:SS)
12
+ #SBATCH --output=%x-%j.out # output file name
13
+ #SBATCH --account=six@a100
14
+
15
+ set -x -e
16
+
17
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
18
+ echo "START TIME: $(date)"
19
+
20
+ variant=xp3capmixnewcodelonglossseq
21
+
22
+ DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13e-350M-ml-t0
23
+ CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant
24
+ REPO_PATH=$DATA_OUTPUT_PATH/tr13e-350M-ml-logs
25
+ TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant
26
+ LOGS_PATH=$REPO_PATH/logs/$variant
27
+ mkdir -p $LOGS_PATH
28
+ mkdir -p $TENSORBOARD_PATH
29
+
30
+ MEGATRON_DEEPSPEED_REPO=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0/megdslossseqnew350m/Megatron-DeepSpeed
31
+ cd $MEGATRON_DEEPSPEED_REPO
32
+
33
+ KILL_SWITCH_PATH=$MEGATRON_DEEPSPEED_REPO/kill-switch-tr13e-350M-mtf
34
+
35
+ TRAIN_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixnewcodelong_train.txt
36
+ VALID_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixnewcodelong_validation_pretr.txt
37
+ TOKENIZER_NAME_OR_PATH=bigscience/tokenizer
38
+
39
+ # defining the right environment variables
40
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
41
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
42
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
43
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
44
+ export HF_DATASETS_OFFLINE=1
45
+ export TRANSFORMERS_OFFLINE=1
46
+
47
+ # testing for potential faulty nodes
48
+ # srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"'
49
+
50
+ # so processes know who to talk to
51
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
52
+ MASTER_PORT=6001
53
+
54
+ GPUS_PER_NODE=8
55
+ NNODES=$SLURM_NNODES
56
+
57
+ PP_SIZE=1
58
+ TP_SIZE=1
59
+
60
+ # T0 paper:
61
+ # ...truncate input and target sequences to 1024 and 256 tokens...
62
+ # ...use a batch size of 1024 sequences ... 2^20 total input tokens per batch...
63
+ # We use 2048 total tokens and 512 batch size = 2**20
64
+ MICRO_BATCH_SIZE=1
65
+ GLOBAL_BATCH_SIZE=1024
66
+
67
+ NLAYERS=24
68
+ NHIDDEN=1024
69
+ NHEADS=16
70
+ SEQ_LEN=2048
71
+ # 250
72
+ SAVE_INTERVAL=2
73
+
74
+ TRAIN_SAMPLES=6_348_800
75
+
76
+ # T0 paper:
77
+ # "...we use a learning rate of 1e-3..."
78
+ # However, they use Adafactor, which adapts the LR
79
+ # For Adam we likely want a lower one
80
+ # FLAN:
81
+ # "...decay of 1e-4..""
82
+
83
+ # Uncomment for the first step
84
+ # --no-load-optim \
85
+ # --reset-progress \
86
+ OPTIMIZER_ARGS=" \
87
+ --optimizer adam \
88
+ --adam-beta1 0.9 \
89
+ --adam-beta2 0.95 \
90
+ --adam-eps 1e-8 \
91
+ --lr 2e-5 \
92
+ --lr-decay-style constant \
93
+ --lr-warmup-samples 0 \
94
+ --clip-grad 1.0 \
95
+ --weight-decay 1e-4 \
96
+ --no-load-optim \
97
+ --reset-progress \
98
+ --norm-target-loss \
99
+ "
100
+ # for 20h 1190, for 100h 5990
101
+ # --exit-duration-in-mins 1190 \
102
+ EXIT_OPTS=" \
103
+ --exit-duration-in-mins 5990 \
104
+ "
105
+
106
+ GPT_ARGS=" \
107
+ --pp-partition-method 'type:transformer|embedding' \
108
+ --num-layers $NLAYERS \
109
+ --hidden-size $NHIDDEN \
110
+ --num-attention-heads $NHEADS \
111
+ --seq-length $SEQ_LEN \
112
+ --max-position-embeddings $SEQ_LEN \
113
+ --micro-batch-size $MICRO_BATCH_SIZE \
114
+ --global-batch-size $GLOBAL_BATCH_SIZE \
115
+ --train-samples $TRAIN_SAMPLES \
116
+ --tokenizer-type PretrainedFromHF \
117
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
118
+ --init-method-std 0.0048 \
119
+ --embed-layernorm \
120
+ --fp16 \
121
+ --seed 42 \
122
+ --position-embedding-type alibi \
123
+ --checkpoint-activations \
124
+ --abort-on-unmet-fused-kernel-constraints \
125
+ --kill-switch-path $KILL_SWITCH_PATH \
126
+ --pad-vocab-size-to 250880 \
127
+ $OPTIMIZER_ARGS \
128
+ $EXIT_OPTS \
129
+ "
130
+
131
+ OUTPUT_ARGS=" \
132
+ --log-interval 1 \
133
+ --save-interval $SAVE_INTERVAL \
134
+ --eval-interval 125 \
135
+ --eval-iters 10 \
136
+ --tensorboard-dir $TENSORBOARD_PATH \
137
+ --tensorboard-queue-size 5 \
138
+ --log-timers-to-tensorboard \
139
+ --log-batch-size-to-tensorboard \
140
+ --log-validation-ppl-to-tensorboard \
141
+ "
142
+
143
+ ZERO_STAGE=1
144
+
145
+ config_json="./ds_config.$SLURM_JOBID.json"
146
+
147
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
148
+ cat <<EOT > $config_json
149
+ {
150
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
151
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
152
+ "gradient_clipping": 1.0,
153
+ "zero_optimization": {
154
+ "stage": $ZERO_STAGE
155
+ },
156
+ "fp16": {
157
+ "enabled": true,
158
+ "loss_scale": 0,
159
+ "loss_scale_window": 500,
160
+ "hysteresis": 2,
161
+ "min_loss_scale": 1,
162
+ "initial_scale_power": 12
163
+ },
164
+ "steps_per_print": 2000,
165
+ "wall_clock_breakdown": false
166
+ }
167
+ EOT
168
+
169
+
170
+ DEEPSPEED_ARGS=" \
171
+ --deepspeed \
172
+ --deepspeed_config ${config_json} \
173
+ --zero-stage ${ZERO_STAGE} \
174
+ --deepspeed-activation-checkpointing \
175
+ "
176
+
177
+ export LAUNCHER="python -u -m torch.distributed.run \
178
+ --nproc_per_node $GPUS_PER_NODE \
179
+ --nnodes $NNODES \
180
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
181
+ --rdzv_backend c10d \
182
+ --max_restarts 0 \
183
+ --tee 3 \
184
+ "
185
+
186
+ export CMD=" \
187
+ `pwd`/finetune_t0.py \
188
+ --tensor-model-parallel-size $TP_SIZE \
189
+ --pipeline-model-parallel-size $PP_SIZE \
190
+ $GPT_ARGS \
191
+ $OUTPUT_ARGS \
192
+ --save $CHECKPOINT_PATH \
193
+ --load $CHECKPOINT_PATH \
194
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
195
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
196
+ --dataloader-type single \
197
+ --data-impl mmap \
198
+ --distributed-backend nccl \
199
+ $DEEPSPEED_ARGS \
200
+ "
201
+
202
+ echo $CMD
203
+
204
+ # do not remove or the training will hang and nodes will be lost w/o this workaround
205
+ export CUDA_LAUNCH_BLOCKING=1
206
+
207
+ # hide duplicated errors using this hack - will be properly fixed in pt-1.12
208
+ export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json
209
+
210
+ clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt
211
+
212
+ echo "END TIME: $(date)"
train/tr13-mtf/smaller_models/tr13e-350m-mtf-xp3capmixnewcodelonglossseq.slurm ADDED
@@ -0,0 +1,211 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=xp3capmixnewcodelong
3
+ #SBATCH --qos=qos_gpu-t3
4
+ #SBATCH --nodes=8
5
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
6
+ #SBATCH --cpus-per-task=40 # number of cores per tasks
7
+ #SBATCH --hint=nomultithread # we get physical cores not logical
8
+ #SBATCH --gres=gpu:4 # number of gpus
9
+ #SBATCH -C v100-32g
10
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
11
+ #SBATCH --output=%x-%j.out # output file name
12
+ #SBATCH --account=six@v100
13
+
14
+ set -x -e
15
+
16
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
17
+ echo "START TIME: $(date)"
18
+
19
+ variant=xp3capmixnewcodelonglossseq
20
+
21
+ DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13e-350M-ml-t0
22
+ CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant
23
+ REPO_PATH=$DATA_OUTPUT_PATH/tr13e-350M-ml-logs
24
+ TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant
25
+ LOGS_PATH=$REPO_PATH/logs/$variant
26
+ mkdir -p $LOGS_PATH
27
+ mkdir -p $TENSORBOARD_PATH
28
+
29
+ MEGATRON_DEEPSPEED_REPO=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0/megdslossseqnew350m/Megatron-DeepSpeed
30
+ cd $MEGATRON_DEEPSPEED_REPO
31
+
32
+ KILL_SWITCH_PATH=$MEGATRON_DEEPSPEED_REPO/kill-switch-tr13e-350M-mtf
33
+
34
+ TRAIN_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixnewcodelong_train.txt
35
+ VALID_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixnewcodelong_validation_pretr.txt
36
+ TOKENIZER_NAME_OR_PATH=bigscience/tokenizer
37
+
38
+ # defining the right environment variables
39
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
40
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
41
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
42
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
43
+ export HF_DATASETS_OFFLINE=1
44
+ export TRANSFORMERS_OFFLINE=1
45
+
46
+ # testing for potential faulty nodes
47
+ # srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"'
48
+
49
+ # so processes know who to talk to
50
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
51
+ MASTER_PORT=6001
52
+
53
+ GPUS_PER_NODE=4
54
+ NNODES=$SLURM_NNODES
55
+
56
+ PP_SIZE=1
57
+ TP_SIZE=1
58
+
59
+ # T0 paper:
60
+ # ...truncate input and target sequences to 1024 and 256 tokens...
61
+ # ...use a batch size of 1024 sequences ... 2^20 total input tokens per batch...
62
+ # We use 2048 total tokens and 512 batch size = 2**20
63
+ MICRO_BATCH_SIZE=1
64
+ GLOBAL_BATCH_SIZE=1024
65
+
66
+ NLAYERS=24
67
+ NHIDDEN=1024
68
+ NHEADS=16
69
+ SEQ_LEN=2048
70
+ # 250
71
+ SAVE_INTERVAL=2
72
+
73
+ TRAIN_SAMPLES=6_348_800
74
+
75
+ # T0 paper:
76
+ # "...we use a learning rate of 1e-3..."
77
+ # However, they use Adafactor, which adapts the LR
78
+ # For Adam we likely want a lower one
79
+ # FLAN:
80
+ # "...decay of 1e-4..""
81
+
82
+ # Uncomment for the first step
83
+ # --no-load-optim \
84
+ # --reset-progress \
85
+ OPTIMIZER_ARGS=" \
86
+ --optimizer adam \
87
+ --adam-beta1 0.9 \
88
+ --adam-beta2 0.95 \
89
+ --adam-eps 1e-8 \
90
+ --lr 2e-5 \
91
+ --lr-decay-style constant \
92
+ --lr-warmup-samples 0 \
93
+ --clip-grad 1.0 \
94
+ --weight-decay 1e-4 \
95
+ --no-load-optim \
96
+ --reset-progress \
97
+ --norm-target-loss \
98
+ "
99
+ # for 20h 1190, for 100h 5990
100
+ # --exit-duration-in-mins 1190 \
101
+ EXIT_OPTS=" \
102
+ --exit-duration-in-mins 5990 \
103
+ "
104
+
105
+ GPT_ARGS=" \
106
+ --pp-partition-method 'type:transformer|embedding' \
107
+ --num-layers $NLAYERS \
108
+ --hidden-size $NHIDDEN \
109
+ --num-attention-heads $NHEADS \
110
+ --seq-length $SEQ_LEN \
111
+ --max-position-embeddings $SEQ_LEN \
112
+ --micro-batch-size $MICRO_BATCH_SIZE \
113
+ --global-batch-size $GLOBAL_BATCH_SIZE \
114
+ --train-samples $TRAIN_SAMPLES \
115
+ --tokenizer-type PretrainedFromHF \
116
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
117
+ --init-method-std 0.0048 \
118
+ --embed-layernorm \
119
+ --fp16 \
120
+ --seed 42 \
121
+ --position-embedding-type alibi \
122
+ --checkpoint-activations \
123
+ --abort-on-unmet-fused-kernel-constraints \
124
+ --kill-switch-path $KILL_SWITCH_PATH \
125
+ --pad-vocab-size-to 250880 \
126
+ $OPTIMIZER_ARGS \
127
+ $EXIT_OPTS \
128
+ "
129
+
130
+ OUTPUT_ARGS=" \
131
+ --log-interval 1 \
132
+ --save-interval $SAVE_INTERVAL \
133
+ --eval-interval 125 \
134
+ --eval-iters 10 \
135
+ --tensorboard-dir $TENSORBOARD_PATH \
136
+ --tensorboard-queue-size 5 \
137
+ --log-timers-to-tensorboard \
138
+ --log-batch-size-to-tensorboard \
139
+ --log-validation-ppl-to-tensorboard \
140
+ "
141
+
142
+ ZERO_STAGE=1
143
+
144
+ config_json="./ds_config.$SLURM_JOBID.json"
145
+
146
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
147
+ cat <<EOT > $config_json
148
+ {
149
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
150
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
151
+ "gradient_clipping": 1.0,
152
+ "zero_optimization": {
153
+ "stage": $ZERO_STAGE
154
+ },
155
+ "fp16": {
156
+ "enabled": true,
157
+ "loss_scale": 0,
158
+ "loss_scale_window": 500,
159
+ "hysteresis": 2,
160
+ "min_loss_scale": 1,
161
+ "initial_scale_power": 12
162
+ },
163
+ "steps_per_print": 2000,
164
+ "wall_clock_breakdown": false
165
+ }
166
+ EOT
167
+
168
+
169
+ DEEPSPEED_ARGS=" \
170
+ --deepspeed \
171
+ --deepspeed_config ${config_json} \
172
+ --zero-stage ${ZERO_STAGE} \
173
+ --deepspeed-activation-checkpointing \
174
+ "
175
+
176
+ export LAUNCHER="python -u -m torch.distributed.run \
177
+ --nproc_per_node $GPUS_PER_NODE \
178
+ --nnodes $NNODES \
179
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
180
+ --rdzv_backend c10d \
181
+ --max_restarts 0 \
182
+ --tee 3 \
183
+ "
184
+
185
+ export CMD=" \
186
+ `pwd`/finetune_t0.py \
187
+ --tensor-model-parallel-size $TP_SIZE \
188
+ --pipeline-model-parallel-size $PP_SIZE \
189
+ $GPT_ARGS \
190
+ $OUTPUT_ARGS \
191
+ --save $CHECKPOINT_PATH \
192
+ --load $CHECKPOINT_PATH \
193
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
194
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
195
+ --dataloader-type single \
196
+ --data-impl mmap \
197
+ --distributed-backend nccl \
198
+ $DEEPSPEED_ARGS \
199
+ "
200
+
201
+ echo $CMD
202
+
203
+ # do not remove or the training will hang and nodes will be lost w/o this workaround
204
+ export CUDA_LAUNCH_BLOCKING=1
205
+
206
+ # hide duplicated errors using this hack - will be properly fixed in pt-1.12
207
+ export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json
208
+
209
+ clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt
210
+
211
+ echo "END TIME: $(date)"
train/tr13-mtf/smaller_models/tr13e-760m-mtf-xp3capmixnewcodelonglossseq-a100.slurm ADDED
@@ -0,0 +1,211 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=xp3capmixnewcodelong760m
3
+ #SBATCH --partition=gpu_p5
4
+ #SBATCH --constraint=a100
5
+ #SBATCH --nodes=16
6
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
7
+ #SBATCH --cpus-per-task=64 # number of cores per tasks
8
+ #SBATCH --hint=nomultithread # we get physical cores not logical
9
+ #SBATCH --gres=gpu:8 # number of gpus
10
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
11
+ #SBATCH --output=%x-%j.out # output file name
12
+ #SBATCH --account=six@a100
13
+
14
+
15
+ set -x -e
16
+
17
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
18
+ echo "START TIME: $(date)"
19
+
20
+ variant=xp3capmixnewcodelonglossseq
21
+
22
+ DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13d-760M-ml-t0
23
+ CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant
24
+ REPO_PATH=$DATA_OUTPUT_PATH/tr13d-760M-ml-logs
25
+ TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant
26
+ LOGS_PATH=$REPO_PATH/logs/$variant
27
+ mkdir -p $LOGS_PATH
28
+ mkdir -p $TENSORBOARD_PATH
29
+
30
+ MEGATRON_DEEPSPEED_REPO=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0/megdslossseqnew/Megatron-DeepSpeed
31
+ cd $MEGATRON_DEEPSPEED_REPO
32
+
33
+ KILL_SWITCH_PATH=$MEGATRON_DEEPSPEED_REPO/kill-switch-tr13e-350M-mtf
34
+
35
+ TRAIN_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixnewcodelong_train.txt
36
+ VALID_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixnewcodelong_validation_pretr.txt
37
+ TOKENIZER_NAME_OR_PATH=bigscience/tokenizer
38
+
39
+ # defining the right environment variables
40
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
41
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
42
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
43
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
44
+ export HF_DATASETS_OFFLINE=1
45
+ export TRANSFORMERS_OFFLINE=1
46
+
47
+ # testing for potential faulty nodes
48
+ # srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"'
49
+
50
+ # so processes know who to talk to
51
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
52
+ MASTER_PORT=6001
53
+
54
+ GPUS_PER_NODE=8
55
+ NNODES=$SLURM_NNODES
56
+
57
+ PP_SIZE=2
58
+ TP_SIZE=1
59
+
60
+ # T0 paper:
61
+ # ...truncate input and target sequences to 1024 and 256 tokens...
62
+ # ...use a batch size of 1024 sequences ... 2^20 total input tokens per batch...
63
+ # We use 2048 total tokens and 512 batch size = 2**20
64
+ MICRO_BATCH_SIZE=1
65
+ GLOBAL_BATCH_SIZE=1024
66
+
67
+ NLAYERS=24
68
+ NHIDDEN=1536
69
+ NHEADS=16
70
+ SEQ_LEN=2048
71
+ # 250
72
+ SAVE_INTERVAL=2
73
+
74
+ TRAIN_SAMPLES=6_348_800
75
+
76
+ # T0 paper:
77
+ # "...we use a learning rate of 1e-3..."
78
+ # However, they use Adafactor, which adapts the LR
79
+ # For Adam we likely want a lower one
80
+ # FLAN:
81
+ # "...decay of 1e-4..""
82
+
83
+ # Uncomment for the first step
84
+ # --no-load-optim \
85
+ OPTIMIZER_ARGS=" \
86
+ --optimizer adam \
87
+ --adam-beta1 0.9 \
88
+ --adam-beta2 0.95 \
89
+ --adam-eps 1e-8 \
90
+ --lr 2e-5 \
91
+ --lr-decay-style constant \
92
+ --lr-warmup-samples 0 \
93
+ --clip-grad 1.0 \
94
+ --weight-decay 1e-4 \
95
+ --no-load-optim \
96
+ --norm-target-loss \
97
+ --reset-progress \
98
+ "
99
+ # for 20h 1190, for 100h 5990
100
+ # --exit-duration-in-mins 1190 \
101
+ EXIT_OPTS=" \
102
+ --exit-duration-in-mins 5990 \
103
+ "
104
+
105
+ GPT_ARGS=" \
106
+ --pp-partition-method 'type:transformer|embedding' \
107
+ --num-layers $NLAYERS \
108
+ --hidden-size $NHIDDEN \
109
+ --num-attention-heads $NHEADS \
110
+ --seq-length $SEQ_LEN \
111
+ --max-position-embeddings $SEQ_LEN \
112
+ --micro-batch-size $MICRO_BATCH_SIZE \
113
+ --global-batch-size $GLOBAL_BATCH_SIZE \
114
+ --train-samples $TRAIN_SAMPLES \
115
+ --tokenizer-type PretrainedFromHF \
116
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
117
+ --init-method-std 0.0048 \
118
+ --embed-layernorm \
119
+ --fp16 \
120
+ --seed 42 \
121
+ --position-embedding-type alibi \
122
+ --checkpoint-activations \
123
+ --abort-on-unmet-fused-kernel-constraints \
124
+ --kill-switch-path $KILL_SWITCH_PATH \
125
+ --pad-vocab-size-to 250880 \
126
+ $OPTIMIZER_ARGS \
127
+ $EXIT_OPTS \
128
+ "
129
+
130
+ OUTPUT_ARGS=" \
131
+ --log-interval 1 \
132
+ --save-interval $SAVE_INTERVAL \
133
+ --eval-interval 250 \
134
+ --eval-iters 5 \
135
+ --tensorboard-dir $TENSORBOARD_PATH \
136
+ --tensorboard-queue-size 5 \
137
+ --log-timers-to-tensorboard \
138
+ --log-batch-size-to-tensorboard \
139
+ --log-validation-ppl-to-tensorboard \
140
+ "
141
+
142
+ ZERO_STAGE=1
143
+
144
+ config_json="./ds_config.$SLURM_JOBID.json"
145
+
146
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
147
+ cat <<EOT > $config_json
148
+ {
149
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
150
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
151
+ "gradient_clipping": 1.0,
152
+ "zero_optimization": {
153
+ "stage": $ZERO_STAGE
154
+ },
155
+ "fp16": {
156
+ "enabled": true,
157
+ "loss_scale": 0,
158
+ "loss_scale_window": 500,
159
+ "hysteresis": 2,
160
+ "min_loss_scale": 1,
161
+ "initial_scale_power": 12
162
+ },
163
+ "steps_per_print": 2000,
164
+ "wall_clock_breakdown": false
165
+ }
166
+ EOT
167
+
168
+
169
+ DEEPSPEED_ARGS=" \
170
+ --deepspeed \
171
+ --deepspeed_config ${config_json} \
172
+ --zero-stage ${ZERO_STAGE} \
173
+ --deepspeed-activation-checkpointing \
174
+ "
175
+
176
+ export LAUNCHER="python -u -m torch.distributed.run \
177
+ --nproc_per_node $GPUS_PER_NODE \
178
+ --nnodes $NNODES \
179
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
180
+ --rdzv_backend c10d \
181
+ --max_restarts 0 \
182
+ --tee 3 \
183
+ "
184
+
185
+ export CMD=" \
186
+ `pwd`/finetune_t0.py \
187
+ --tensor-model-parallel-size $TP_SIZE \
188
+ --pipeline-model-parallel-size $PP_SIZE \
189
+ $GPT_ARGS \
190
+ $OUTPUT_ARGS \
191
+ --save $CHECKPOINT_PATH \
192
+ --load $CHECKPOINT_PATH \
193
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
194
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
195
+ --dataloader-type single \
196
+ --data-impl mmap \
197
+ --distributed-backend nccl \
198
+ $DEEPSPEED_ARGS \
199
+ "
200
+
201
+ echo $CMD
202
+
203
+ # do not remove or the training will hang and nodes will be lost w/o this workaround
204
+ export CUDA_LAUNCH_BLOCKING=1
205
+
206
+ # hide duplicated errors using this hack - will be properly fixed in pt-1.12
207
+ export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json
208
+
209
+ clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt
210
+
211
+ echo "END TIME: $(date)"
train/tr13-mtf/smaller_models/tr13e-760m-mtf-xp3capmixnewcodelonglossseq.slurm ADDED
@@ -0,0 +1,211 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=xp3capmixnewcodelong760m
3
+ #SBATCH --qos=qos_gpu-t3
4
+ #SBATCH --nodes=8
5
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
6
+ #SBATCH --cpus-per-task=40 # number of cores per tasks
7
+ #SBATCH --hint=nomultithread # we get physical cores not logical
8
+ #SBATCH --gres=gpu:4 # number of gpus
9
+ #SBATCH -C v100-32g
10
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
11
+ #SBATCH --output=%x-%j.out # output file name
12
+ #SBATCH --account=six@v100
13
+
14
+ set -x -e
15
+
16
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
17
+ echo "START TIME: $(date)"
18
+
19
+ variant=xp3capmixnewcodelonglossseq
20
+
21
+ DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13d-760M-ml-t0
22
+ CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant
23
+ REPO_PATH=$DATA_OUTPUT_PATH/tr13d-760M-ml-logs
24
+ TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant
25
+ LOGS_PATH=$REPO_PATH/logs/$variant
26
+ mkdir -p $LOGS_PATH
27
+ mkdir -p $TENSORBOARD_PATH
28
+
29
+ MEGATRON_DEEPSPEED_REPO=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0/megdslossseqnew/Megatron-DeepSpeed
30
+ cd $MEGATRON_DEEPSPEED_REPO
31
+
32
+ KILL_SWITCH_PATH=$MEGATRON_DEEPSPEED_REPO/kill-switch-tr13e-350M-mtf
33
+
34
+ TRAIN_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixnewcodelong_train.txt
35
+ VALID_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixnewcodelong_validation_pretr.txt
36
+ TOKENIZER_NAME_OR_PATH=bigscience/tokenizer
37
+
38
+ # defining the right environment variables
39
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
40
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
41
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
42
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
43
+ export HF_DATASETS_OFFLINE=1
44
+ export TRANSFORMERS_OFFLINE=1
45
+
46
+ # testing for potential faulty nodes
47
+ # srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"'
48
+
49
+ # so processes know who to talk to
50
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
51
+ MASTER_PORT=6001
52
+
53
+ GPUS_PER_NODE=4
54
+ NNODES=$SLURM_NNODES
55
+
56
+ PP_SIZE=2
57
+ TP_SIZE=1
58
+
59
+ # T0 paper:
60
+ # ...truncate input and target sequences to 1024 and 256 tokens...
61
+ # ...use a batch size of 1024 sequences ... 2^20 total input tokens per batch...
62
+ # We use 2048 total tokens and 512 batch size = 2**20
63
+ MICRO_BATCH_SIZE=1
64
+ GLOBAL_BATCH_SIZE=1024
65
+
66
+ NLAYERS=24
67
+ NHIDDEN=1536
68
+ NHEADS=16
69
+ SEQ_LEN=2048
70
+ # 250
71
+ SAVE_INTERVAL=2
72
+
73
+ TRAIN_SAMPLES=6_348_800
74
+
75
+ # T0 paper:
76
+ # "...we use a learning rate of 1e-3..."
77
+ # However, they use Adafactor, which adapts the LR
78
+ # For Adam we likely want a lower one
79
+ # FLAN:
80
+ # "...decay of 1e-4..""
81
+
82
+ # Uncomment for the first step
83
+ # --no-load-optim \
84
+ # --reset-progress \
85
+ OPTIMIZER_ARGS=" \
86
+ --optimizer adam \
87
+ --adam-beta1 0.9 \
88
+ --adam-beta2 0.95 \
89
+ --adam-eps 1e-8 \
90
+ --lr 2e-5 \
91
+ --lr-decay-style constant \
92
+ --lr-warmup-samples 0 \
93
+ --clip-grad 1.0 \
94
+ --weight-decay 1e-4 \
95
+ --no-load-optim \
96
+ --reset-progress \
97
+ --norm-target-loss \
98
+ "
99
+ # for 20h 1190, for 100h 5990
100
+ # --exit-duration-in-mins 1190 \
101
+ EXIT_OPTS=" \
102
+ --exit-duration-in-mins 5990 \
103
+ "
104
+
105
+ GPT_ARGS=" \
106
+ --pp-partition-method 'type:transformer|embedding' \
107
+ --num-layers $NLAYERS \
108
+ --hidden-size $NHIDDEN \
109
+ --num-attention-heads $NHEADS \
110
+ --seq-length $SEQ_LEN \
111
+ --max-position-embeddings $SEQ_LEN \
112
+ --micro-batch-size $MICRO_BATCH_SIZE \
113
+ --global-batch-size $GLOBAL_BATCH_SIZE \
114
+ --train-samples $TRAIN_SAMPLES \
115
+ --tokenizer-type PretrainedFromHF \
116
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
117
+ --init-method-std 0.0048 \
118
+ --embed-layernorm \
119
+ --fp16 \
120
+ --seed 42 \
121
+ --position-embedding-type alibi \
122
+ --checkpoint-activations \
123
+ --abort-on-unmet-fused-kernel-constraints \
124
+ --kill-switch-path $KILL_SWITCH_PATH \
125
+ --pad-vocab-size-to 250880 \
126
+ $OPTIMIZER_ARGS \
127
+ $EXIT_OPTS \
128
+ "
129
+
130
+ OUTPUT_ARGS=" \
131
+ --log-interval 1 \
132
+ --save-interval $SAVE_INTERVAL \
133
+ --eval-interval 125 \
134
+ --eval-iters 10 \
135
+ --tensorboard-dir $TENSORBOARD_PATH \
136
+ --tensorboard-queue-size 5 \
137
+ --log-timers-to-tensorboard \
138
+ --log-batch-size-to-tensorboard \
139
+ --log-validation-ppl-to-tensorboard \
140
+ "
141
+
142
+ ZERO_STAGE=1
143
+
144
+ config_json="./ds_config.$SLURM_JOBID.json"
145
+
146
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
147
+ cat <<EOT > $config_json
148
+ {
149
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
150
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
151
+ "gradient_clipping": 1.0,
152
+ "zero_optimization": {
153
+ "stage": $ZERO_STAGE
154
+ },
155
+ "fp16": {
156
+ "enabled": true,
157
+ "loss_scale": 0,
158
+ "loss_scale_window": 500,
159
+ "hysteresis": 2,
160
+ "min_loss_scale": 1,
161
+ "initial_scale_power": 12
162
+ },
163
+ "steps_per_print": 2000,
164
+ "wall_clock_breakdown": false
165
+ }
166
+ EOT
167
+
168
+
169
+ DEEPSPEED_ARGS=" \
170
+ --deepspeed \
171
+ --deepspeed_config ${config_json} \
172
+ --zero-stage ${ZERO_STAGE} \
173
+ --deepspeed-activation-checkpointing \
174
+ "
175
+
176
+ export LAUNCHER="python -u -m torch.distributed.run \
177
+ --nproc_per_node $GPUS_PER_NODE \
178
+ --nnodes $NNODES \
179
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
180
+ --rdzv_backend c10d \
181
+ --max_restarts 0 \
182
+ --tee 3 \
183
+ "
184
+
185
+ export CMD=" \
186
+ `pwd`/finetune_t0.py \
187
+ --tensor-model-parallel-size $TP_SIZE \
188
+ --pipeline-model-parallel-size $PP_SIZE \
189
+ $GPT_ARGS \
190
+ $OUTPUT_ARGS \
191
+ --save $CHECKPOINT_PATH \
192
+ --load $CHECKPOINT_PATH \
193
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
194
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
195
+ --dataloader-type single \
196
+ --data-impl mmap \
197
+ --distributed-backend nccl \
198
+ $DEEPSPEED_ARGS \
199
+ "
200
+
201
+ echo $CMD
202
+
203
+ # do not remove or the training will hang and nodes will be lost w/o this workaround
204
+ export CUDA_LAUNCH_BLOCKING=1
205
+
206
+ # hide duplicated errors using this hack - will be properly fixed in pt-1.12
207
+ export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json
208
+
209
+ clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt
210
+
211
+ echo "END TIME: $(date)"
train/tr13-mtf/smaller_models/tr13f-6B3-mtf-eos.slurm ADDED
@@ -0,0 +1,209 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=eodtr13f-6B3-ml-t0
3
+ #SBATCH --partition=gpu_p5
4
+ #SBATCH --constraint=a100
5
+ #SBATCH --reservation=hug
6
+ #SBATCH --qos=qos_gpu-gc # up to 100h
7
+ #SBATCH --nodes=8
8
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
9
+ #SBATCH --cpus-per-task=64 # number of cores per tasks
10
+ #SBATCH --hint=nomultithread # we get physical cores not logical
11
+ #SBATCH --gres=gpu:8 # number of gpus
12
+ #SBATCH --time 100:00:00 # maximum execution time (HH:MM:SS)
13
+ #SBATCH --output=%x-%j.out # output file name
14
+ #SBATCH --account=six@a100
15
+
16
+ set -x -e
17
+
18
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
19
+ echo "START TIME: $(date)"
20
+
21
+ variant=eos
22
+
23
+ DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13f-6B3-ml-t0
24
+ CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant
25
+ REPO_PATH=$DATA_OUTPUT_PATH/tr13f-6B3-ml-t0-logs
26
+ TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant
27
+ LOGS_PATH=$REPO_PATH/logs/$variant
28
+ mkdir -p $LOGS_PATH
29
+ mkdir -p $TENSORBOARD_PATH
30
+
31
+ MEGATRON_DEEPSPEED_REPO=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed
32
+ cd $MEGATRON_DEEPSPEED_REPO
33
+
34
+ KILL_SWITCH_PATH=$MEGATRON_DEEPSPEED_REPO/kill-switch-tr13f-6B3-mtf
35
+
36
+ TRAIN_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/p31eos_train.txt
37
+ VALID_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/p31eos_validation.txt
38
+ TOKENIZER_NAME_OR_PATH=bigscience/tokenizer
39
+
40
+ # defining the right environment variables
41
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
42
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
43
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
44
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
45
+ export HF_DATASETS_OFFLINE=1
46
+ export TRANSFORMERS_OFFLINE=1
47
+
48
+ # testing for potential faulty nodes
49
+ # srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"'
50
+
51
+ # so processes know who to talk to
52
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
53
+ MASTER_PORT=6001
54
+
55
+ GPUS_PER_NODE=8
56
+ NNODES=$SLURM_NNODES
57
+
58
+ PP_SIZE=1
59
+ TP_SIZE=1
60
+
61
+ # T0 paper:
62
+ # ...truncate input and target sequences to 1024 and 256 tokens...
63
+ # ...use a batch size of 1024 sequences ... 2^20 total input tokens per batch...
64
+ # We use 2048 total tokens and 512 batch size = 2**20
65
+ MICRO_BATCH_SIZE=4
66
+ GLOBAL_BATCH_SIZE=2048
67
+
68
+ NLAYERS=30
69
+ NHIDDEN=4096
70
+ NHEADS=32
71
+ SEQ_LEN=2048
72
+
73
+ SAVE_INTERVAL=1000
74
+
75
+ TRAIN_SAMPLES=6_348_800
76
+
77
+ # T0 paper:
78
+ # "...we use a learning rate of 1e-3..."
79
+ # However, they use Adafactor, which adapts the LR
80
+ # For Adam we likely want a lower one
81
+ # FLAN:
82
+ # "...decay of 1e-4..""
83
+
84
+ # Uncomment for the first step
85
+ # --no-load-optim \
86
+ OPTIMIZER_ARGS=" \
87
+ --optimizer adam \
88
+ --adam-beta1 0.9 \
89
+ --adam-beta2 0.95 \
90
+ --adam-eps 1e-8 \
91
+ --lr 2e-5 \
92
+ --lr-decay-style constant \
93
+ --lr-warmup-samples 0 \
94
+ --clip-grad 1.0 \
95
+ --weight-decay 1e-4 \
96
+ "
97
+ # for 20h 1190, for 100h 5990
98
+ # --exit-duration-in-mins 1190 \
99
+ EXIT_OPTS=" \
100
+ --exit-duration-in-mins 5990 \
101
+ "
102
+
103
+ GPT_ARGS=" \
104
+ --pp-partition-method 'type:transformer|embedding' \
105
+ --num-layers $NLAYERS \
106
+ --hidden-size $NHIDDEN \
107
+ --num-attention-heads $NHEADS \
108
+ --seq-length $SEQ_LEN \
109
+ --max-position-embeddings $SEQ_LEN \
110
+ --micro-batch-size $MICRO_BATCH_SIZE \
111
+ --global-batch-size $GLOBAL_BATCH_SIZE \
112
+ --train-samples $TRAIN_SAMPLES \
113
+ --tokenizer-type PretrainedFromHF \
114
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
115
+ --init-method-std 0.0048 \
116
+ --embed-layernorm \
117
+ --fp16 \
118
+ --seed 42 \
119
+ --position-embedding-type alibi \
120
+ --checkpoint-activations \
121
+ --abort-on-unmet-fused-kernel-constraints \
122
+ --kill-switch-path $KILL_SWITCH_PATH \
123
+ --pad-vocab-size-to 250880 \
124
+ $OPTIMIZER_ARGS \
125
+ $EXIT_OPTS \
126
+ "
127
+
128
+ OUTPUT_ARGS=" \
129
+ --log-interval 1 \
130
+ --save-interval $SAVE_INTERVAL \
131
+ --eval-interval 250 \
132
+ --eval-iters 50 \
133
+ --tensorboard-dir $TENSORBOARD_PATH \
134
+ --tensorboard-queue-size 5 \
135
+ --log-timers-to-tensorboard \
136
+ --log-batch-size-to-tensorboard \
137
+ --log-validation-ppl-to-tensorboard \
138
+ "
139
+
140
+ ZERO_STAGE=1
141
+
142
+ config_json="./ds_config.$SLURM_JOBID.json"
143
+
144
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
145
+ cat <<EOT > $config_json
146
+ {
147
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
148
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
149
+ "gradient_clipping": 1.0,
150
+ "zero_optimization": {
151
+ "stage": $ZERO_STAGE
152
+ },
153
+ "fp16": {
154
+ "enabled": true,
155
+ "loss_scale": 0,
156
+ "loss_scale_window": 500,
157
+ "hysteresis": 2,
158
+ "min_loss_scale": 1,
159
+ "initial_scale_power": 12
160
+ },
161
+ "steps_per_print": 2000,
162
+ "wall_clock_breakdown": false
163
+ }
164
+ EOT
165
+
166
+
167
+ DEEPSPEED_ARGS=" \
168
+ --deepspeed \
169
+ --deepspeed_config ${config_json} \
170
+ --zero-stage ${ZERO_STAGE} \
171
+ --deepspeed-activation-checkpointing \
172
+ "
173
+
174
+ export LAUNCHER="python -u -m torch.distributed.run \
175
+ --nproc_per_node $GPUS_PER_NODE \
176
+ --nnodes $NNODES \
177
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
178
+ --rdzv_backend c10d \
179
+ --max_restarts 0 \
180
+ --tee 3 \
181
+ "
182
+
183
+ export CMD=" \
184
+ `pwd`/finetune_t0.py \
185
+ --tensor-model-parallel-size $TP_SIZE \
186
+ --pipeline-model-parallel-size $PP_SIZE \
187
+ $GPT_ARGS \
188
+ $OUTPUT_ARGS \
189
+ --save $CHECKPOINT_PATH \
190
+ --load $CHECKPOINT_PATH \
191
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
192
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
193
+ --dataloader-type single \
194
+ --data-impl mmap \
195
+ --distributed-backend nccl \
196
+ $DEEPSPEED_ARGS \
197
+ "
198
+
199
+ echo $CMD
200
+
201
+ # do not remove or the training will hang and nodes will be lost w/o this workaround
202
+ export CUDA_LAUNCH_BLOCKING=1
203
+
204
+ # hide duplicated errors using this hack - will be properly fixed in pt-1.12
205
+ export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json
206
+
207
+ clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt
208
+
209
+ echo "END TIME: $(date)"
train/tr13-mtf/smaller_models/tr13f-6b3-mtf-p31.slurm ADDED
@@ -0,0 +1,210 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=p31tr13f-6B3-ml-t0
3
+ #SBATCH --partition=gpu_p5
4
+ #SBATCH --constraint=a100
5
+ #SBATCH --reservation=hug
6
+ #SBATCH --qos=qos_gpu-gc # up to 100h
7
+ #SBATCH --nodes=8
8
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
9
+ #SBATCH --cpus-per-task=64 # number of cores per tasks
10
+ #SBATCH --hint=nomultithread # we get physical cores not logical
11
+ #SBATCH --gres=gpu:8 # number of gpus
12
+ #SBATCH --time 100:00:00 # maximum execution time (HH:MM:SS)
13
+ #SBATCH --output=%x-%j.out # output file name
14
+ #SBATCH --account=six@a100
15
+
16
+ set -x -e
17
+
18
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
19
+ echo "START TIME: $(date)"
20
+
21
+ variant=p31
22
+
23
+ DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13f-6B3-ml-t0
24
+ CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant
25
+ REPO_PATH=$DATA_OUTPUT_PATH/tr13f-6B3-ml-t0-logs
26
+ TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant
27
+ LOGS_PATH=$REPO_PATH/logs/$variant
28
+ mkdir -p $LOGS_PATH
29
+ mkdir -p $TENSORBOARD_PATH
30
+
31
+ MEGATRON_DEEPSPEED_REPO=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed
32
+ cd $MEGATRON_DEEPSPEED_REPO
33
+
34
+ KILL_SWITCH_PATH=$MEGATRON_DEEPSPEED_REPO/kill-switch-tr13f-6B3-mtf
35
+
36
+ TRAIN_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/p31_train.txt
37
+ VALID_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/p31_validation.txt
38
+ TOKENIZER_NAME_OR_PATH=bigscience/tokenizer
39
+
40
+ # defining the right environment variables
41
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
42
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
43
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
44
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
45
+ export HF_DATASETS_OFFLINE=1
46
+ export TRANSFORMERS_OFFLINE=1
47
+
48
+ # testing for potential faulty nodes
49
+ # srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"'
50
+
51
+ # so processes know who to talk to
52
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
53
+ MASTER_PORT=6001
54
+
55
+ GPUS_PER_NODE=8
56
+ NNODES=$SLURM_NNODES
57
+
58
+ PP_SIZE=1
59
+ TP_SIZE=1
60
+
61
+ # T0 paper:
62
+ # ...truncate input and target sequences to 1024 and 256 tokens...
63
+ # ...use a batch size of 1024 sequences ... 2^20 total input tokens per batch...
64
+ # We use 2048 total tokens and 512 batch size = 2**20
65
+ MICRO_BATCH_SIZE=4
66
+ GLOBAL_BATCH_SIZE=2048
67
+
68
+ NLAYERS=30
69
+ NHIDDEN=4096
70
+ NHEADS=32
71
+ SEQ_LEN=2048
72
+
73
+ SAVE_INTERVAL=500
74
+
75
+ TRAIN_SAMPLES=6_348_800
76
+
77
+ # T0 paper:
78
+ # "...we use a learning rate of 1e-3..."
79
+ # However, they use Adafactor, which adapts the LR
80
+ # For Adam we likely want a lower one
81
+ # FLAN:
82
+ # "...decay of 1e-4..""
83
+
84
+ # Uncomment for the first step
85
+ # --no-load-optim \
86
+ OPTIMIZER_ARGS=" \
87
+ --optimizer adam \
88
+ --adam-beta1 0.9 \
89
+ --adam-beta2 0.95 \
90
+ --adam-eps 1e-8 \
91
+ --lr 2e-5 \
92
+ --lr-decay-style constant \
93
+ --lr-warmup-samples 0 \
94
+ --clip-grad 1.0 \
95
+ --weight-decay 1e-4 \
96
+ --no-load-optim \
97
+ "
98
+ # for 20h 1190, for 100h 5990
99
+ # --exit-duration-in-mins 1190 \
100
+ EXIT_OPTS=" \
101
+ --exit-duration-in-mins 5990 \
102
+ "
103
+
104
+ GPT_ARGS=" \
105
+ --pp-partition-method 'type:transformer|embedding' \
106
+ --num-layers $NLAYERS \
107
+ --hidden-size $NHIDDEN \
108
+ --num-attention-heads $NHEADS \
109
+ --seq-length $SEQ_LEN \
110
+ --max-position-embeddings $SEQ_LEN \
111
+ --micro-batch-size $MICRO_BATCH_SIZE \
112
+ --global-batch-size $GLOBAL_BATCH_SIZE \
113
+ --train-samples $TRAIN_SAMPLES \
114
+ --tokenizer-type PretrainedFromHF \
115
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
116
+ --init-method-std 0.0048 \
117
+ --embed-layernorm \
118
+ --fp16 \
119
+ --seed 42 \
120
+ --position-embedding-type alibi \
121
+ --checkpoint-activations \
122
+ --abort-on-unmet-fused-kernel-constraints \
123
+ --kill-switch-path $KILL_SWITCH_PATH \
124
+ --pad-vocab-size-to 250880 \
125
+ $OPTIMIZER_ARGS \
126
+ $EXIT_OPTS \
127
+ "
128
+
129
+ OUTPUT_ARGS=" \
130
+ --log-interval 1 \
131
+ --save-interval $SAVE_INTERVAL \
132
+ --eval-interval 250 \
133
+ --eval-iters 50 \
134
+ --tensorboard-dir $TENSORBOARD_PATH \
135
+ --tensorboard-queue-size 5 \
136
+ --log-timers-to-tensorboard \
137
+ --log-batch-size-to-tensorboard \
138
+ --log-validation-ppl-to-tensorboard \
139
+ "
140
+
141
+ ZERO_STAGE=1
142
+
143
+ config_json="./ds_config.$SLURM_JOBID.json"
144
+
145
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
146
+ cat <<EOT > $config_json
147
+ {
148
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
149
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
150
+ "gradient_clipping": 1.0,
151
+ "zero_optimization": {
152
+ "stage": $ZERO_STAGE
153
+ },
154
+ "fp16": {
155
+ "enabled": true,
156
+ "loss_scale": 0,
157
+ "loss_scale_window": 500,
158
+ "hysteresis": 2,
159
+ "min_loss_scale": 1,
160
+ "initial_scale_power": 12
161
+ },
162
+ "steps_per_print": 2000,
163
+ "wall_clock_breakdown": false
164
+ }
165
+ EOT
166
+
167
+
168
+ DEEPSPEED_ARGS=" \
169
+ --deepspeed \
170
+ --deepspeed_config ${config_json} \
171
+ --zero-stage ${ZERO_STAGE} \
172
+ --deepspeed-activation-checkpointing \
173
+ "
174
+
175
+ export LAUNCHER="python -u -m torch.distributed.run \
176
+ --nproc_per_node $GPUS_PER_NODE \
177
+ --nnodes $NNODES \
178
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
179
+ --rdzv_backend c10d \
180
+ --max_restarts 0 \
181
+ --tee 3 \
182
+ "
183
+
184
+ export CMD=" \
185
+ `pwd`/finetune_t0.py \
186
+ --tensor-model-parallel-size $TP_SIZE \
187
+ --pipeline-model-parallel-size $PP_SIZE \
188
+ $GPT_ARGS \
189
+ $OUTPUT_ARGS \
190
+ --save $CHECKPOINT_PATH \
191
+ --load $CHECKPOINT_PATH \
192
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
193
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
194
+ --dataloader-type single \
195
+ --data-impl mmap \
196
+ --distributed-backend nccl \
197
+ $DEEPSPEED_ARGS \
198
+ "
199
+
200
+ echo $CMD
201
+
202
+ # do not remove or the training will hang and nodes will be lost w/o this workaround
203
+ export CUDA_LAUNCH_BLOCKING=1
204
+
205
+ # hide duplicated errors using this hack - will be properly fixed in pt-1.12
206
+ export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json
207
+
208
+ clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt
209
+
210
+ echo "END TIME: $(date)"
train/tr13-mtf/smaller_models/tr13f-6b3-mtf-xp3capmix.slurm ADDED
@@ -0,0 +1,210 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=xp3mixedtr13f-6B3-ml-t0
3
+ #SBATCH --partition=gpu_p5
4
+ #SBATCH --constraint=a100
5
+ #SBATCH --reservation=hug
6
+ #SBATCH --qos=qos_gpu-gc # up to 100h
7
+ #SBATCH --nodes=8
8
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
9
+ #SBATCH --cpus-per-task=64 # number of cores per task
10
+ #SBATCH --hint=nomultithread # we get physical cores not logical
11
+ #SBATCH --gres=gpu:8 # number of gpus
12
+ #SBATCH --time 100:00:00 # maximum execution time (HH:MM:SS)
13
+ #SBATCH --output=%x-%j.out # output file name
14
+ #SBATCH --account=six@a100
15
+
16
+ set -x -e
17
+
18
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
19
+ echo "START TIME: $(date)"
20
+
21
+ variant=xp3capmix
22
+
23
+ DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13f-6B3-ml-t0
24
+ CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant
25
+ REPO_PATH=$DATA_OUTPUT_PATH/tr13f-6B3-ml-t0-logs
26
+ TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant
27
+ LOGS_PATH=$REPO_PATH/logs/$variant
28
+ mkdir -p $LOGS_PATH
29
+ mkdir -p $TENSORBOARD_PATH
30
+
31
+ MEGATRON_DEEPSPEED_REPO=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed
32
+ cd $MEGATRON_DEEPSPEED_REPO
33
+
34
+ KILL_SWITCH_PATH=$MEGATRON_DEEPSPEED_REPO/kill-switch-tr13f-6B3-mtf
35
+
36
+ TRAIN_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmix_train.txt
37
+ VALID_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmix_validation.txt
38
+ TOKENIZER_NAME_OR_PATH=bigscience/tokenizer
39
+
40
+ # defining the right environment variables
41
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
42
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
43
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
44
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
45
+ export HF_DATASETS_OFFLINE=1
46
+ export TRANSFORMERS_OFFLINE=1
47
+
48
+ # testing for potential faulty nodes
49
+ # srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"'
50
+
51
+ # so processes know who to talk to
52
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
53
+ MASTER_PORT=6001
54
+
55
+ GPUS_PER_NODE=8
56
+ NNODES=$SLURM_NNODES
57
+
58
+ PP_SIZE=1
59
+ TP_SIZE=1
60
+
61
+ # T0 paper:
62
+ # ...truncate input and target sequences to 1024 and 256 tokens...
63
+ # ...use a batch size of 1024 sequences ... 2^20 total input tokens per batch...
64
+ # We use 2048 total tokens and 512 batch size = 2**20
65
+ MICRO_BATCH_SIZE=4
66
+ GLOBAL_BATCH_SIZE=2048
67
+
68
+ NLAYERS=30
69
+ NHIDDEN=4096
70
+ NHEADS=32
71
+ SEQ_LEN=2048
72
+
73
+ SAVE_INTERVAL=250
74
+
75
+ TRAIN_SAMPLES=6_348_800
76
+
77
+ # T0 paper:
78
+ # "...we use a learning rate of 1e-3..."
79
+ # However, they use Adafactor, which adapts the LR
80
+ # For Adam we likely want a lower one
81
+ # FLAN:
82
+ # "...decay of 1e-4..""
83
+
84
+ # Uncomment for the first step
85
+ # --no-load-optim \
86
+ OPTIMIZER_ARGS=" \
87
+ --optimizer adam \
88
+ --adam-beta1 0.9 \
89
+ --adam-beta2 0.95 \
90
+ --adam-eps 1e-8 \
91
+ --lr 2e-5 \
92
+ --lr-decay-style constant \
93
+ --lr-warmup-samples 0 \
94
+ --clip-grad 1.0 \
95
+ --weight-decay 1e-4 \
96
+ --no-load-optim \
97
+ "
98
+ # for 20h 1190, for 100h 5990
99
+ # --exit-duration-in-mins 1190 \
100
+ EXIT_OPTS=" \
101
+ --exit-duration-in-mins 5990 \
102
+ "
103
+
104
+ GPT_ARGS=" \
105
+ --pp-partition-method 'type:transformer|embedding' \
106
+ --num-layers $NLAYERS \
107
+ --hidden-size $NHIDDEN \
108
+ --num-attention-heads $NHEADS \
109
+ --seq-length $SEQ_LEN \
110
+ --max-position-embeddings $SEQ_LEN \
111
+ --micro-batch-size $MICRO_BATCH_SIZE \
112
+ --global-batch-size $GLOBAL_BATCH_SIZE \
113
+ --train-samples $TRAIN_SAMPLES \
114
+ --tokenizer-type PretrainedFromHF \
115
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
116
+ --init-method-std 0.0048 \
117
+ --embed-layernorm \
118
+ --fp16 \
119
+ --seed 42 \
120
+ --position-embedding-type alibi \
121
+ --checkpoint-activations \
122
+ --abort-on-unmet-fused-kernel-constraints \
123
+ --kill-switch-path $KILL_SWITCH_PATH \
124
+ --pad-vocab-size-to 250880 \
125
+ $OPTIMIZER_ARGS \
126
+ $EXIT_OPTS \
127
+ "
128
+
129
+ OUTPUT_ARGS=" \
130
+ --log-interval 1 \
131
+ --save-interval $SAVE_INTERVAL \
132
+ --eval-interval 250 \
133
+ --eval-iters 50 \
134
+ --tensorboard-dir $TENSORBOARD_PATH \
135
+ --tensorboard-queue-size 5 \
136
+ --log-timers-to-tensorboard \
137
+ --log-batch-size-to-tensorboard \
138
+ --log-validation-ppl-to-tensorboard \
139
+ "
140
+
141
+ ZERO_STAGE=1
142
+
143
+ config_json="./ds_config.$SLURM_JOBID.json"
144
+
145
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
146
+ cat <<EOT > $config_json
147
+ {
148
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
149
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
150
+ "gradient_clipping": 1.0,
151
+ "zero_optimization": {
152
+ "stage": $ZERO_STAGE
153
+ },
154
+ "fp16": {
155
+ "enabled": true,
156
+ "loss_scale": 0,
157
+ "loss_scale_window": 500,
158
+ "hysteresis": 2,
159
+ "min_loss_scale": 1,
160
+ "initial_scale_power": 12
161
+ },
162
+ "steps_per_print": 2000,
163
+ "wall_clock_breakdown": false
164
+ }
165
+ EOT
166
+
167
+
168
+ DEEPSPEED_ARGS=" \
169
+ --deepspeed \
170
+ --deepspeed_config ${config_json} \
171
+ --zero-stage ${ZERO_STAGE} \
172
+ --deepspeed-activation-checkpointing \
173
+ "
174
+
175
+ export LAUNCHER="python -u -m torch.distributed.run \
176
+ --nproc_per_node $GPUS_PER_NODE \
177
+ --nnodes $NNODES \
178
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
179
+ --rdzv_backend c10d \
180
+ --max_restarts 0 \
181
+ --tee 3 \
182
+ "
183
+
184
+ export CMD=" \
185
+ `pwd`/finetune_t0.py \
186
+ --tensor-model-parallel-size $TP_SIZE \
187
+ --pipeline-model-parallel-size $PP_SIZE \
188
+ $GPT_ARGS \
189
+ $OUTPUT_ARGS \
190
+ --save $CHECKPOINT_PATH \
191
+ --load $CHECKPOINT_PATH \
192
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
193
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
194
+ --dataloader-type single \
195
+ --data-impl mmap \
196
+ --distributed-backend nccl \
197
+ $DEEPSPEED_ARGS \
198
+ "
199
+
200
+ echo $CMD
201
+
202
+ # do not remove or the training will hang and nodes will be lost w/o this workaround
203
+ export CUDA_LAUNCH_BLOCKING=1
204
+
205
+ # hide duplicated errors using this hack - will be properly fixed in pt-1.12
206
+ export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json
207
+
208
+ clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt
209
+
210
+ echo "END TIME: $(date)"
train/tr13-mtf/smaller_models/tr13f-6b3-mtf-xp3capmixfixlong.slurm ADDED
@@ -0,0 +1,210 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=xp3capmixfixlong
3
+ #SBATCH --partition=gpu_p5
4
+ #SBATCH --constraint=a100
5
+ #SBATCH --reservation=hug
6
+ #SBATCH --qos=qos_gpu-gc # up to 100h
7
+ #SBATCH --nodes=8
8
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
9
+ #SBATCH --cpus-per-task=64 # number of cores per task
10
+ #SBATCH --hint=nomultithread # we get physical cores not logical
11
+ #SBATCH --gres=gpu:8 # number of gpus
12
+ #SBATCH --time 100:00:00 # maximum execution time (HH:MM:SS)
13
+ #SBATCH --output=%x-%j.out # output file name
14
+ #SBATCH --account=six@a100
15
+
16
+ set -x -e
17
+
18
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
19
+ echo "START TIME: $(date)"
20
+
21
+ variant=xp3capmixfixlong
22
+
23
+ DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13f-6B3-ml-t0
24
+ CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant
25
+ REPO_PATH=$DATA_OUTPUT_PATH/tr13f-6B3-ml-t0-logs
26
+ TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant
27
+ LOGS_PATH=$REPO_PATH/logs/$variant
28
+ mkdir -p $LOGS_PATH
29
+ mkdir -p $TENSORBOARD_PATH
30
+
31
+ MEGATRON_DEEPSPEED_REPO=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed
32
+ cd $MEGATRON_DEEPSPEED_REPO
33
+
34
+ KILL_SWITCH_PATH=$MEGATRON_DEEPSPEED_REPO/kill-switch-tr13f-6B3-mtf
35
+
36
+ TRAIN_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixfixlong_train.txt
37
+ VALID_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixfixlong_validation.txt
38
+ TOKENIZER_NAME_OR_PATH=bigscience/tokenizer
39
+
40
+ # defining the right environment variables
41
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
42
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
43
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
44
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
45
+ export HF_DATASETS_OFFLINE=1
46
+ export TRANSFORMERS_OFFLINE=1
47
+
48
+ # testing for potential faulty nodes
49
+ # srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"'
50
+
51
+ # so processes know who to talk to
52
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
53
+ MASTER_PORT=6001
54
+
55
+ GPUS_PER_NODE=8
56
+ NNODES=$SLURM_NNODES
57
+
58
+ PP_SIZE=1
59
+ TP_SIZE=1
60
+
61
+ # T0 paper:
62
+ # ...truncate input and target sequences to 1024 and 256 tokens...
63
+ # ...use a batch size of 1024 sequences ... 2^20 total input tokens per batch...
64
+ # We use 2048 total tokens and 512 batch size = 2**20
65
+ MICRO_BATCH_SIZE=4
66
+ GLOBAL_BATCH_SIZE=2048
67
+
68
+ NLAYERS=30
69
+ NHIDDEN=4096
70
+ NHEADS=32
71
+ SEQ_LEN=2048
72
+
73
+ SAVE_INTERVAL=2
74
+
75
+ TRAIN_SAMPLES=6_348_800
76
+
77
+ # T0 paper:
78
+ # "...we use a learning rate of 1e-3..."
79
+ # However, they use Adafactor, which adapts the LR
80
+ # For Adam we likely want a lower one
81
+ # FLAN:
82
+ # "...decay of 1e-4..""
83
+
84
+ # Uncomment for the first step
85
+ # --no-load-optim \
86
+ OPTIMIZER_ARGS=" \
87
+ --optimizer adam \
88
+ --adam-beta1 0.9 \
89
+ --adam-beta2 0.95 \
90
+ --adam-eps 1e-8 \
91
+ --lr 2e-5 \
92
+ --lr-decay-style constant \
93
+ --lr-warmup-samples 0 \
94
+ --clip-grad 1.0 \
95
+ --weight-decay 1e-4 \
96
+ --no-load-optim \
97
+ "
98
+ # for 20h 1190, for 100h 5990
99
+ # --exit-duration-in-mins 1190 \
100
+ EXIT_OPTS=" \
101
+ --exit-duration-in-mins 5990 \
102
+ "
103
+
104
+ GPT_ARGS=" \
105
+ --pp-partition-method 'type:transformer|embedding' \
106
+ --num-layers $NLAYERS \
107
+ --hidden-size $NHIDDEN \
108
+ --num-attention-heads $NHEADS \
109
+ --seq-length $SEQ_LEN \
110
+ --max-position-embeddings $SEQ_LEN \
111
+ --micro-batch-size $MICRO_BATCH_SIZE \
112
+ --global-batch-size $GLOBAL_BATCH_SIZE \
113
+ --train-samples $TRAIN_SAMPLES \
114
+ --tokenizer-type PretrainedFromHF \
115
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
116
+ --init-method-std 0.0048 \
117
+ --embed-layernorm \
118
+ --fp16 \
119
+ --seed 42 \
120
+ --position-embedding-type alibi \
121
+ --checkpoint-activations \
122
+ --abort-on-unmet-fused-kernel-constraints \
123
+ --kill-switch-path $KILL_SWITCH_PATH \
124
+ --pad-vocab-size-to 250880 \
125
+ $OPTIMIZER_ARGS \
126
+ $EXIT_OPTS \
127
+ "
128
+
129
+ OUTPUT_ARGS=" \
130
+ --log-interval 1 \
131
+ --save-interval $SAVE_INTERVAL \
132
+ --eval-interval 250 \
133
+ --eval-iters 50 \
134
+ --tensorboard-dir $TENSORBOARD_PATH \
135
+ --tensorboard-queue-size 5 \
136
+ --log-timers-to-tensorboard \
137
+ --log-batch-size-to-tensorboard \
138
+ --log-validation-ppl-to-tensorboard \
139
+ "
140
+
141
+ ZERO_STAGE=1
142
+
143
+ config_json="./ds_config.$SLURM_JOBID.json"
144
+
145
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
146
+ cat <<EOT > $config_json
147
+ {
148
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
149
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
150
+ "gradient_clipping": 1.0,
151
+ "zero_optimization": {
152
+ "stage": $ZERO_STAGE
153
+ },
154
+ "fp16": {
155
+ "enabled": true,
156
+ "loss_scale": 0,
157
+ "loss_scale_window": 500,
158
+ "hysteresis": 2,
159
+ "min_loss_scale": 1,
160
+ "initial_scale_power": 12
161
+ },
162
+ "steps_per_print": 2000,
163
+ "wall_clock_breakdown": false
164
+ }
165
+ EOT
166
+
167
+
168
+ DEEPSPEED_ARGS=" \
169
+ --deepspeed \
170
+ --deepspeed_config ${config_json} \
171
+ --zero-stage ${ZERO_STAGE} \
172
+ --deepspeed-activation-checkpointing \
173
+ "
174
+
175
+ export LAUNCHER="python -u -m torch.distributed.run \
176
+ --nproc_per_node $GPUS_PER_NODE \
177
+ --nnodes $NNODES \
178
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
179
+ --rdzv_backend c10d \
180
+ --max_restarts 0 \
181
+ --tee 3 \
182
+ "
183
+
184
+ export CMD=" \
185
+ `pwd`/finetune_t0.py \
186
+ --tensor-model-parallel-size $TP_SIZE \
187
+ --pipeline-model-parallel-size $PP_SIZE \
188
+ $GPT_ARGS \
189
+ $OUTPUT_ARGS \
190
+ --save $CHECKPOINT_PATH \
191
+ --load $CHECKPOINT_PATH \
192
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
193
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
194
+ --dataloader-type single \
195
+ --data-impl mmap \
196
+ --distributed-backend nccl \
197
+ $DEEPSPEED_ARGS \
198
+ "
199
+
200
+ echo $CMD
201
+
202
+ # do not remove or the training will hang and nodes will be lost w/o this workaround
203
+ export CUDA_LAUNCH_BLOCKING=1
204
+
205
+ # hide duplicated errors using this hack - will be properly fixed in pt-1.12
206
+ export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json
207
+
208
+ clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt
209
+
210
+ echo "END TIME: $(date)"
train/tr13-mtf/smaller_models/tr13f-6b3-mtf-xp3capmixfixlonglossseq.slurm ADDED
@@ -0,0 +1,211 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=xp3capmixfixlonglossseq
3
+ #SBATCH --partition=gpu_p5
4
+ #SBATCH --constraint=a100
5
+ #SBATCH --reservation=hug
6
+ #SBATCH --qos=qos_gpu-gc # up to 100h
7
+ #SBATCH --nodes=8
8
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
9
+ #SBATCH --cpus-per-task=64 # number of cores per task
10
+ #SBATCH --hint=nomultithread # we get physical cores not logical
11
+ #SBATCH --gres=gpu:8 # number of gpus
12
+ #SBATCH --time 100:00:00 # maximum execution time (HH:MM:SS)
13
+ #SBATCH --output=%x-%j.out # output file name
14
+ #SBATCH --account=six@a100
15
+
16
+ set -x -e
17
+
18
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
19
+ echo "START TIME: $(date)"
20
+
21
+ variant=xp3capmixfixlonglossseq
22
+
23
+ DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13f-6B3-ml-t0
24
+ CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant
25
+ REPO_PATH=$DATA_OUTPUT_PATH/tr13f-6B3-ml-t0-logs
26
+ TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant
27
+ LOGS_PATH=$REPO_PATH/logs/$variant
28
+ mkdir -p $LOGS_PATH
29
+ mkdir -p $TENSORBOARD_PATH
30
+
31
+ MEGATRON_DEEPSPEED_REPO=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0/megdslossseqnew/Megatron-DeepSpeed
32
+ cd $MEGATRON_DEEPSPEED_REPO
33
+
34
+ KILL_SWITCH_PATH=$MEGATRON_DEEPSPEED_REPO/kill-switch-tr13f-6B3-mtf
35
+
36
+ TRAIN_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixfixlong_train.txt
37
+ VALID_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixfixlong_validation.txt
38
+ TOKENIZER_NAME_OR_PATH=bigscience/tokenizer
39
+
40
+ # defining the right environment variables
41
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
42
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
43
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
44
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
45
+ export HF_DATASETS_OFFLINE=1
46
+ export TRANSFORMERS_OFFLINE=1
47
+
48
+ # testing for potential faulty nodes
49
+ # srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"'
50
+
51
+ # so processes know who to talk to
52
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
53
+ MASTER_PORT=6001
54
+
55
+ GPUS_PER_NODE=8
56
+ NNODES=$SLURM_NNODES
57
+
58
+ PP_SIZE=1
59
+ TP_SIZE=1
60
+
61
+ # T0 paper:
62
+ # ...truncate input and target sequences to 1024 and 256 tokens...
63
+ # ...use a batch size of 1024 sequences ... 2^20 total input tokens per batch...
64
+ # We use 2048 total tokens and 512 batch size = 2**20
65
+ MICRO_BATCH_SIZE=4
66
+ GLOBAL_BATCH_SIZE=2048
67
+
68
+ NLAYERS=30
69
+ NHIDDEN=4096
70
+ NHEADS=32
71
+ SEQ_LEN=2048
72
+
73
+ SAVE_INTERVAL=2
74
+
75
+ TRAIN_SAMPLES=6_348_800
76
+
77
+ # T0 paper:
78
+ # "...we use a learning rate of 1e-3..."
79
+ # However, they use Adafactor, which adapts the LR
80
+ # For Adam we likely want a lower one
81
+ # FLAN:
82
+ # "...decay of 1e-4..""
83
+
84
+ # Uncomment for the first step
85
+ # --no-load-optim \
86
+ OPTIMIZER_ARGS=" \
87
+ --optimizer adam \
88
+ --adam-beta1 0.9 \
89
+ --adam-beta2 0.95 \
90
+ --adam-eps 1e-8 \
91
+ --lr 2e-5 \
92
+ --lr-decay-style constant \
93
+ --lr-warmup-samples 0 \
94
+ --clip-grad 1.0 \
95
+ --weight-decay 1e-4 \
96
+ --no-load-optim \
97
+ --norm-target-loss \
98
+ "
99
+ # for 20h 1190, for 100h 5990
100
+ # --exit-duration-in-mins 1190 \
101
+ EXIT_OPTS=" \
102
+ --exit-duration-in-mins 5990 \
103
+ "
104
+
105
+ GPT_ARGS=" \
106
+ --pp-partition-method 'type:transformer|embedding' \
107
+ --num-layers $NLAYERS \
108
+ --hidden-size $NHIDDEN \
109
+ --num-attention-heads $NHEADS \
110
+ --seq-length $SEQ_LEN \
111
+ --max-position-embeddings $SEQ_LEN \
112
+ --micro-batch-size $MICRO_BATCH_SIZE \
113
+ --global-batch-size $GLOBAL_BATCH_SIZE \
114
+ --train-samples $TRAIN_SAMPLES \
115
+ --tokenizer-type PretrainedFromHF \
116
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
117
+ --init-method-std 0.0048 \
118
+ --embed-layernorm \
119
+ --fp16 \
120
+ --seed 42 \
121
+ --position-embedding-type alibi \
122
+ --checkpoint-activations \
123
+ --abort-on-unmet-fused-kernel-constraints \
124
+ --kill-switch-path $KILL_SWITCH_PATH \
125
+ --pad-vocab-size-to 250880 \
126
+ $OPTIMIZER_ARGS \
127
+ $EXIT_OPTS \
128
+ "
129
+
130
+ OUTPUT_ARGS=" \
131
+ --log-interval 1 \
132
+ --save-interval $SAVE_INTERVAL \
133
+ --eval-interval 250 \
134
+ --eval-iters 50 \
135
+ --tensorboard-dir $TENSORBOARD_PATH \
136
+ --tensorboard-queue-size 5 \
137
+ --log-timers-to-tensorboard \
138
+ --log-batch-size-to-tensorboard \
139
+ --log-validation-ppl-to-tensorboard \
140
+ "
141
+
142
+ ZERO_STAGE=1
143
+
144
+ config_json="./ds_config.$SLURM_JOBID.json"
145
+
146
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
147
+ cat <<EOT > $config_json
148
+ {
149
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
150
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
151
+ "gradient_clipping": 1.0,
152
+ "zero_optimization": {
153
+ "stage": $ZERO_STAGE
154
+ },
155
+ "fp16": {
156
+ "enabled": true,
157
+ "loss_scale": 0,
158
+ "loss_scale_window": 500,
159
+ "hysteresis": 2,
160
+ "min_loss_scale": 1,
161
+ "initial_scale_power": 12
162
+ },
163
+ "steps_per_print": 2000,
164
+ "wall_clock_breakdown": false
165
+ }
166
+ EOT
167
+
168
+
169
+ DEEPSPEED_ARGS=" \
170
+ --deepspeed \
171
+ --deepspeed_config ${config_json} \
172
+ --zero-stage ${ZERO_STAGE} \
173
+ --deepspeed-activation-checkpointing \
174
+ "
175
+
176
+ export LAUNCHER="python -u -m torch.distributed.run \
177
+ --nproc_per_node $GPUS_PER_NODE \
178
+ --nnodes $NNODES \
179
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
180
+ --rdzv_backend c10d \
181
+ --max_restarts 0 \
182
+ --tee 3 \
183
+ "
184
+
185
+ export CMD=" \
186
+ `pwd`/finetune_t0.py \
187
+ --tensor-model-parallel-size $TP_SIZE \
188
+ --pipeline-model-parallel-size $PP_SIZE \
189
+ $GPT_ARGS \
190
+ $OUTPUT_ARGS \
191
+ --save $CHECKPOINT_PATH \
192
+ --load $CHECKPOINT_PATH \
193
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
194
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
195
+ --dataloader-type single \
196
+ --data-impl mmap \
197
+ --distributed-backend nccl \
198
+ $DEEPSPEED_ARGS \
199
+ "
200
+
201
+ echo $CMD
202
+
203
+ # do not remove or the training will hang and nodes will be lost w/o this workaround
204
+ export CUDA_LAUNCH_BLOCKING=1
205
+
206
+ # hide duplicated errors using this hack - will be properly fixed in pt-1.12
207
+ export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json
208
+
209
+ clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt
210
+
211
+ echo "END TIME: $(date)"
train/tr13-mtf/smaller_models/tr13f-6b3-mtf-xp3capmixfixlonglossseq2.slurm ADDED
@@ -0,0 +1,212 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=xp3capmixfixlonglossseq2
3
+ #SBATCH --partition=gpu_p5
4
+ #SBATCH --constraint=a100
5
+ #SBATCH --reservation=hug
6
+ #SBATCH --qos=qos_gpu-gc # up to 100h
7
+ #SBATCH --nodes=8
8
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
9
+ #SBATCH --cpus-per-task=64 # number of cores per task
10
+ #SBATCH --hint=nomultithread # we get physical cores not logical
11
+ #SBATCH --gres=gpu:8 # number of gpus
12
+ #SBATCH --time 100:00:00 # maximum execution time (HH:MM:SS)
13
+ #SBATCH --output=%x-%j.out # output file name
14
+ #SBATCH --account=six@a100
15
+
16
+ set -x -e
17
+
18
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
19
+ echo "START TIME: $(date)"
20
+
21
+ variant=xp3capmixfixlonglossseq2
22
+
23
+ DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13f-6B3-ml-t0
24
+ CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant
25
+ REPO_PATH=$DATA_OUTPUT_PATH/tr13f-6B3-ml-t0-logs
26
+ TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant
27
+ LOGS_PATH=$REPO_PATH/logs/$variant
28
+ mkdir -p $LOGS_PATH
29
+ mkdir -p $TENSORBOARD_PATH
30
+
31
+ #MEGATRON_DEEPSPEED_REPO=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0/megdslossseqnew/Megatron-DeepSpeed
32
+ MEGATRON_DEEPSPEED_REPO=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0/megdslossseqnew2/Megatron-DeepSpeed
33
+ cd $MEGATRON_DEEPSPEED_REPO
34
+
35
+ KILL_SWITCH_PATH=$MEGATRON_DEEPSPEED_REPO/kill-switch-tr13f-6B3-mtf
36
+
37
+ TRAIN_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixfixlong_train.txt
38
+ VALID_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixfixlong_validation.txt
39
+ TOKENIZER_NAME_OR_PATH=bigscience/tokenizer
40
+
41
+ # defining the right environment variables
42
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
43
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
44
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
45
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
46
+ export HF_DATASETS_OFFLINE=1
47
+ export TRANSFORMERS_OFFLINE=1
48
+
49
+ # testing for potential faulty nodes
50
+ # srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"'
51
+
52
+ # so processes know who to talk to
53
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
54
+ MASTER_PORT=6001
55
+
56
+ GPUS_PER_NODE=8
57
+ NNODES=$SLURM_NNODES
58
+
59
+ PP_SIZE=1
60
+ TP_SIZE=1
61
+
62
+ # T0 paper:
63
+ # ...truncate input and target sequences to 1024 and 256 tokens...
64
+ # ...use a batch size of 1024 sequences ... 2^20 total input tokens per batch...
65
+ # We use 2048 total tokens and 512 batch size = 2**20
66
+ MICRO_BATCH_SIZE=4
67
+ GLOBAL_BATCH_SIZE=2048
68
+
69
+ NLAYERS=30
70
+ NHIDDEN=4096
71
+ NHEADS=32
72
+ SEQ_LEN=2048
73
+
74
+ SAVE_INTERVAL=2
75
+
76
+ TRAIN_SAMPLES=6_348_800
77
+
78
+ # T0 paper:
79
+ # "...we use a learning rate of 1e-3..."
80
+ # However, they use Adafactor, which adapts the LR
81
+ # For Adam we likely want a lower one
82
+ # FLAN:
83
+ # "...decay of 1e-4..""
84
+
85
+ # Uncomment for the first step
86
+ # --no-load-optim \
87
+ OPTIMIZER_ARGS=" \
88
+ --optimizer adam \
89
+ --adam-beta1 0.9 \
90
+ --adam-beta2 0.95 \
91
+ --adam-eps 1e-8 \
92
+ --lr 2e-5 \
93
+ --lr-decay-style constant \
94
+ --lr-warmup-samples 0 \
95
+ --clip-grad 1.0 \
96
+ --weight-decay 1e-4 \
97
+ --no-load-optim \
98
+ --norm-target-loss \
99
+ "
100
+ # for 20h 1190, for 100h 5990
101
+ # --exit-duration-in-mins 1190 \
102
+ EXIT_OPTS=" \
103
+ --exit-duration-in-mins 5990 \
104
+ "
105
+
106
+ GPT_ARGS=" \
107
+ --pp-partition-method 'type:transformer|embedding' \
108
+ --num-layers $NLAYERS \
109
+ --hidden-size $NHIDDEN \
110
+ --num-attention-heads $NHEADS \
111
+ --seq-length $SEQ_LEN \
112
+ --max-position-embeddings $SEQ_LEN \
113
+ --micro-batch-size $MICRO_BATCH_SIZE \
114
+ --global-batch-size $GLOBAL_BATCH_SIZE \
115
+ --train-samples $TRAIN_SAMPLES \
116
+ --tokenizer-type PretrainedFromHF \
117
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
118
+ --init-method-std 0.0048 \
119
+ --embed-layernorm \
120
+ --fp16 \
121
+ --seed 42 \
122
+ --position-embedding-type alibi \
123
+ --checkpoint-activations \
124
+ --abort-on-unmet-fused-kernel-constraints \
125
+ --kill-switch-path $KILL_SWITCH_PATH \
126
+ --pad-vocab-size-to 250880 \
127
+ $OPTIMIZER_ARGS \
128
+ $EXIT_OPTS \
129
+ "
130
+
131
+ OUTPUT_ARGS=" \
132
+ --log-interval 1 \
133
+ --save-interval $SAVE_INTERVAL \
134
+ --eval-interval 250 \
135
+ --eval-iters 50 \
136
+ --tensorboard-dir $TENSORBOARD_PATH \
137
+ --tensorboard-queue-size 5 \
138
+ --log-timers-to-tensorboard \
139
+ --log-batch-size-to-tensorboard \
140
+ --log-validation-ppl-to-tensorboard \
141
+ "
142
+
143
+ ZERO_STAGE=1
144
+
145
+ config_json="./ds_config.$SLURM_JOBID.json"
146
+
147
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
148
+ cat <<EOT > $config_json
149
+ {
150
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
151
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
152
+ "gradient_clipping": 1.0,
153
+ "zero_optimization": {
154
+ "stage": $ZERO_STAGE
155
+ },
156
+ "fp16": {
157
+ "enabled": true,
158
+ "loss_scale": 0,
159
+ "loss_scale_window": 500,
160
+ "hysteresis": 2,
161
+ "min_loss_scale": 1,
162
+ "initial_scale_power": 12
163
+ },
164
+ "steps_per_print": 2000,
165
+ "wall_clock_breakdown": false
166
+ }
167
+ EOT
168
+
169
+
170
+ DEEPSPEED_ARGS=" \
171
+ --deepspeed \
172
+ --deepspeed_config ${config_json} \
173
+ --zero-stage ${ZERO_STAGE} \
174
+ --deepspeed-activation-checkpointing \
175
+ "
176
+
177
+ export LAUNCHER="python -u -m torch.distributed.run \
178
+ --nproc_per_node $GPUS_PER_NODE \
179
+ --nnodes $NNODES \
180
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
181
+ --rdzv_backend c10d \
182
+ --max_restarts 0 \
183
+ --tee 3 \
184
+ "
185
+
186
+ export CMD=" \
187
+ `pwd`/finetune_t0.py \
188
+ --tensor-model-parallel-size $TP_SIZE \
189
+ --pipeline-model-parallel-size $PP_SIZE \
190
+ $GPT_ARGS \
191
+ $OUTPUT_ARGS \
192
+ --save $CHECKPOINT_PATH \
193
+ --load $CHECKPOINT_PATH \
194
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
195
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
196
+ --dataloader-type single \
197
+ --data-impl mmap \
198
+ --distributed-backend nccl \
199
+ $DEEPSPEED_ARGS \
200
+ "
201
+
202
+ echo $CMD
203
+
204
+ # do not remove or the training will hang and nodes will be lost w/o this workaround
205
+ export CUDA_LAUNCH_BLOCKING=1
206
+
207
+ # hide duplicated errors using this hack - will be properly fixed in pt-1.12
208
+ export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json
209
+
210
+ clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt
211
+
212
+ echo "END TIME: $(date)"
train/tr13-mtf/smaller_models/tr13f-6b3-mtf-xp3capmixlonglossseq.slurm ADDED
@@ -0,0 +1,211 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=xp3capmixlonglossseq
3
+ #SBATCH --partition=gpu_p5
4
+ #SBATCH --constraint=a100
5
+ #SBATCH --reservation=hug
6
+ #SBATCH --qos=qos_gpu-gc # up to 100h
7
+ #SBATCH --nodes=8
8
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
9
+ #SBATCH --cpus-per-task=64 # number of cores per task
10
+ #SBATCH --hint=nomultithread # we get physical cores not logical
11
+ #SBATCH --gres=gpu:8 # number of gpus
12
+ #SBATCH --time 100:00:00 # maximum execution time (HH:MM:SS)
13
+ #SBATCH --output=%x-%j.out # output file name
14
+ #SBATCH --account=six@a100
15
+
16
+ set -x -e
17
+
18
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
19
+ echo "START TIME: $(date)"
20
+
21
+ variant=xp3capmixlonglossseq
22
+
23
+ DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13f-6B3-ml-t0
24
+ CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant
25
+ REPO_PATH=$DATA_OUTPUT_PATH/tr13f-6B3-ml-t0-logs
26
+ TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant
27
+ LOGS_PATH=$REPO_PATH/logs/$variant
28
+ mkdir -p $LOGS_PATH
29
+ mkdir -p $TENSORBOARD_PATH
30
+
31
+ MEGATRON_DEEPSPEED_REPO=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0/megdslossseqnew/Megatron-DeepSpeed
32
+ cd $MEGATRON_DEEPSPEED_REPO
33
+
34
+ KILL_SWITCH_PATH=$MEGATRON_DEEPSPEED_REPO/kill-switch-tr13f-6B3-mtf
35
+
36
+ TRAIN_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixlong_train.txt
37
+ VALID_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixlong_validation.txt
38
+ TOKENIZER_NAME_OR_PATH=bigscience/tokenizer
39
+
40
+ # defining the right environment variables
41
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
42
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
43
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
44
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
45
+ export HF_DATASETS_OFFLINE=1
46
+ export TRANSFORMERS_OFFLINE=1
47
+
48
+ # testing for potential faulty nodes
49
+ # srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"'
50
+
51
+ # so processes know who to talk to
52
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
53
+ MASTER_PORT=6001
54
+
55
+ GPUS_PER_NODE=8
56
+ NNODES=$SLURM_NNODES
57
+
58
+ PP_SIZE=1
59
+ TP_SIZE=1
60
+
61
+ # T0 paper:
62
+ # ...truncate input and target sequences to 1024 and 256 tokens...
63
+ # ...use a batch size of 1024 sequences ... 2^20 total input tokens per batch...
64
+ # We use 2048 total tokens and 512 batch size = 2**20
65
+ MICRO_BATCH_SIZE=4
66
+ GLOBAL_BATCH_SIZE=2048
67
+
68
+ NLAYERS=30
69
+ NHIDDEN=4096
70
+ NHEADS=32
71
+ SEQ_LEN=2048
72
+
73
+ SAVE_INTERVAL=2
74
+
75
+ TRAIN_SAMPLES=6_348_800
76
+
77
+ # T0 paper:
78
+ # "...we use a learning rate of 1e-3..."
79
+ # However, they use Adafactor, which adapts the LR
80
+ # For Adam we likely want a lower one
81
+ # FLAN:
82
+ # "...decay of 1e-4..""
83
+
84
+ # Uncomment for the first step
85
+ # --no-load-optim \
86
+ OPTIMIZER_ARGS=" \
87
+ --optimizer adam \
88
+ --adam-beta1 0.9 \
89
+ --adam-beta2 0.95 \
90
+ --adam-eps 1e-8 \
91
+ --lr 2e-5 \
92
+ --lr-decay-style constant \
93
+ --lr-warmup-samples 0 \
94
+ --clip-grad 1.0 \
95
+ --weight-decay 1e-4 \
96
+ --no-load-optim \
97
+ --norm-target-loss \
98
+ "
99
+ # for 20h 1190, for 100h 5990
100
+ # --exit-duration-in-mins 1190 \
101
+ EXIT_OPTS=" \
102
+ --exit-duration-in-mins 5990 \
103
+ "
104
+
105
+ GPT_ARGS=" \
106
+ --pp-partition-method 'type:transformer|embedding' \
107
+ --num-layers $NLAYERS \
108
+ --hidden-size $NHIDDEN \
109
+ --num-attention-heads $NHEADS \
110
+ --seq-length $SEQ_LEN \
111
+ --max-position-embeddings $SEQ_LEN \
112
+ --micro-batch-size $MICRO_BATCH_SIZE \
113
+ --global-batch-size $GLOBAL_BATCH_SIZE \
114
+ --train-samples $TRAIN_SAMPLES \
115
+ --tokenizer-type PretrainedFromHF \
116
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
117
+ --init-method-std 0.0048 \
118
+ --embed-layernorm \
119
+ --fp16 \
120
+ --seed 42 \
121
+ --position-embedding-type alibi \
122
+ --checkpoint-activations \
123
+ --abort-on-unmet-fused-kernel-constraints \
124
+ --kill-switch-path $KILL_SWITCH_PATH \
125
+ --pad-vocab-size-to 250880 \
126
+ $OPTIMIZER_ARGS \
127
+ $EXIT_OPTS \
128
+ "
129
+
130
+ OUTPUT_ARGS=" \
131
+ --log-interval 1 \
132
+ --save-interval $SAVE_INTERVAL \
133
+ --eval-interval 250 \
134
+ --eval-iters 50 \
135
+ --tensorboard-dir $TENSORBOARD_PATH \
136
+ --tensorboard-queue-size 5 \
137
+ --log-timers-to-tensorboard \
138
+ --log-batch-size-to-tensorboard \
139
+ --log-validation-ppl-to-tensorboard \
140
+ "
141
+
142
+ ZERO_STAGE=1
143
+
144
+ config_json="./ds_config.$SLURM_JOBID.json"
145
+
146
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
147
+ cat <<EOT > $config_json
148
+ {
149
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
150
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
151
+ "gradient_clipping": 1.0,
152
+ "zero_optimization": {
153
+ "stage": $ZERO_STAGE
154
+ },
155
+ "fp16": {
156
+ "enabled": true,
157
+ "loss_scale": 0,
158
+ "loss_scale_window": 500,
159
+ "hysteresis": 2,
160
+ "min_loss_scale": 1,
161
+ "initial_scale_power": 12
162
+ },
163
+ "steps_per_print": 2000,
164
+ "wall_clock_breakdown": false
165
+ }
166
+ EOT
167
+
168
+
169
+ DEEPSPEED_ARGS=" \
170
+ --deepspeed \
171
+ --deepspeed_config ${config_json} \
172
+ --zero-stage ${ZERO_STAGE} \
173
+ --deepspeed-activation-checkpointing \
174
+ "
175
+
176
+ export LAUNCHER="python -u -m torch.distributed.run \
177
+ --nproc_per_node $GPUS_PER_NODE \
178
+ --nnodes $NNODES \
179
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
180
+ --rdzv_backend c10d \
181
+ --max_restarts 0 \
182
+ --tee 3 \
183
+ "
184
+
185
+ export CMD=" \
186
+ `pwd`/finetune_t0.py \
187
+ --tensor-model-parallel-size $TP_SIZE \
188
+ --pipeline-model-parallel-size $PP_SIZE \
189
+ $GPT_ARGS \
190
+ $OUTPUT_ARGS \
191
+ --save $CHECKPOINT_PATH \
192
+ --load $CHECKPOINT_PATH \
193
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
194
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
195
+ --dataloader-type single \
196
+ --data-impl mmap \
197
+ --distributed-backend nccl \
198
+ $DEEPSPEED_ARGS \
199
+ "
200
+
201
+ echo $CMD
202
+
203
+ # do not remove or the training will hang and nodes will be lost w/o this workaround
204
+ export CUDA_LAUNCH_BLOCKING=1
205
+
206
+ # hide duplicated errors using this hack - will be properly fixed in pt-1.12
207
+ export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json
208
+
209
+ clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt
210
+
211
+ echo "END TIME: $(date)"
train/tr13-mtf/smaller_models/tr13f-6b3-mtf-xp3capmixlossseqeos.slurm ADDED
@@ -0,0 +1,211 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=xp3capmixlossseqeos
3
+ #SBATCH --partition=gpu_p5
4
+ #SBATCH --constraint=a100
5
+ #SBATCH --reservation=hug
6
+ #SBATCH --qos=qos_gpu-gc # up to 100h
7
+ #SBATCH --nodes=8
8
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
9
+ #SBATCH --cpus-per-task=64 # number of cores per task
10
+ #SBATCH --hint=nomultithread # we get physical cores not logical
11
+ #SBATCH --gres=gpu:8 # number of gpus
12
+ #SBATCH --time 100:00:00 # maximum execution time (HH:MM:SS)
13
+ #SBATCH --output=%x-%j.out # output file name
14
+ #SBATCH --account=six@a100
15
+
16
+ set -x -e
17
+
18
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
19
+ echo "START TIME: $(date)"
20
+
21
+ variant=xp3capmixlossseqeos
22
+
23
+ DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13f-6B3-ml-t0
24
+ CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant
25
+ REPO_PATH=$DATA_OUTPUT_PATH/tr13f-6B3-ml-t0-logs
26
+ TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant
27
+ LOGS_PATH=$REPO_PATH/logs/$variant
28
+ mkdir -p $LOGS_PATH
29
+ mkdir -p $TENSORBOARD_PATH
30
+
31
+ MEGATRON_DEEPSPEED_REPO=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0/megdslossseqnew/Megatron-DeepSpeed
32
+ cd $MEGATRON_DEEPSPEED_REPO
33
+
34
+ KILL_SWITCH_PATH=$MEGATRON_DEEPSPEED_REPO/kill-switch-tr13f-6B3-mtf
35
+
36
+ TRAIN_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixv3eos_train.txt
37
+ VALID_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixv3eos_validation.txt
38
+ TOKENIZER_NAME_OR_PATH=bigscience/tokenizer
39
+
40
+ # defining the right environment variables
41
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
42
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
43
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
44
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
45
+ export HF_DATASETS_OFFLINE=1
46
+ export TRANSFORMERS_OFFLINE=1
47
+
48
+ # testing for potential faulty nodes
49
+ # srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"'
50
+
51
+ # so processes know who to talk to
52
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
53
+ MASTER_PORT=6001
54
+
55
+ GPUS_PER_NODE=8
56
+ NNODES=$SLURM_NNODES
57
+
58
+ PP_SIZE=1
59
+ TP_SIZE=1
60
+
61
+ # T0 paper:
62
+ # ...truncate input and target sequences to 1024 and 256 tokens...
63
+ # ...use a batch size of 1024 sequences ... 2^20 total input tokens per batch...
64
+ # We use 2048 total tokens and 512 batch size = 2**20
65
+ MICRO_BATCH_SIZE=4
66
+ GLOBAL_BATCH_SIZE=2048
67
+
68
+ NLAYERS=30
69
+ NHIDDEN=4096
70
+ NHEADS=32
71
+ SEQ_LEN=2048
72
+
73
+ SAVE_INTERVAL=2
74
+
75
+ TRAIN_SAMPLES=6_348_800
76
+
77
+ # T0 paper:
78
+ # "...we use a learning rate of 1e-3..."
79
+ # However, they use Adafactor, which adapts the LR
80
+ # For Adam we likely want a lower one
81
+ # FLAN:
82
+ # "...decay of 1e-4..""
83
+
84
+ # Uncomment for the first step
85
+ # --no-load-optim \
86
+ OPTIMIZER_ARGS=" \
87
+ --optimizer adam \
88
+ --adam-beta1 0.9 \
89
+ --adam-beta2 0.95 \
90
+ --adam-eps 1e-8 \
91
+ --lr 2e-5 \
92
+ --lr-decay-style constant \
93
+ --lr-warmup-samples 0 \
94
+ --clip-grad 1.0 \
95
+ --weight-decay 1e-4 \
96
+ --no-load-optim \
97
+ --norm-target-loss \
98
+ "
99
+ # for 20h 1190, for 100h 5990
100
+ # --exit-duration-in-mins 1190 \
101
+ EXIT_OPTS=" \
102
+ --exit-duration-in-mins 5990 \
103
+ "
104
+
105
+ GPT_ARGS=" \
106
+ --pp-partition-method 'type:transformer|embedding' \
107
+ --num-layers $NLAYERS \
108
+ --hidden-size $NHIDDEN \
109
+ --num-attention-heads $NHEADS \
110
+ --seq-length $SEQ_LEN \
111
+ --max-position-embeddings $SEQ_LEN \
112
+ --micro-batch-size $MICRO_BATCH_SIZE \
113
+ --global-batch-size $GLOBAL_BATCH_SIZE \
114
+ --train-samples $TRAIN_SAMPLES \
115
+ --tokenizer-type PretrainedFromHF \
116
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
117
+ --init-method-std 0.0048 \
118
+ --embed-layernorm \
119
+ --fp16 \
120
+ --seed 42 \
121
+ --position-embedding-type alibi \
122
+ --checkpoint-activations \
123
+ --abort-on-unmet-fused-kernel-constraints \
124
+ --kill-switch-path $KILL_SWITCH_PATH \
125
+ --pad-vocab-size-to 250880 \
126
+ $OPTIMIZER_ARGS \
127
+ $EXIT_OPTS \
128
+ "
129
+
130
+ OUTPUT_ARGS=" \
131
+ --log-interval 1 \
132
+ --save-interval $SAVE_INTERVAL \
133
+ --eval-interval 250 \
134
+ --eval-iters 50 \
135
+ --tensorboard-dir $TENSORBOARD_PATH \
136
+ --tensorboard-queue-size 5 \
137
+ --log-timers-to-tensorboard \
138
+ --log-batch-size-to-tensorboard \
139
+ --log-validation-ppl-to-tensorboard \
140
+ "
141
+
142
+ ZERO_STAGE=1
143
+
144
+ config_json="./ds_config.$SLURM_JOBID.json"
145
+
146
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
147
+ cat <<EOT > $config_json
148
+ {
149
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
150
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
151
+ "gradient_clipping": 1.0,
152
+ "zero_optimization": {
153
+ "stage": $ZERO_STAGE
154
+ },
155
+ "fp16": {
156
+ "enabled": true,
157
+ "loss_scale": 0,
158
+ "loss_scale_window": 500,
159
+ "hysteresis": 2,
160
+ "min_loss_scale": 1,
161
+ "initial_scale_power": 12
162
+ },
163
+ "steps_per_print": 2000,
164
+ "wall_clock_breakdown": false
165
+ }
166
+ EOT
167
+
168
+
169
+ DEEPSPEED_ARGS=" \
170
+ --deepspeed \
171
+ --deepspeed_config ${config_json} \
172
+ --zero-stage ${ZERO_STAGE} \
173
+ --deepspeed-activation-checkpointing \
174
+ "
175
+
176
+ export LAUNCHER="python -u -m torch.distributed.run \
177
+ --nproc_per_node $GPUS_PER_NODE \
178
+ --nnodes $NNODES \
179
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
180
+ --rdzv_backend c10d \
181
+ --max_restarts 0 \
182
+ --tee 3 \
183
+ "
184
+
185
+ export CMD=" \
186
+ `pwd`/finetune_t0.py \
187
+ --tensor-model-parallel-size $TP_SIZE \
188
+ --pipeline-model-parallel-size $PP_SIZE \
189
+ $GPT_ARGS \
190
+ $OUTPUT_ARGS \
191
+ --save $CHECKPOINT_PATH \
192
+ --load $CHECKPOINT_PATH \
193
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
194
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
195
+ --dataloader-type single \
196
+ --data-impl mmap \
197
+ --distributed-backend nccl \
198
+ $DEEPSPEED_ARGS \
199
+ "
200
+
201
+ echo $CMD
202
+
203
+ # do not remove or the training will hang and nodes will be lost w/o this workaround
204
+ export CUDA_LAUNCH_BLOCKING=1
205
+
206
+ # hide duplicated errors using this hack - will be properly fixed in pt-1.12
207
+ export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json
208
+
209
+ clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt
210
+
211
+ echo "END TIME: $(date)"
train/tr13-mtf/smaller_models/tr13f-6b3-mtf-xp3capmixnewcodelonglossseq-val.slurm ADDED
@@ -0,0 +1,212 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=valxp3capmixnewcodelong
3
+ #SBATCH --partition=gpu_p5
4
+ #SBATCH --constraint=a100
5
+ #SBATCH --reservation=hug
6
+ #SBATCH --qos=qos_gpu-gc # up to 100h
7
+ #SBATCH --nodes=8
8
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
9
+ #SBATCH --cpus-per-task=64 # number of cores per task
10
+ #SBATCH --hint=nomultithread # we get physical cores not logical
11
+ #SBATCH --gres=gpu:8 # number of gpus
12
+ #SBATCH --time 10:00:00 # maximum execution time (HH:MM:SS)
13
+ #SBATCH --output=%x-%j.out # output file name
14
+ #SBATCH --account=six@a100
15
+
16
+ set -x -e
17
+
18
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
19
+ echo "START TIME: $(date)"
20
+
21
+ variant=xp3capmixnewcodelonglossseq
22
+
23
+ DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13f-6B3-ml-t0
24
+ CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant
25
+ REPO_PATH=$DATA_OUTPUT_PATH/tr13f-6B3-ml-t0-logs
26
+ TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant
27
+ LOGS_PATH=$REPO_PATH/logs/$variant
28
+ mkdir -p $LOGS_PATH
29
+ mkdir -p $TENSORBOARD_PATH
30
+
31
+ MEGATRON_DEEPSPEED_REPO=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0/megdslossseqnew/Megatron-DeepSpeed
32
+ cd $MEGATRON_DEEPSPEED_REPO
33
+
34
+ KILL_SWITCH_PATH=$MEGATRON_DEEPSPEED_REPO/kill-switch-tr13f-6B3-mtf
35
+
36
+ TRAIN_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixnewcodelong_train.txt
37
+ VALID_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixnewcodelong_validation_pretr.txt
38
+ TOKENIZER_NAME_OR_PATH=bigscience/tokenizer
39
+
40
+ # defining the right environment variables
41
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
42
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
43
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
44
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
45
+ export HF_DATASETS_OFFLINE=1
46
+ export TRANSFORMERS_OFFLINE=1
47
+
48
+ # testing for potential faulty nodes
49
+ # srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"'
50
+
51
+ # so processes know who to talk to
52
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
53
+ MASTER_PORT=6001
54
+
55
+ GPUS_PER_NODE=8
56
+ NNODES=$SLURM_NNODES
57
+
58
+ PP_SIZE=1
59
+ TP_SIZE=1
60
+
61
+ # T0 paper:
62
+ # ...truncate input and target sequences to 1024 and 256 tokens...
63
+ # ...use a batch size of 1024 sequences ... 2^20 total input tokens per batch...
64
+ # We use 2048 total tokens and 512 batch size = 2**20
65
+ MICRO_BATCH_SIZE=4
66
+ GLOBAL_BATCH_SIZE=2048
67
+
68
+ NLAYERS=30
69
+ NHIDDEN=4096
70
+ NHEADS=32
71
+ SEQ_LEN=2048
72
+ # 250
73
+ SAVE_INTERVAL=2
74
+
75
+ TRAIN_SAMPLES=6_348_800
76
+
77
+ # T0 paper:
78
+ # "...we use a learning rate of 1e-3..."
79
+ # However, they use Adafactor, which adapts the LR
80
+ # For Adam we likely want a lower one
81
+ # FLAN:
82
+ # "...decay of 1e-4..""
83
+
84
+ # Uncomment for the first step
85
+ # --no-load-optim \
86
+ OPTIMIZER_ARGS=" \
87
+ --optimizer adam \
88
+ --adam-beta1 0.9 \
89
+ --adam-beta2 0.95 \
90
+ --adam-eps 1e-8 \
91
+ --lr 2e-5 \
92
+ --lr-decay-style constant \
93
+ --lr-warmup-samples 0 \
94
+ --clip-grad 1.0 \
95
+ --weight-decay 1e-4 \
96
+ --no-load-optim \
97
+ --norm-target-loss \
98
+ "
99
+ # for 20h 1190, for 100h 5990
100
+ # --exit-duration-in-mins 1190 \
101
+ EXIT_OPTS=" \
102
+ --exit-duration-in-mins 5990 \
103
+ "
104
+
105
+ GPT_ARGS=" \
106
+ --pp-partition-method 'type:transformer|embedding' \
107
+ --num-layers $NLAYERS \
108
+ --hidden-size $NHIDDEN \
109
+ --num-attention-heads $NHEADS \
110
+ --seq-length $SEQ_LEN \
111
+ --max-position-embeddings $SEQ_LEN \
112
+ --micro-batch-size $MICRO_BATCH_SIZE \
113
+ --global-batch-size $GLOBAL_BATCH_SIZE \
114
+ --train-samples $TRAIN_SAMPLES \
115
+ --tokenizer-type PretrainedFromHF \
116
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
117
+ --init-method-std 0.0048 \
118
+ --embed-layernorm \
119
+ --fp16 \
120
+ --seed 42 \
121
+ --position-embedding-type alibi \
122
+ --checkpoint-activations \
123
+ --abort-on-unmet-fused-kernel-constraints \
124
+ --kill-switch-path $KILL_SWITCH_PATH \
125
+ --pad-vocab-size-to 250880 \
126
+ $OPTIMIZER_ARGS \
127
+ $EXIT_OPTS \
128
+ "
129
+
130
+ OUTPUT_ARGS=" \
131
+ --log-interval 1 \
132
+ --save-interval $SAVE_INTERVAL \
133
+ --eval-interval 125 \
134
+ --eval-only True \
135
+ --eval-iters 10 \
136
+ --tensorboard-dir $TENSORBOARD_PATH \
137
+ --tensorboard-queue-size 5 \
138
+ --log-timers-to-tensorboard \
139
+ --log-batch-size-to-tensorboard \
140
+ --log-validation-ppl-to-tensorboard \
141
+ "
142
+
143
+ ZERO_STAGE=1
144
+
145
+ config_json="./ds_config.$SLURM_JOBID.json"
146
+
147
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
148
+ cat <<EOT > $config_json
149
+ {
150
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
151
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
152
+ "gradient_clipping": 1.0,
153
+ "zero_optimization": {
154
+ "stage": $ZERO_STAGE
155
+ },
156
+ "fp16": {
157
+ "enabled": true,
158
+ "loss_scale": 0,
159
+ "loss_scale_window": 500,
160
+ "hysteresis": 2,
161
+ "min_loss_scale": 1,
162
+ "initial_scale_power": 12
163
+ },
164
+ "steps_per_print": 2000,
165
+ "wall_clock_breakdown": false
166
+ }
167
+ EOT
168
+
169
+
170
+ DEEPSPEED_ARGS=" \
171
+ --deepspeed \
172
+ --deepspeed_config ${config_json} \
173
+ --zero-stage ${ZERO_STAGE} \
174
+ --deepspeed-activation-checkpointing \
175
+ "
176
+
177
+ export LAUNCHER="python -u -m torch.distributed.run \
178
+ --nproc_per_node $GPUS_PER_NODE \
179
+ --nnodes $NNODES \
180
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
181
+ --rdzv_backend c10d \
182
+ --max_restarts 0 \
183
+ --tee 3 \
184
+ "
185
+
186
+ export CMD=" \
187
+ `pwd`/finetune_t0.py \
188
+ --tensor-model-parallel-size $TP_SIZE \
189
+ --pipeline-model-parallel-size $PP_SIZE \
190
+ $GPT_ARGS \
191
+ $OUTPUT_ARGS \
192
+ --save $CHECKPOINT_PATH \
193
+ --load $CHECKPOINT_PATH \
194
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
195
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
196
+ --dataloader-type single \
197
+ --data-impl mmap \
198
+ --distributed-backend nccl \
199
+ $DEEPSPEED_ARGS \
200
+ "
201
+
202
+ echo $CMD
203
+
204
+ # do not remove or the training will hang and nodes will be lost w/o this workaround
205
+ export CUDA_LAUNCH_BLOCKING=1
206
+
207
+ # hide duplicated errors using this hack - will be properly fixed in pt-1.12
208
+ export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json
209
+
210
+ clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt
211
+
212
+ echo "END TIME: $(date)"