applied-ai-018 committed
Commit 2024260 · verified · 1 Parent(s): 0387b0f

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50)
  1. evaluation/results/tr11/README.md +11 -0
  2. evaluation/results/tr11/bloom/bslmeval.json +0 -0
  3. evaluation/results/tr11/bloom/humaneval_temp02.json +1 -0
  4. evaluation/results/tr11/bloom/humaneval_temp06.json +1 -0
  5. evaluation/results/tr11/bloom/humaneval_temp08.json +1 -0
  6. evaluation/results/tr11/bloom/mdtable.txt +148 -0
  7. evaluation/results/tr11/bloom350m/humaneval_temp08.json +1 -0
  8. evaluation/results/tr11/templates.txt +65 -0
  9. evaluation/results/tr13/lmeval/download_bslmeval.slurm +37 -0
  10. evaluation/results/tr13/tzeroeval/convert_validation_2b5.slurm +352 -0
  11. train/tr1-13B-base/README.md +850 -0
  12. train/tr1-13B-base/chronicles.md +425 -0
  13. train/tr1-13B-base/start-tr1-13B +57 -0
  14. train/tr1-13B-base/tr1-13B-hub-sync-logs.slurm +23 -0
  15. train/tr1-13B-base/tr1-13B-hub-sync-tensorboard.slurm +23 -0
  16. train/tr1-13B-base/tr1-13B-round1.slurm +174 -0
  17. train/tr1-13B-base/tr1-13B-short.slurm +189 -0
  18. train/tr1-13B-base/tr1-13B-slurm-status.slurm +23 -0
  19. train/tr10-13B-ml/README.md +111 -0
  20. train/tr10-13B-ml/start-tr10-13B +58 -0
  21. train/tr10-13B-ml/tr10-13B.slurm +182 -0
  22. train/tr12-1B3-oscar/tr12a-1B3-oscar-en-filtered.slurm +191 -0
  23. train/tr12-1B3-oscar/tr12b-1B3-oscar-en-filtered-dedup.slurm +191 -0
  24. train/tr12-1B3-oscar/tr12c-1B3-oscar-en-overfiltered.slurm +191 -0
  25. train/tr13-mtf/smaller_models/tr13-6b3-mtf-xp3mt.slurm +211 -0
  26. train/tr13-mtf/smaller_models/tr13b-1b3-ml-xp3capmixnewcodelonglossseq-a100.slurm +211 -0
  27. train/tr13-mtf/smaller_models/tr13b-1b3-ml-xp3capmixnewcodelonglossseq.slurm +211 -0
  28. train/tr13-mtf/smaller_models/tr13c-2b5-mtf-xp3capmixnewcodelonglossseq-a100.slurm +210 -0
  29. train/tr13-mtf/smaller_models/tr13c-2b5-mtf-xp3capmixnewcodelonglossseq.slurm +211 -0
  30. train/tr13-mtf/smaller_models/tr13e-350m-mtf-xp3capmixnewcodelonglossseq-val.slurm +211 -0
  31. train/tr13-mtf/smaller_models/tr13f-6B3-mtf-bos.slurm +209 -0
  32. train/tr13-mtf/smaller_models/tr13f-6B3-mtf-xp3.slurm +210 -0
  33. train/tr13-mtf/smaller_models/tr13f-6B3-mtf-xp3mixed.slurm +210 -0
  34. train/tr13-mtf/smaller_models/tr13f-6B3-mtf.slurm +209 -0
  35. train/tr13-mtf/smaller_models/tr13f-6b3-mtf-p31lossseq.slurm +210 -0
  36. train/tr13-mtf/smaller_models/tr13f-6b3-mtf-xp3cap.slurm +210 -0
  37. train/tr13-mtf/smaller_models/tr13f-6b3-mtf-xp3capmixfixlossseq.slurm +211 -0
  38. train/tr13-mtf/smaller_models/tr13f-6b3-mtf-xp3capmixlossseq.slurm +210 -0
  39. train/tr13-mtf/smaller_models/tr13f-6b3-mtf-xp3capmixlossseqbos.slurm +211 -0
  40. train/tr13-mtf/smaller_models/tr13f-6b3-mtf-xp3capmixnewcodelonglossseq.slurm +211 -0
  41. train/tr13-mtf/smaller_models/tr13f-6b3-mtf-xp3capmixnostriplossseq.slurm +211 -0
  42. train/tr13-mtf/smaller_models/tr13f-6b3-mtf-xp3capmixv2lossseqbitfit.slurm +211 -0
  43. train/tr13-mtf/smaller_models/tr13f-6b3-mtf-xp3zzlossseq.slurm +211 -0
  44. train/tr13-mtf/tr13-176B-mtf-p31lossseq-val.slurm +225 -0
  45. train/tr14-mup/grid_search.sh +16 -0
  46. train/tr14-mup/lr_sweep.sh +7 -0
  47. train/tr14-mup/tr14-2B7-grid-search-mup.slurm +206 -0
  48. train/tr14-mup/tr14-2B7-mup-cluster.slurm +237 -0
  49. train/tr14-mup/tr14-2B7-mup.slurm +205 -0
  50. train/tr14-mup/tr14-39M-grid-search-mup.slurm +204 -0
evaluation/results/tr11/README.md ADDED
@@ -0,0 +1,11 @@
1
+ # BigScience BLOOM Evaluation Results
2
+
3
+ This folder contains evaluation results of the BLOOM model family.
4
+
5
+ ## Evaluation Procedure
6
+
7
+ - bslmeval files were created using the following:
8
+ - https://github.com/bigscience-workshop/Megatron-DeepSpeed/pull/291
9
+ - https://github.com/bigscience-workshop/lm-evaluation-harness
10
+ - humaneval files were created using the HumanEval code dataset with the following:
11
+ - https://github.com/loubnabnl/bloom-code-evaluation
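+
+ The `humaneval_temp*.json` files below report pass@k scores. As a rough sketch only (not the exact bloom-code-evaluation code; `n=200, c=31, k=10` are purely hypothetical counts for a single problem), the standard unbiased pass@k estimator looks like this:
+
+ ```bash
+ # per-problem unbiased pass@k estimate; the reported score averages this over all problems
+ python3 -c "
+ from math import comb
+ n, c, k = 200, 31, 10   # hypothetical: n samples generated, c of them passed the tests
+ print(1 - comb(n - c, k) / comb(n, k))
+ "
+ ```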
evaluation/results/tr11/bloom/bslmeval.json ADDED
The diff for this file is too large to render. See raw diff
 
evaluation/results/tr11/bloom/humaneval_temp02.json ADDED
@@ -0,0 +1 @@
1
+ {"pass@1": 0.15524390243902436, "pass@10": 0.25233297635811675, "pass@100": 0.31325799399147314}
evaluation/results/tr11/bloom/humaneval_temp06.json ADDED
@@ -0,0 +1 @@
1
+ {"pass@1": 0.14417682926829267, "pass@10": 0.3220367632383857, "pass@100": 0.5380466541797105}
evaluation/results/tr11/bloom/humaneval_temp08.json ADDED
@@ -0,0 +1 @@
1
+ {"pass@1": 0.12228658536585368, "pass@10": 0.30805813658531067, "pass@100": 0.5545431515723145}
evaluation/results/tr11/bloom/mdtable.txt ADDED
@@ -0,0 +1,148 @@
1
+ | Task | Language | Metric | BLOOM-176B | OPT-175B* |
2
+ |:--------|:-----------------|:------------------------|-------------:|------------:|
3
+ | arc_challenge | eng | acc ↑ | 0.411 | 0.412 |
4
+ | arc_easy | eng | acc ↑ | 0.726 | 0.751 |
5
+ | axb (Median of 10 prompts) | eng | acc ↑ | 0.575 | 0.532 |
6
+ | axg (Median of 10 prompts) | eng | acc ↑ | 0.525 | 0.548 |
7
+ | boolq (Median of 11 prompts) | eng | acc ↑ | 0.635 | 0.622 |
8
+ | cb (Median of 15 prompts) | eng | acc ↑ | 0.339 | 0.411 |
9
+ | cola (Median of 5 prompts) | eng | acc ↑ | 0.39 | 0.444 |
10
+ | copa (Median of 9 prompts) | eng | acc ↑ | 0.56 | 0.55 |
11
+ | crows_pairs_english (Median of 6 prompts) | eng | acc ↑ | 0.5 | 0.502 |
12
+ | crows_pairs_french (Median of 7 prompts) | fra | acc ↑ | 0.506 | 0.499 |
13
+ | diabla (Median of 2 prompts) | eng | acc ↑ | 0.295 | 0.289 |
14
+ | gsarti/flores_101_afr | afr | byte_perplexity ↓ | 4.254 | 3.381 |
15
+ | gsarti/flores_101_amh | amh | byte_perplexity ↓ | 3.717 | 3.87 |
16
+ | gsarti/flores_101_ara | ara | byte_perplexity ↓ | 1.705 | 2.42 |
17
+ | gsarti/flores_101_asm | asm | byte_perplexity ↓ | 6.577 | 3.028 |
18
+ | gsarti/flores_101_ast | ast | byte_perplexity ↓ | 2.856 | 4.737 |
19
+ | gsarti/flores_101_azj | azj | byte_perplexity ↓ | 4.807 | 4.767 |
20
+ | gsarti/flores_101_bel | bel | byte_perplexity ↓ | 2.731 | 2.557 |
21
+ | gsarti/flores_101_ben | ben | byte_perplexity ↓ | 5.993 | 2.243 |
22
+ | gsarti/flores_101_bos | bos | byte_perplexity ↓ | 3.594 | 2.668 |
23
+ | gsarti/flores_101_bul | bul | byte_perplexity ↓ | 2.159 | 2.099 |
24
+ | gsarti/flores_101_cat | cat | byte_perplexity ↓ | 2.168 | 2.837 |
25
+ | gsarti/flores_101_ceb | ceb | byte_perplexity ↓ | 5.287 | 3.636 |
26
+ | gsarti/flores_101_ces | ces | byte_perplexity ↓ | 3.452 | 2.749 |
27
+ | gsarti/flores_101_ckb | ckb | byte_perplexity ↓ | 3.705 | 4.688 |
28
+ | gsarti/flores_101_cym | cym | byte_perplexity ↓ | 7.089 | 5.075 |
29
+ | gsarti/flores_101_dan | dan | byte_perplexity ↓ | 3.43 | 2.492 |
30
+ | gsarti/flores_101_deu | deu | byte_perplexity ↓ | 2.338 | 2.099 |
31
+ | gsarti/flores_101_ell | ell | byte_perplexity ↓ | 1.96 | 1.811 |
32
+ | gsarti/flores_101_eng | eng | byte_perplexity ↓ | 1.882 | 1.9 |
33
+ | gsarti/flores_101_est | est | byte_perplexity ↓ | 5.774 | 3.533 |
34
+ | gsarti/flores_101_fas | fas | byte_perplexity ↓ | 2.431 | 2.444 |
35
+ | gsarti/flores_101_fin | fin | byte_perplexity ↓ | 4.304 | 2.601 |
36
+ | gsarti/flores_101_fra | fra | byte_perplexity ↓ | 1.937 | 1.984 |
37
+ | gsarti/flores_101_ful | ful | byte_perplexity ↓ | 9.74 | 11.84 |
38
+ | gsarti/flores_101_gle | gle | byte_perplexity ↓ | 6.035 | 3.914 |
39
+ | gsarti/flores_101_glg | glg | byte_perplexity ↓ | 2.365 | 3.015 |
40
+ | gsarti/flores_101_guj | guj | byte_perplexity ↓ | 5.707 | 2.438 |
41
+ | gsarti/flores_101_hau | hau | byte_perplexity ↓ | 8.855 | 5.283 |
42
+ | gsarti/flores_101_heb | heb | byte_perplexity ↓ | 2.921 | 2.903 |
43
+ | gsarti/flores_101_hin | hin | byte_perplexity ↓ | 5.452 | 1.86 |
44
+ | gsarti/flores_101_hrv | hrv | byte_perplexity ↓ | 3.706 | 2.715 |
45
+ | gsarti/flores_101_hun | hun | byte_perplexity ↓ | 4.059 | 2.865 |
46
+ | gsarti/flores_101_hye | hye | byte_perplexity ↓ | 3.127 | 3.411 |
47
+ | gsarti/flores_101_ibo | ibo | byte_perplexity ↓ | 3.95 | 8.008 |
48
+ | gsarti/flores_101_ind | ind | byte_perplexity ↓ | 1.976 | 2.632 |
49
+ | gsarti/flores_101_isl | isl | byte_perplexity ↓ | 5.501 | 4.701 |
50
+ | gsarti/flores_101_ita | ita | byte_perplexity ↓ | 2.314 | 2.104 |
51
+ | gsarti/flores_101_jav | jav | byte_perplexity ↓ | 4.942 | 8.16 |
52
+ | gsarti/flores_101_jpn | jpn | byte_perplexity ↓ | 2.259 | 2.198 |
53
+ | gsarti/flores_101_kam | kam | byte_perplexity ↓ | 9.743 | 10.981 |
54
+ | gsarti/flores_101_kan | kan | byte_perplexity ↓ | 6.234 | 2.373 |
55
+ | gsarti/flores_101_kat | kat | byte_perplexity ↓ | 2.051 | 2.466 |
56
+ | gsarti/flores_101_kaz | kaz | byte_perplexity ↓ | 3.039 | 4.376 |
57
+ | gsarti/flores_101_kea | kea | byte_perplexity ↓ | 7.147 | 9.632 |
58
+ | gsarti/flores_101_khm | khm | byte_perplexity ↓ | 3.367 | 2.646 |
59
+ | gsarti/flores_101_kir | kir | byte_perplexity ↓ | 3.241 | 4.522 |
60
+ | gsarti/flores_101_kor | kor | byte_perplexity ↓ | 2.902 | 3.376 |
61
+ | gsarti/flores_101_lao | lao | byte_perplexity ↓ | 2.331 | 3.106 |
62
+ | gsarti/flores_101_lav | lav | byte_perplexity ↓ | 5.224 | 4.811 |
63
+ | gsarti/flores_101_lin | lin | byte_perplexity ↓ | 4.847 | 8.871 |
64
+ | gsarti/flores_101_lit | lit | byte_perplexity ↓ | 4.543 | 5.183 |
65
+ | gsarti/flores_101_ltz | ltz | byte_perplexity ↓ | 5.591 | 7.158 |
66
+ | gsarti/flores_101_lug | lug | byte_perplexity ↓ | 5.43 | 7.399 |
67
+ | gsarti/flores_101_luo | luo | byte_perplexity ↓ | 12.031 | 11.951 |
68
+ | gsarti/flores_101_mal | mal | byte_perplexity ↓ | 4.794 | 2.054 |
69
+ | gsarti/flores_101_mar | mar | byte_perplexity ↓ | 6.857 | 2.274 |
70
+ | gsarti/flores_101_mkd | mkd | byte_perplexity ↓ | 2.335 | 2.538 |
71
+ | gsarti/flores_101_mlt | mlt | byte_perplexity ↓ | 9.041 | 5.996 |
72
+ | gsarti/flores_101_mon | mon | byte_perplexity ↓ | 3.095 | 4.519 |
73
+ | gsarti/flores_101_mri | mri | byte_perplexity ↓ | 5.266 | 4.438 |
74
+ | gsarti/flores_101_msa | msa | byte_perplexity ↓ | 2.222 | 2.935 |
75
+ | gsarti/flores_101_mya | mya | byte_perplexity ↓ | 2.523 | 2.413 |
76
+ | gsarti/flores_101_nld | nld | byte_perplexity ↓ | 2.799 | 2.293 |
77
+ | gsarti/flores_101_nob | nob | byte_perplexity ↓ | 3.629 | 2.593 |
78
+ | gsarti/flores_101_npi | npi | byte_perplexity ↓ | 6.666 | 2.499 |
79
+ | gsarti/flores_101_nso | nso | byte_perplexity ↓ | 5.015 | 8.485 |
80
+ | gsarti/flores_101_nya | nya | byte_perplexity ↓ | 4.938 | 7.548 |
81
+ | gsarti/flores_101_oci | oci | byte_perplexity ↓ | 3.607 | 4.936 |
82
+ | gsarti/flores_101_orm | orm | byte_perplexity ↓ | 11.316 | 7.145 |
83
+ | gsarti/flores_101_ory | ory | byte_perplexity ↓ | 5.982 | 2.668 |
84
+ | gsarti/flores_101_pan | pan | byte_perplexity ↓ | 4.772 | 2.782 |
85
+ | gsarti/flores_101_pol | pol | byte_perplexity ↓ | 3.012 | 2.432 |
86
+ | gsarti/flores_101_por | por | byte_perplexity ↓ | 1.841 | 2.178 |
87
+ | gsarti/flores_101_pus | pus | byte_perplexity ↓ | 4.624 | 4.785 |
88
+ | gsarti/flores_101_ron | ron | byte_perplexity ↓ | 3.05 | 2.197 |
89
+ | gsarti/flores_101_rus | rus | byte_perplexity ↓ | 1.708 | 1.689 |
90
+ | gsarti/flores_101_slk | slk | byte_perplexity ↓ | 4.038 | 3.419 |
91
+ | gsarti/flores_101_slv | slv | byte_perplexity ↓ | 4.141 | 3.582 |
92
+ | gsarti/flores_101_sna | sna | byte_perplexity ↓ | 4.711 | 5.588 |
93
+ | gsarti/flores_101_snd | snd | byte_perplexity ↓ | 4.206 | 5.667 |
94
+ | gsarti/flores_101_som | som | byte_perplexity ↓ | 9.154 | 4.788 |
95
+ | gsarti/flores_101_spa | spa | byte_perplexity ↓ | 1.796 | 2.098 |
96
+ | gsarti/flores_101_srp | srp | byte_perplexity ↓ | 2.241 | 2.688 |
97
+ | gsarti/flores_101_swe | swe | byte_perplexity ↓ | 3.345 | 2.468 |
98
+ | gsarti/flores_101_swh | swh | byte_perplexity ↓ | 2.684 | 4.473 |
99
+ | gsarti/flores_101_tam | tam | byte_perplexity ↓ | 5.165 | 2.024 |
100
+ | gsarti/flores_101_tel | tel | byte_perplexity ↓ | 6.81 | 2.407 |
101
+ | gsarti/flores_101_tgk | tgk | byte_perplexity ↓ | 3.785 | 4.899 |
102
+ | gsarti/flores_101_tgl | tgl | byte_perplexity ↓ | 3.75 | 2.738 |
103
+ | gsarti/flores_101_tha | tha | byte_perplexity ↓ | 2.104 | 2.035 |
104
+ | gsarti/flores_101_tur | tur | byte_perplexity ↓ | 3.318 | 2.622 |
105
+ | gsarti/flores_101_ukr | ukr | byte_perplexity ↓ | 2.089 | 1.93 |
106
+ | gsarti/flores_101_umb | umb | byte_perplexity ↓ | 11.766 | 11.64 |
107
+ | gsarti/flores_101_urd | urd | byte_perplexity ↓ | 1.779 | 2.982 |
108
+ | gsarti/flores_101_uzb | uzb | byte_perplexity ↓ | 8.5 | 13.209 |
109
+ | gsarti/flores_101_vie | vie | byte_perplexity ↓ | 1.659 | 2.229 |
110
+ | gsarti/flores_101_wol | wol | byte_perplexity ↓ | 6.142 | 13.945 |
111
+ | gsarti/flores_101_xho | xho | byte_perplexity ↓ | 4.69 | 8.42 |
112
+ | gsarti/flores_101_yor | yor | byte_perplexity ↓ | 4.361 | 7.636 |
113
+ | gsarti/flores_101_zho_simpl | zho_simpl | byte_perplexity ↓ | 2.118 | 5.113 |
114
+ | gsarti/flores_101_zho_trad | zho_trad | byte_perplexity ↓ | 2.274 | 5.67 |
115
+ | gsarti/flores_101_zul | zul | byte_perplexity ↓ | 6.017 | 7.341 |
116
+ | headqa | esp | acc ↑ | 0.346 | 0.244 |
117
+ | hellaswag | eng | acc ↑ | 0.535 | 0.592 |
118
+ | lambada_mt_de | deu | acc ↑ | 0.329 | 0.358 |
119
+ | lambada_mt_en | eng | acc ↑ | 0.672 | 0.747 |
120
+ | lambada_mt_es | esp | acc ↑ | 0.476 | 0.397 |
121
+ | lambada_mt_it | ita | acc ↑ | 0.406 | 0.409 |
122
+ | logiqa | eng | acc ↑ | 0.235 | 0.244 |
123
+ | mathqa | eng | acc ↑ | 0.277 | 0.268 |
124
+ | mc_taco | eng | em ↑ | 0.131 | 0.124 |
125
+ | mnli (Median of 15 prompts) | eng | acc ↑ | 0.355 | 0.36 |
126
+ | mnli_mismatched (Median of 15 prompts) | eng | acc ↑ | 0.355 | 0.36 |
127
+ | mrpc | eng | acc ↑ | 0.387 | 0.446 |
128
+ | multirc (Median of 11 prompts) | eng | acc ↑ | 0.571 | 0.599 |
129
+ | openbookqa | eng | acc ↑ | 0.312 | 0.322 |
130
+ | piqa | eng | acc ↑ | 0.781 | 0.791 |
131
+ | prost | eng | acc ↑ | 0.298 | 0.299 |
132
+ | pubmedqa | eng | acc ↑ | 0.741 | 0.709 |
133
+ | qnli | eng | acc ↑ | 0.517 | 0.554 |
134
+ | qqp (Median of 7 prompts) | eng | acc ↑ | 0.588 | 0.395 |
135
+ | race | eng | acc ↑ | 0.39 | 0.402 |
136
+ | rte (Median of 6 prompts) | eng | acc ↑ | 0.52 | 0.495 |
137
+ | sciq | eng | acc ↑ | 0.936 | 0.948 |
138
+ | sst (Median of 6 prompts) | eng | acc ↑ | 0.604 | 0.647 |
139
+ | triviaqa | eng | acc ↑ | 0.183 | 0.342 |
140
+ | tydiqa_primary (Median of 16 prompts) | eng | acc ↑ | 0.281 | 0.148 |
141
+ | webqs | eng | acc ↑ | 0.062 | 0.159 |
142
+ | wic (Median of 11 prompts) | eng | acc ↑ | 0.506 | 0.498 |
143
+ | winogrande | eng | acc ↑ | 0.71 | 0.736 |
144
+ | wnli (Median of 6 prompts) | eng | acc ↑ | 0.57 | 0.563 |
145
+ | wsc (Median of 11 prompts) | eng | acc ↑ | 0.519 | 0.413 |
146
+ | humaneval | python | pass@1 ↑ | 0.155 | 0.0 |
147
+ | humaneval | python | pass@10 ↑ | 0.322 | 0.0 |
148
+ | humaneval | python | pass@100 ↑ | 0.555 | 0.003 |
evaluation/results/tr11/bloom350m/humaneval_temp08.json ADDED
@@ -0,0 +1 @@
1
+ {"pass@1": 0.004573170731707317, "pass@10": 0.025074764360845308, "pass@100": 0.05906180468454194}
evaluation/results/tr11/templates.txt ADDED
@@ -0,0 +1,65 @@
1
+ piaf,None,None,"Given_above_context"
2
+ piaf,None,None,"given_passage_answer"
3
+ piaf,None,None,"context_follow_q"
4
+ piaf,None,None,"after_reading"
5
+ piaf,None,None,"extract_the_answer"
6
+ GEM/wiki_lingua,ar,ar,"article_summary_ar"
7
+ GEM/wiki_lingua,ar,ar,"write_abstract_ar"
8
+ GEM/wiki_lingua,ar,ar,"summarize_above_ar"
9
+ GEM/wiki_lingua,ar,ar,"rephrase_ar"
10
+ GEM/wiki_lingua,ar,ar,"tldr_ar"
11
+ GEM/wiki_lingua,en,en,"article_summary_en"
12
+ GEM/wiki_lingua,en,en,"write_abstract_en"
13
+ GEM/wiki_lingua,en,en,"summarize_above_en"
14
+ GEM/wiki_lingua,en,en,"rephrase_en"
15
+ GEM/wiki_lingua,en,en,"tldr_en"
16
+ GEM/wiki_lingua,es,es,"article_summary_es"
17
+ GEM/wiki_lingua,es,es,"write_abstract_es"
18
+ GEM/wiki_lingua,es,es,"summarize_above_es"
19
+ GEM/wiki_lingua,es,es,"rephrase_es"
20
+ GEM/wiki_lingua,es,es,"tldr_es"
21
+ GEM/wiki_lingua,fr,fr,"article_summary_fr"
22
+ GEM/wiki_lingua,fr,fr,"write_abstract_fr"
23
+ GEM/wiki_lingua,fr,fr,"summarize_above_fr"
24
+ GEM/wiki_lingua,fr,fr,"rephrase_fr"
25
+ GEM/wiki_lingua,fr,fr,"tldr_fr"
26
+ GEM/wiki_lingua,hi,hi,"article_summary_hi"
27
+ GEM/wiki_lingua,hi,hi,"write_abstract_hi"
28
+ GEM/wiki_lingua,hi,hi,"summarize_above_hi"
29
+ GEM/wiki_lingua,hi,hi,"rephrase_hi"
30
+ GEM/wiki_lingua,hi,hi,"tldr_hi"
31
+ GEM/wiki_lingua,id,id,"article_summary_id"
32
+ GEM/wiki_lingua,id,id,"write_abstract_id"
33
+ GEM/wiki_lingua,id,id,"summarize_above_id"
34
+ GEM/wiki_lingua,id,id,"rephrase_id"
35
+ GEM/wiki_lingua,id,id,"tldr_id"
36
+ GEM/wiki_lingua,pt,pt,"article_summary_pt"
37
+ GEM/wiki_lingua,pt,pt,"write_abstract_pt"
38
+ GEM/wiki_lingua,pt,pt,"summarize_above_pt"
39
+ GEM/wiki_lingua,pt,pt,"rephrase_pt"
40
+ GEM/wiki_lingua,pt,pt,"tldr_pt"
41
+ GEM/wiki_lingua,vi,vi,"article_summary_vi"
42
+ GEM/wiki_lingua,vi,vi,"write_abstract_vi"
43
+ GEM/wiki_lingua,vi,vi,"summarize_above_vi"
44
+ GEM/wiki_lingua,vi,vi,"rephrase_vi"
45
+ GEM/wiki_lingua,vi,vi,"tldr_vi"
46
+ GEM/wiki_lingua,zh,zh,"article_summary_zh"
47
+ GEM/wiki_lingua,zh,zh,"write_abstract_zh"
48
+ GEM/wiki_lingua,zh,zh,"summarize_above_zh"
49
+ GEM/wiki_lingua,zh,zh,"rephrase_zh"
50
+ GEM/wiki_lingua,zh,zh,"tldr_zh"
51
+ GEM/web_nlg,en,en,"use-category"
52
+ GEM/web_nlg,en,en,"PALM_prompt"
53
+ GEM/web_nlg,en,en,"very-explicit-description"
54
+ GEM/web_nlg,en,en,"explicit-graph-description2"
55
+ GEM/web_nlg,en,en,"non-explicit-description"
56
+ GEM/web_nlg,ru,ru,"use-category"
57
+ GEM/web_nlg,ru,ru,"PAM-russian"
58
+ GEM/web_nlg,ru,ru,"PALM_prompt"
59
+ GEM/web_nlg,ru,ru,"explicit-graph-description-2-Russian"
60
+ GEM/web_nlg,ru,ru,"explicit-graph-description"
61
+ wmt14,fr-en,fr-en,"version-en-fr-target"
62
+ wmt14,fr-en,fr-en,"a_good_translation-fr-en-target"
63
+ wmt14,fr-en,fr-en,"a_good_translation-en-fr-source+target"
64
+ wmt14,fr-en,fr-en,"xglm-en-fr-source-target"
65
+ wmt14,fr-en,fr-en,"gpt3-en-fr"
evaluation/results/tr13/lmeval/download_bslmeval.slurm ADDED
@@ -0,0 +1,37 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=download-bslmeval
3
+ #SBATCH --partition=prepost
4
+ #SBATCH --nodes=1
5
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
6
+ #SBATCH --cpus-per-task=10 # number of cores per tasks
7
+ #SBATCH --hint=nomultithread # we get physical cores not logical
8
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
9
+ #SBATCH --output=%x-%j.out # output file name
10
+ #SBATCH --account=six@cpu
11
+
12
+ set -x -e
13
+
14
+ echo "START TIME: $(date)"
15
+
16
+ source $six_ALL_CCFRWORK/start-py38-pt111
17
+ conda activate muennighofflmeval
18
+
19
+ #export HF_DATASETS_OFFLINE=1
20
+ #export TRANSFORMERS_OFFLINE=1
21
+
22
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
23
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
24
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
25
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
26
+ export TOKENIZERS_PARALLELISM=false
27
+
28
+ cd /gpfsscratch/rech/six/commun/experiments/muennighoff/bloomckpt/lm-evaluation-harness
29
+
30
+ # GEM/web_nlg_en,GEM/web_nlg_en_challenge_test_numbers,GEM/web_nlg_en_challenge_test_scramble,GEM/web_nlg_en_challenge_validation_sample,GEM/web_nlg_ru,GEM/web_nlg_ru_challenge_test_scramble,GEM/web_nlg_ru_challenge_validation_sample,GEM/wiki_auto_asset_turk_challenge_test_asset_backtranslation,GEM/wiki_auto_asset_turk_challenge_test_asset_bfp02,GEM/wiki_auto_asset_turk_challenge_test_asset_bfp05,GEM/wiki_auto_asset_turk_challenge_test_asset_nopunc,GEM/wiki_auto_asset_turk_challenge_test_turk_backtranslation,GEM/wiki_auto_asset_turk_challenge_test_turk_bfp02,GEM/wiki_auto_asset_turk_challenge_test_turk_bfp05,GEM/wiki_auto_asset_turk_challenge_test_turk_nopunc,GEM/wiki_auto_asset_turk_test_asset,GEM/wiki_auto_asset_turk_test_turk,GEM/wiki_lingua_ar,GEM/wiki_lingua_cs,GEM/wiki_lingua_de,GEM/wiki_lingua_en,GEM/wiki_lingua_es,GEM/wiki_lingua_fr,GEM/wiki_lingua_hi,GEM/wiki_lingua_id,GEM/wiki_lingua_it,GEM/wiki_lingua_ja,GEM/wiki_lingua_ko,GEM/wiki_lingua_nl,GEM/wiki_lingua_pt,GEM/wiki_lingua_ru,GEM/wiki_lingua_th,GEM/wiki_lingua_tr,GEM/wiki_lingua_vi,GEM/wiki_lingua_zh,gem_xsum,gem_xsum_challenge_sample,gem_xsum_challenge_test_backtranslation,gem_xsum_challenge_test_bfp_02,gem_xsum_challenge_test_bfp_05,gem_xsum_challenge_test_covid,gem_xsum_challenge_test_nopunc \
31
+ python3 main.py --model hf-causal \
32
+ --model_args pretrained=hf-internal-testing/tiny-random-gpt2,use_accelerate=True,tokenizer=hf-internal-testing/tiny-random-gpt2,dtype=float16 \
33
+ --tasks wmt14_fr_en,wmt19_ru_en,wmt19_zh_en \
34
+ --device cuda \
35
+ --limit 1 \
36
+ --no_cache \
37
+ --num_fewshot 0
evaluation/results/tr13/tzeroeval/convert_validation_2b5.slurm ADDED
@@ -0,0 +1,352 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=ckpts
3
+ #SBATCH --ntasks=1 # number of MP tasks
4
+ #SBATCH --nodes=1
5
+ #SBATCH --cpus-per-task=40 # number of cores per tasks
6
+ #SBATCH --hint=nomultithread # we get physical cores not logical
7
+ #SBATCH --time=20:00:00 # maximum execution time (HH:MM:SS)
8
+ #SBATCH --output=%x-%j.out # output file name
9
+ #SBATCH --account=ajs@cpu
10
+ #SBATCH --partition=cpu_p1
11
+
12
+ set -x -e
13
+
14
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
15
+ export HF_DATASETS_OFFLINE=1
16
+ export TRANSFORMERS_OFFLINE=1
17
+ conda activate muennighoffmodelconv
18
+
19
+ CKPT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13c-2B5-ml-t0/checkpoints/xp3capmixnewcodelonglossseq
20
+ #CKPT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13f-6B3-ml-t0/checkpoints/p31lossseq
21
+
22
+ CKPTS=(
23
+ global_step250
24
+ global_step500
25
+ global_step750
26
+ global_step1000
27
+ global_step1250
28
+ global_step1500
29
+ global_step1750
30
+ global_step2000
31
+ global_step2250
32
+ global_step2500
33
+ global_step2750
34
+ global_step3000
35
+ )
36
+ EXAMPLE_CKPT=$six_ALL_CCFRSCRATCH/commun/experiments/muennighoff/bloomckpt/2b5/bloom-2b5
37
+ DUMP_PATH=$six_ALL_CCFRSCRATCH/commun/experiments/muennighoff/bloomckpt/2b5t0
38
+ OUT_PREFIX=xp3capmixlossseq_
39
+ #OUT_PREFIX=p31lossseq
40
+
41
+ TP=1
42
+
43
+ ### CONVERT ###
44
+
45
+
46
+ for i in {0..11}; do
47
+ CKPT=${CKPTS[$i]}
48
+ echo "$i"
49
+ echo "Running $CKPT"
50
+
51
+ OUTPUTCKPT=$DUMP_PATH/"$OUT_PREFIX$CKPT"
52
+ python $six_ALL_CCFRSCRATCH/commun/experiments/muennighoff/bloomckpt/transformers_clone/src/transformers/models/bloom/convert_bloom_original_checkpoint_to_pytorch.py --pytorch_dump_folder_path $OUTPUTCKPT --bloom_checkpoint_path $CKPT_PATH/$CKPT --pretraining_tp $TP --bloom_config_file $EXAMPLE_CKPT/config.json
53
+
54
+ # Copy tokenizer.json etc
55
+ cp -r $EXAMPLE_CKPT/*.json $OUTPUTCKPT/
56
+
57
+ eval_script="./eval_$i.slurm"
58
+ cat <<EOT > $eval_script
59
+ #!/bin/bash
60
+ #SBATCH --job-name=evaluate_t0
61
+ #SBATCH --nodes=1
62
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
63
+ #SBATCH --cpus-per-task=8 # number of cores per tasks
64
+ #SBATCH --hint=nomultithread # we get physical cores not logical
65
+ #SBATCH --gres=gpu:1 # number of gpus
66
+ #SBATCH --constraint=a100
67
+ #SBATCH --time 5:00:00 # maximum execution time (HH:MM:SS)
68
+ #SBATCH --output=%x-%j.out # output file name
69
+ #SBATCH --account=six@a100
70
+ #SBATCH --array=0-168
71
+
72
+ set -x -e
73
+
74
+ source $six_ALL_CCFRWORK/start-py38-pt111
75
+ conda activate thomas_t_zero_evaluation
76
+
77
+ CHECKPOINT_PATH=$OUTPUTCKPT
78
+
79
+ WORKDIR=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0
80
+ pushd "\$WORKDIR"
81
+ OUTPUT_DIR="\$CHECKPOINT_PATH/evaluation"
82
+ mkdir -p "\$OUTPUT_DIR"
83
+
84
+ # Validation
85
+ DATASETS_AND_CONFIGS_VAL=(
86
+ head_qa,en,en,"multiple_choice_q_and_a_index_with_context_en",validation
87
+ head_qa,en,en,"multiple_choice_q_and_a_en",validation
88
+ head_qa,en,en,"multiple_choice_q_and_a_index_en",validation
89
+ head_qa,en,en,"multiple_choice_a_and_q_with_context_en",validation
90
+ head_qa,en,en,"multiple_choice_a_and_q_en",validation
91
+ head_qa,es,en,"multiple_choice_q_and_a_index_with_context_en",validation
92
+ head_qa,es,en,"multiple_choice_q_and_a_en",validation
93
+ head_qa,es,en,"multiple_choice_q_and_a_index_en",validation
94
+ head_qa,es,en,"multiple_choice_a_and_q_with_context_en",validation
95
+ head_qa,es,en,"multiple_choice_a_and_q_en",validation
96
+ climate_fever,None,None,"first_evidence_and_claim_itemization",test
97
+ climate_fever,None,None,"claim_and_all_supporting_evidences",test
98
+ climate_fever,None,None,"fifth_evidence_and_claim_itemization",test
99
+ climate_fever,None,None,"third_evidence_claim_pair",test
100
+ climate_fever,None,None,"second_evidence_and_claim_itemization",test
101
+ codah,codah,None,"interrogative_instruction_after_sentence_and_choices",train
102
+ codah,codah,None,"affirmative_instruction_before_sentence_and_choices",train
103
+ codah,codah,None,"affirmative_instruction_after_sentence_and_choices",train
104
+ aqua_rat,raw,None,"select_the_best_option",validation
105
+ aqua_rat,raw,None,"answer_quiz",validation
106
+ aqua_rat,raw,None,"Answer questions from options",validation
107
+ commonsense_qa,None,None,"answer_given_question_without_options",validation
108
+ commonsense_qa,None,None,"question_answering",validation
109
+ commonsense_qa,None,None,"most_suitable_answer",validation
110
+ amazon_reviews_multi,en,en,"prompt_title_to_star",validation
111
+ amazon_reviews_multi,en,en,"prompt_review_to_star",validation
112
+ amazon_reviews_multi,en,en,"prompt_body_title_to_star",validation
113
+ amazon_reviews_multi,zh,en,"prompt_title_to_star",validation
114
+ amazon_reviews_multi,zh,en,"prompt_review_to_star",validation
115
+ amazon_reviews_multi,zh,en,"prompt_body_title_to_star",validation
116
+ amazon_reviews_multi,fr,en,"prompt_title_to_star",validation
117
+ amazon_reviews_multi,fr,en,"prompt_review_to_star",validation
118
+ amazon_reviews_multi,fr,en,"prompt_body_title_to_star",validation
119
+ amazon_reviews_multi,es,en,"prompt_title_to_star",validation
120
+ amazon_reviews_multi,es,en,"prompt_review_to_star",validation
121
+ amazon_reviews_multi,es,en,"prompt_body_title_to_star",validation
122
+ art,None,None,"choose_hypothesis_options",validation
123
+ art,None,None,"choose_hypothesis_believable",validation
124
+ art,None,None,"choose_hypothesis",validation
125
+ art,None,None,"choose_hypothesis_desc",validation
126
+ art,None,None,"choose_hypothesis_likely",validation
127
+ banking77,None,None,"help_page_topic",test
128
+ banking77,None,None,"direct_to_which_department",test
129
+ banking77,None,None,"rephrase_as_banking_term",test
130
+ blbooksgenre,title_genre_classifiction,None,"multi-choice",train
131
+ blbooksgenre,title_genre_classifiction,None,"premise_context_first",train
132
+ blbooksgenre,title_genre_classifiction,None,"classify",train
133
+ blimp,adjunct_island,None,"grammatical_between_1_2",train
134
+ blimp,adjunct_island,None,"grammatical_between_A_B",train
135
+ blimp,adjunct_island,None,"grammatical_which_one_1_2",train
136
+ blimp,adjunct_island,None,"single_sentence_bad_yes_no",train
137
+ blimp,adjunct_island,None,"single_sentence_good_yes_no",train
138
+ conv_ai_3,None,None,"clarification_needed",validation
139
+ conv_ai_3,None,None,"score_give_number",validation
140
+ conv_ai_3,None,None,"ambiguous",validation
141
+ conv_ai_3,None,None,"directly_answer",validation
142
+ conv_ai_3,None,None,"score_how_much",validation
143
+ craigslist_bargains,None,None,"good deal for seller no list price implicit",validation
144
+ craigslist_bargains,None,None,"good deal for seller no list price",validation
145
+ craigslist_bargains,None,None,"good deal for seller",validation
146
+ craigslist_bargains,None,None,"best deal",validation
147
+ ecthr_cases,alleged-violation-prediction,None,"implicit_advice_number",validation
148
+ ecthr_cases,alleged-violation-prediction,None,"ecthr_alleged_articles_declaration_at_end",validation
149
+ ecthr_cases,alleged-violation-prediction,None,"ecthr_alleged_articles_question_at_start",validation
150
+ ecthr_cases,alleged-violation-prediction,None,"implicit_judgment_paragraph",validation
151
+ ecthr_cases,alleged-violation-prediction,None,"confirm number of violated articles",validation
152
+ emo,None,None,"persons_describe",validation
153
+ emo,None,None,"final_message",validation
154
+ emo,None,None,"what_emotion_do_you_think",validation
155
+ emo,None,None,"emotional_state",validation
156
+ emo,None,None,"dialogue_between",validation
157
+ emotion,None,None,"choose_the_best_emotion_label",test
158
+ emotion,None,None,"reply_with_emoation_label",test
159
+ emotion,None,None,"answer_with_class_label",test
160
+ emotion,None,None,"answer_question_with_emotion_label",test
161
+ financial_phrasebank,sentences_allagree,None,"share_price_option",train
162
+ financial_phrasebank,sentences_allagree,None,"sentiment",train
163
+ financial_phrasebank,sentences_allagree,None,"word_comes_to_mind",train
164
+ financial_phrasebank,sentences_allagree,None,"complementary_industries",train
165
+ financial_phrasebank,sentences_allagree,None,"bullish_neutral_bearish",train
166
+ glue,cola,None,"Make sense yes no",validation
167
+ glue,cola,None,"is_this_correct",validation
168
+ glue,cola,None,"editing",validation
169
+ glue,cola,None,"Following sentence acceptable",validation
170
+ glue,cola,None,"Previous sentence acceptable",validation
171
+ glue,sst2,None,"positive negative after",validation
172
+ glue,sst2,None,"review",validation
173
+ glue,sst2,None,"said",validation
174
+ glue,sst2,None,"following positive negative",validation
175
+ glue,sst2,None,"happy or mad",validation
176
+ health_fact,None,None,"claim_veracity_classification_after_reading_I_believe",validation
177
+ health_fact,None,None,"claim_explanation_classification",validation
178
+ health_fact,None,None,"claim_veracity_classification_tell_me",validation
179
+ hlgd,None,None,"is_same_event_with_time_interrogative_related",validation
180
+ hlgd,None,None,"is_same_event_interrogative_talk",validation
181
+ hlgd,None,None,"is_same_event_with_time_interrogative_talk",validation
182
+ hlgd,None,None,"is_same_event_refer",validation
183
+ hlgd,None,None,"is_same_event_editor_asks",validation
184
+ hyperpartisan_news_detection,byarticle,None,"consider_does_it_follow_a_hyperpartisan_argumentation",train
185
+ hyperpartisan_news_detection,byarticle,None,"follows_hyperpartisan_argumentation",train
186
+ hyperpartisan_news_detection,byarticle,None,"consume_with_caution",train
187
+ hyperpartisan_news_detection,byarticle,None,"extreme_left_wing_or_right_wing",train
188
+ hyperpartisan_news_detection,byarticle,None,"consider_it_exhibits_extreme_one_sidedness",train
189
+ liar,None,None,"Given statement guess category",validation
190
+ lince,sa_spaeng,None,"original poster expressed sentiment",validation
191
+ lince,sa_spaeng,None,"sentiment trying to express",validation
192
+ lince,sa_spaeng,None,"express sentiment",validation
193
+ lince,sa_spaeng,None,"negation template",validation
194
+ lince,sa_spaeng,None,"the author seem",validation
195
+ math_qa,None,None,"choose_correct_og",test
196
+ math_qa,None,None,"pick_the_correct",test
197
+ math_qa,None,None,"first_choice_then_problem",test
198
+ math_qa,None,None,"problem_set_type",test
199
+ math_qa,None,None,"gre_problem",test
200
+ movie_rationales,None,None,"Standard binary sentiment analysis",validation
201
+ movie_rationales,None,None,"Evidences sentiment classification",validation
202
+ movie_rationales,None,None,"Evidences + review",validation
203
+ movie_rationales,None,None,"Generate evidences and sentiment",validation
204
+ mwsc,None,None,"in-the-sentence-question-first",validation
205
+ mwsc,None,None,"what-think",validation
206
+ mwsc,None,None,"in-the-sentence",validation
207
+ mwsc,None,None,"options-or",validation
208
+ mwsc,None,None,"is-correct",validation
209
+ poem_sentiment,None,None,"positive_or_negative_sentiment_variation_2",validation
210
+ poem_sentiment,None,None,"question_answer_format",validation
211
+ poem_sentiment,None,None,"guess_sentiment_without_options_variation_1",validation
212
+ poem_sentiment,None,None,"positive_or_negative_sentiment_variation_1",validation
213
+ poem_sentiment,None,None,"most_appropriate_sentiment",validation
214
+ onestop_english,None,None,"esl_context",train
215
+ onestop_english,None,None,"ara_context",train
216
+ onestop_english,None,None,"determine_reading_level_from_the_first_three_sentences",train
217
+ onestop_english,None,None,"esl_variation",train
218
+ onestop_english,None,None,"assess",train
219
+ pubmed_qa,pqa_labeled,None,"Long Answer to Final Decision",train
220
+ pubmed_qa,pqa_labeled,None,"Question Answering (Short)",train
221
+ riddle_sense,None,None,"most_suitable_answer",validation
222
+ riddle_sense,None,None,"answer_given_question_without_options",validation
223
+ riddle_sense,None,None,"question_to_answer_index",validation
224
+ riddle_sense,None,None,"question_answering",validation
225
+ scicite,None,None,"Classify intent w/section (select choice)",validation
226
+ scicite,None,None,"Classify intent (choices first)",validation
227
+ scicite,None,None,"Classify intent (select choice)",validation
228
+ scicite,None,None,"Classify intent",validation
229
+ scicite,None,None,"can_describe",validation
230
+ selqa,answer_selection_analysis,None,"is-he-talking-about",validation
231
+ selqa,answer_selection_analysis,None,"would-make-sense-qu-rand",validation
232
+ selqa,answer_selection_analysis,None,"make-sense-rand",validation
233
+ selqa,answer_selection_analysis,None,"which-answer-1st-vs-random",validation
234
+ snips_built_in_intents,None,None,"voice_intent",train
235
+ snips_built_in_intents,None,None,"categorize_query",train
236
+ snips_built_in_intents,None,None,"intent_query",train
237
+ snips_built_in_intents,None,None,"categorize_query_brief",train
238
+ snips_built_in_intents,None,None,"query_intent",train
239
+ )
240
+
241
+ DATASET_AND_CONFIG="\${DATASETS_AND_CONFIGS_VAL[\$SLURM_ARRAY_TASK_ID]}"
242
+ echo "\$ARGUMENT"
243
+
244
+ # Run T0 evaluation
245
+ # For PrefixLM add --prefixlm
246
+ IFS=',' read dataset_name dataset_config_name template_config_name template_name split <<< "\${DATASET_AND_CONFIG}"
247
+ python t-zero/evaluation/run_eval.py \
248
+ --dataset_name "\$dataset_name" \
249
+ --dataset_config_name "\$dataset_config_name" \
250
+ --template_config_name "\$template_config_name" \
251
+ --template_name "\$template_name" \
252
+ --split "\$split" \
253
+ --model_name_or_path "\$CHECKPOINT_PATH" \
254
+ --output_dir "\$OUTPUT_DIR" \
255
+ --per_device_eval_batch_size 4 \
256
+ --max_length 2048 \
257
+ --dtype float16
258
+ EOT
259
+
260
+ sbatch $eval_script
261
+
262
+
263
+ lm_eval_script="./lm_eval_$i.slurm"
264
+ cat <<EOT > $lm_eval_script
265
+ #!/bin/bash
266
+ #SBATCH --job-name=lmeval
267
+ #SBATCH --nodes=1
268
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
269
+ #SBATCH --cpus-per-task=8 # number of cores per tasks
270
+ #SBATCH --hint=nomultithread # we get physical cores not logical
271
+ #SBATCH --gres=gpu:1 # number of gpus
272
+ #SBATCH --constraint=a100
273
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
274
+ #SBATCH --output=%x-%j.out # output file name
275
+ #SBATCH --account=six@a100
276
+ #SBATCH --array=0-22
277
+
278
+ set -x -e
279
+
280
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
281
+ conda activate muennighofflmevalgen
282
+
283
+ echo "START TIME: $(date)"
284
+
285
+ # defining the right environment variables
286
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
287
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
288
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
289
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
290
+ export HF_DATASETS_OFFLINE=1
291
+ export TRANSFORMERS_OFFLINE=1
292
+ export TOKENIZERS_PARALLELISM=false
293
+
294
+ # Converted transformer checkpoint
295
+ MODEL_CKPT=$OUTPUTCKPT
296
+
297
+ cd /gpfsscratch/rech/six/commun/experiments/muennighoff/lm-evaluation-harness
298
+
299
+
300
+ DATASETS_AND_CONFIGS=(
301
+ wmt14_fr_en,fr-en,"version-en-fr-target"
302
+ wmt14_fr_en,fr-en,"a_good_translation-en-fr-target"
303
+ wmt14_fr_en,fr-en,"a_good_translation-en-fr-source+target"
304
+ wmt14_fr_en,fr-en,"xglm-en-fr-target"
305
+ wmt14_fr_en,fr-en,"gpt3-en-fr"
306
+ wmt14_fr_en,fr-en,"version-fr-en-target"
307
+ wmt14_fr_en,fr-en,"a_good_translation-fr-en-target"
308
+ wmt14_fr_en,fr-en,"a_good_translation-fr-en-source+target"
309
+ wmt14_fr_en,fr-en,"xglm-fr-en-target"
310
+ wmt14_fr_en,fr-en,"gpt3-fr-en"
311
+ wmt14_hi_en,hi-en,"version-en-hi-target"
312
+ wmt14_hi_en,hi-en,"a_good_translation-en-hi-target"
313
+ wmt14_hi_en,hi-en,"a_good_translation-en-hi-source+target"
314
+ wmt14_hi_en,hi-en,"xglm-en-hi-target"
315
+ wmt14_hi_en,hi-en,"gpt-3-en-hi-target"
316
+ wmt14_hi_en,hi-en,"version-hi-en-target"
317
+ wmt14_hi_en,hi-en,"a_good_translation-hi-en-target"
318
+ wmt14_hi_en,hi-en,"a_good_translation-hi-en-source+target"
319
+ wmt14_hi_en,hi-en,"xglm-hi-en-target"
320
+ wmt14_hi_en,hi-en,"gpt-3-hi-en-target"
321
+ mlsum_es,"es","layman_summ_es"
322
+ mlsum_es,"es","palm_prompt"
323
+ mlsum_es,"es","summarise_this_in_es_few_sentences"
324
+ )
325
+
326
+ DATASET_AND_CONFIG="\${DATASETS_AND_CONFIGS[\$SLURM_ARRAY_TASK_ID]}"
327
+ echo "\$ARGUMENT"
328
+
329
+ IFS=',' read dataset_name lang template_name <<< "\${DATASET_AND_CONFIG}"
330
+
331
+ # Use this fork of lm-eval: https://github.com/bigscience-workshop/lm-evaluation-harness/pull/109
332
+ python main.py \
333
+ --model_api_name 'hf-causal' \
334
+ --model_args "pretrained=\$MODEL_CKPT,use_accelerate=True,tokenizer=\$MODEL_CKPT,dtype=float16" \
335
+ --device cuda \
336
+ --batch_size 16 \
337
+ --no_tracking \
338
+ --task_name "\$dataset_name" \
339
+ --template_names "\$template_name" \
340
+ --bootstrap_iters 10 \
341
+ --limit 3000
342
+
343
+ mkdir -p "$OUTPUTCKPT/evaluation/\$dataset_name"
344
+ mv "outputs/*$CKPT*\$dataset_name*" "$OUTPUTCKPT/evaluation/\$dataset_name/"
345
+
346
+ echo "END TIME: $(date)"
347
+ EOT
348
+
349
+ sbatch $lm_eval_script
350
+
351
+
352
+ done
train/tr1-13B-base/README.md ADDED
@@ -0,0 +1,850 @@
1
+ # Train 1 - 13B - unmodified Megatron gpt2 - baseline
2
+
3
+
4
+ ## Task
5
+
6
+ Auto-regressive objective using regular Megatron-LM GPT2 language model
7
+
8
+ ## Environment
9
+
10
+ To launch the environment use [start-tr1-13B](./start-tr1-13B)
11
+
12
+ ```
13
+ source $six_ALL_CCFRWORK/code/tr1-13B/bigscience/train/tr1-13B-base/start-tr1-13B
14
+ ```
15
+
16
+ We are using the following branches specific to this training:
17
+
18
+ - `$six_ALL_CCFRWORK/code/tr1-13B/Megatron-DeepSpeed-tr1-13B` a frozen `tr1-13B` branch - can cherry pick from `main` if need be.
19
+ - `$six_ALL_CCFRWORK/code/tr1-13B/DeepSpeed-big-science` - a mostly frozen `big-science` branch - under Deepspeed's team control - so it may also require a specific SHA if something gets broken upstream.
20
+
21
+
22
+ How the environment was built:
23
+ ```
24
+ export CONDA_ENVS_PATH=$six_ALL_CCFRWORK/conda
25
+
26
+ conda create -y -n tr1-13B python=3.8
27
+ conda activate tr1-13B
28
+ conda install pytorch==1.8.1 torchvision cudatoolkit=10.2 -c pytorch -y
29
+ pip install deepspeed
30
+ pip install tensorboard
31
+
32
+ mkdir $six_ALL_CCFRWORK/code/tr1-13B
33
+
34
+ cd $six_ALL_CCFRWORK/code/tr1-13B
35
+ git clone https://github.com/bigscience-workshop/bigscience
36
+
37
+ cd $six_ALL_CCFRWORK/code/tr1-13B
38
+ git clone https://github.com/huggingface/transformers
39
+ cd transformers
40
+ pip install -e .
41
+
42
+ cd $six_ALL_CCFRWORK/code/tr1-13B
43
+ git clone https://github.com/bigscience-workshop/Megatron-DeepSpeed Megatron-DeepSpeed-tr1-13B
44
+ cd Megatron-DeepSpeed-tr1-13B
45
+ git checkout tr1-13B
46
+ pip install -r requirements.txt
47
+ pip install -e .
48
+ mkdir data
49
+ cd data
50
+ wget https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-vocab.json
51
+ wget https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-merges.txt
52
+ ```
53
+
54
+ The `apex` and `deepspeed` builds require an instance with a beefy CPU and internet access (unless everything was cloned beforehand), so continue on the `prepost` partition:
55
+
56
+ ```
57
+ ssh jean-zay-pp
58
+ conda activate tr1-13B
59
+ export CONDA_ENVS_PATH=$six_ALL_CCFRWORK/conda
60
+
61
+ cd $six_ALL_CCFRWORK/code/tr1-13B
62
+ git clone https://github.com/microsoft/DeepSpeed DeepSpeed-big-science
63
+ cd DeepSpeed-big-science
64
+ git checkout big-science
65
+ rm -rf build
66
+ TORCH_CUDA_ARCH_LIST="7.0" DS_BUILD_CPU_ADAM=1 DS_BUILD_AIO=1 DS_BUILD_UTILS=1 pip install -e . --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check 2>&1 | tee build.log
67
+
68
+ cd $six_ALL_CCFRWORK/code/tr1-13B
69
+ git clone https://github.com/NVIDIA/apex
70
+ cd apex
71
+ pip install --global-option="--cpp_ext" --global-option="--cuda_ext" --no-cache -v --disable-pip-version-check . 2>&1 | tee build.log
72
+
73
+ #cp $six_ALL_CCFRWORK/code/tr1-13B/bigscience/train/tr1-13B-base/start-tr1-13B ...
74
+
75
+ ```
76
+
77
+
78
+ ## Architecture
79
+
80
+ Config:
81
+
82
+ ```
83
+ NLAYERS=40
84
+ NHIDDEN=5120
85
+ NHEADS=32
86
+ FFN_HIDDEN_SIZE=20480
87
+
88
+ # --ffn_hidden_size $FFN_HIDDEN_SIZE \
89
+ GPT_ARGS=" \
90
+ --num-layers $NLAYERS \
91
+ --hidden-size $NHIDDEN \
92
+ --ffn-hidden-size $FFN_HIDDEN_SIZE \
93
+ --num-attention-heads $NHEADS \
94
+ [...]
95
+ "
96
+ ```
97
+
98
+ Sanity check:
99
+ ```
100
+ $ VOCAB_SIZE=50257 NLAYERS=40 NHIDDEN=5120 NHEADS=32 SEQ_LEN=2048; python -c "h=$NHIDDEN; l=$NLAYERS; s=$SEQ_LEN; v=$VOCAB_SIZE; print(f'Model size: {(l * (12*h**2 + 13*h) + (v * h) + (s * h) ) / 10**9 :.0f}B')"
101
+ Model size: 13B
102
+ ```
103
+
104
+
105
+
106
+ ## Sequence Length
107
+
108
+ Default Megatron-LM language model with 2048 tokens sequence length
109
+
110
+ ```
111
+ SEQ_LEN=2048
112
+
113
+ --seq-length $SEQ_LEN \
114
+ --max-position-embeddings $SEQ_LEN \
115
+
116
+ ```
117
+
118
+
119
+ ## Global batch size
120
+
121
+ GBS = Global Batch Size
122
+
123
+ Use a schedule:
124
+
125
+ - start from 32k tokens (gbs=16)
126
+ - increase linearly to 2048k (gbs=1024) over 5M samples (for a total of ~10B tokens / 5k steps)
127
+ - then continue at 2048k (gbs=1024) for 145M samples (290B tokens / 145K steps)
128
+
129
+ Total: 300B tokens (150K steps)
130
+
131
+ Note: the training script wasn't updated when we flipped seqlen/gbs from 1024/2048 to 2048/1024, so we are currently planning to train for 300K steps (samples) and 600B tokens. But since the longer setting doesn't affect anything else, we will simply stop halfway. I updated the document to use the correct 150K number so we don't repeat this mistake in the next training.
132
+
133
+ syntax:
134
+ ```
135
+ --rampup-batch-size <start batch size> <batch size increment> <ramp-up samples>
136
+ ```
137
+
138
+ At seqlen 2048 (bs=1 is 2k tokens), we get:
139
+
140
+ ```
141
+ --rampup-batch-size 16 16 5_000_000 \
142
+ --global-batch-size 1024 \
143
+ ```
144
+
145
+ This means it will start with global batch size 16 and over 63 (`(1024-16)/16`) intervals will increase the
146
+ batch size by 16 linearly to 1024.
147
+
148
+ 79365 (`5_000_000/63`) is the number of samples before the next GBS increment. That is we run at GBS=16 for 79365 samples, or 4960 steps (`79365/16`). Then we run at GBS=32 for 79365 samples, or 2480 steps. Then 1653 steps at GBS=48, 1240 at GBS=64, etc....
149
+
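+ As a quick sanity check of the numbers above (plain arithmetic, not part of the training scripts; the constants are just restated from this section):
+
+ ```bash
+ # 63 increments, ~79365 samples per GBS value, and the step counts quoted above
+ python3 -c "
+ start, incr, ramp_samples, final = 16, 16, 5_000_000, 1024
+ n_incr = (final - start) // incr      # 63 intervals
+ per_incr = ramp_samples // n_incr     # 79365 samples at each GBS value
+ print(n_incr, per_incr)
+ for gbs in (16, 32, 48, 64):
+     print(f'GBS={gbs}: ~{per_incr // gbs} steps')
+ "
+ ```
+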
150
+ Notes:
151
+ * `--rampup-batch-size` requires the use of `--train-samples` and can't be used with `--train-iters`.
152
+ * global batch size has to be divisible by micro-batch-size * DP_SIZE
153
+
154
+ Important: the software will fail if GBS is not divisible by `MBS * DP_SIZE`.
155
+ Jared's recommendation is to use MBS=1, which makes it much easier to keep GBS divisible by `MBS * DP_SIZE` even at GBS=16.
156
+
157
+ `DP_SIZE=$NNODES*$GPUS_PER_NODE/($PP_SIZE*$TP_SIZE)`
158
+
159
+ Since the increments are in steps of GBS=16, we can use at most DP_SIZE=16, which means that at most we can use 32 nodes (`32*4/(4*2)=16`).
160
+
161
+ Once GBS reaches 1024, we can use up to 8192 GPUs (1024*2*4), so we will be able to switch to 64 nodes or maybe even 128 nodes (4 GPUs each). We can't use just any node count between 64 and 128 though, because DP_SIZE has to divide GBS=1024 evenly, i.e. it has to be a power of 2. So 96 nodes won't work, since its DP_SIZE has a factor of 3 (see the quick check below).
162
+
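+ A minimal check of which node counts are usable, assuming MBS=1 and the `PP_SIZE*TP_SIZE=8`, 4-GPUs-per-node setup implied above:
+
+ ```bash
+ # DP_SIZE = NNODES*GPUS_PER_NODE/(PP_SIZE*TP_SIZE); GBS must stay divisible by MBS*DP_SIZE
+ python3 -c "
+ GPUS_PER_NODE, PP_TIMES_TP, GBS = 4, 8, 1024
+ for nnodes in (32, 64, 96, 128):
+     dp = nnodes * GPUS_PER_NODE // PP_TIMES_TP
+     print(f'{nnodes} nodes -> DP_SIZE={dp}, usable: {GBS % dp == 0}')
+ "
+ ```
+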
163
+
164
+
165
+
166
+ ## Checkpoints
167
+
168
+ We need the checkpoints:
169
+
170
+ 1. in order to be able to resume the training when the training is prematurely stopped for whatever reason.
171
+ 2. In addition, a special saving schedule has been requested by the interpretability group.
172
+
173
+ Because there are 3 different schedules and Megatron-LM has only a fixed checkpoint saving schedule, we will need 3 different run scripts, to be launched in a sequence, each starting once the previous has finished.
174
+
175
+ 1. steps 1-100 - 10 checkpoints, interval 10 steps
176
+ 2. steps 101-1000 - 50 checkpoints, interval 18 steps
177
+ 3. steps 1001-150K - 100+ checkpoints, interval 1500 steps
178
+ 4. if still needed, can continue with schedule 3
179
+
180
+ note: the interpretability study doesn't care about checkpoints in the range of 1k-20k, so in that range we only save checkpoints to be able to restart the training.
181
+
182
+ Ideally it would have been:
183
+ ```
184
+ ROUND=1
185
+ if [[ ${ROUND} == 1 ]]; then TRAIN_ITER=100 SAVE_INTERVAL=10
186
+ elif [[ ${ROUND} == 2 ]]; then TRAIN_ITER=1000 SAVE_INTERVAL=18
187
+ elif [[ ${ROUND} == 3 ]]; then TRAIN_ITER=150000 SAVE_INTERVAL=1500
188
+ else echo "invalid ROUND: $ROUND"
189
+ fi
190
+ --train-iters $TRAIN_ITER \
191
+ --save-interval $SAVE_INTERVAL \
192
+ ```
193
+
194
+ Unfortunately, `--rampup-batch-size` can't work with `--train-iters` and we have to use `--train-samples` instead. That value has to stay fixed throughout all of the trainings and can't be changed, otherwise resuming from a checkpoint will break.
195
+
196
+ So the only thing left is to use `--exit-interval` which is in steps.
197
+
198
+ Which gives us the three rounds:
199
+
200
+ ```
201
+ ROUND=1
202
+ if [[ ${ROUND} == 1 ]]; then EXIT_INTERVAL=100 SAVE_INTERVAL=10
203
+ elif [[ ${ROUND} == 2 ]]; then EXIT_INTERVAL=900 SAVE_INTERVAL=18
204
+ elif [[ ${ROUND} == 3 ]]; then SAVE_INTERVAL=1500
205
+ else echo "invalid ROUND: $ROUND"
206
+ fi
207
+
208
+ --train-samples 150_000_000 \
209
+ --exit-interval $EXIT_INTERVAL \
210
+ --save-interval $SAVE_INTERVAL \
211
+ ```
212
+
213
+ `--exit-interval` counts steps only for the current run, regardless of previous steps. So to stop at effective step 1000, in the second round we tell it to exit at 900 (the first round did the first 100).
214
+
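+ Just to illustrate the per-run vs. effective-step bookkeeping (plain arithmetic, nothing Megatron-specific):
+
+ ```bash
+ # each round's --exit-interval only counts that run's own steps
+ python3 -c "
+ total = 0
+ for rnd, exit_interval in enumerate((100, 900), start=1):
+     total += exit_interval
+     print(f'round {rnd}: exit after {exit_interval} steps of this run -> effective step {total}')
+ "
+ ```
+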
215
+ And unfortunately, this proved not to be supported by Megatron-LM either at the moment. There are a few possible ways to approach this:
216
+
217
+ 1. One approach is to simply run 3 independent trainings, using the same `--seed` and the `--exit-interval` values as above, moving the checkpoints away after each training.
218
+
219
+ 2. XXX: The Megatron code could also be extended to implement `--exit-samples`, i.e. a sample-based exit strategy.
221
+
222
+ 3. Yet another approach is to do it manually: kill the training after 100 iterations, then restart and kill it after another 900 iterations, changing the save interval each time, and manually fixing up `checkpoints/latest` to point to the correct checkpoint - since the manual killing might leave a few extra checkpoints. So the recipe to follow:
223
+
224
+ ```
225
+ ROUND=1
226
+ if [[ ${ROUND} == 1 ]]; then SAVE_INTERVAL=10
227
+ elif [[ ${ROUND} == 2 ]]; then SAVE_INTERVAL=18
228
+ elif [[ ${ROUND} == 3 ]]; then SAVE_INTERVAL=1500
229
+ else echo "invalid ROUND: $ROUND"
230
+ fi
231
+
232
+ --train-samples 150_000_000 \
233
+ --save-interval $SAVE_INTERVAL \
234
+ ```
235
+
236
+ (could also do it with 3 parallel jobs by using the same seed!)
237
+
238
+ ```
239
+ --seed 42
240
+ ```
241
+
242
+ Therefore do this manually:
243
+
244
+ 0.
245
+ * delete the old checkpoints `$six_ALL_CCFRSCRATCH/checkpoints/tr1-13B/checkpoints`
246
+
247
+ 1.
248
+
249
+ * set to `ROUND=1`
250
+ * `sbatch tr1-13B-round1.slurm`
251
+ * run for 100+ steps
252
+ * scancel the job
253
+ * clean up `$six_ALL_CCFRSCRATCH/checkpoints/tr1-13B/checkpoints` to remove any checkpoints beyond 100
254
+ * make sure `$six_ALL_CCFRSCRATCH/checkpoints/tr1-13B/checkpoints/latest` contains 100
255
+
256
+
257
+ 2.
258
+
259
+ * set to `ROUND=2`
260
+ * `sbatch tr1-13B-round1.slurm`
261
+ * run for the additional 900+ steps (it's incremental, so the script already knows it started at 100)
262
+ * scancel the job
263
+ * clean up `$six_ALL_CCFRSCRATCH/checkpoints/tr1-13B/checkpoints` to remove any checkpoints beyond 1000
264
+ * make sure `$six_ALL_CCFRSCRATCH/checkpoints/tr1-13B/checkpoints/latest` contains 1000
265
+
266
+
267
+ 3.
268
+
269
+ * set to `ROUND=3`
270
+ * `sbatch tr1-13B-round1.slurm`
271
+ * run normally
272
+
273
+
274
+
275
+ Because it'd be potentially too demanding to export TBs of data and the intended users might not even be able to download all that data, most likely we will need to run the interpretability post-analysis experiments on JZ and send the resulting reports to those who need them.
276
+
277
+ Megatron-LM resumes from the most recent checkpoint by default. Does it need the exact path or does it auto-discover the latest checkpoint?
278
+
279
+ ```
280
+ --load path_to_check_point \
281
+ ```
282
+
283
+
284
+ Remi suggests 100TB on SCRATCH shouldn't be a problem.
285
+
286
+
287
+
288
+
289
+
290
+ ## Optimizer
291
+
292
+ - AdamW, β1=0.9, β2=0.999, eps=1e-8
293
+ - learning rate:
294
+ * peak=1e-4
295
+ * warmup over 2000 steps
296
+ * cosine decay for learning rate down to 10% of its value, over 260B tokens (after 260 billion tokens, training continues at 10% of the original learning rate)
297
+ - clipping by global norm of 1 (as in GPT-3)
298
+ - weight decay of 0.1
299
+
300
+ We need lr-decay in samples, so tokens2samples = 260B / 2048 = 126_953_125
301
+
302
+ We need lr-warmup in samples too, so doing the math again as for the checkpoints:
+
+ 2000 = 160*12 + 80
+
+ so we will reach step 2000 after 216_320 samples (`16*160*12*(12+1)/2 + 16*13*80`)
307
+
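+ The two sample counts above can be reproduced with plain arithmetic (this just restates the formulas, it is not part of the training setup):
+
+ ```bash
+ # lr-decay and lr-warmup expressed in samples, as used in the flags below
+ python3 -c "
+ print(260_000_000_000 // 2048)           # 126_953_125 lr-decay samples
+ print(16*160*12*(12+1)//2 + 16*13*80)    # 216_320 lr-warmup samples
+ "
+ ```
+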
308
+
309
+
310
+ ```
311
+ --optimizer adam \
312
+ --adam-beta1 0.9 \
313
+ --adam-beta2 0.999 \
314
+ --adam-eps 1e-8 \
315
+ --lr 1e-4 \
316
+ --min-lr 1e-5 \
317
+ --lr-decay-style cosine \
318
+ --lr-decay-samples 126_953_125 \
319
+ --lr-warmup-samples 216_320 \
320
+ --clip-grad 1.0 \
321
+ --weight-decay 1e-1 \
322
+ ```
323
+
324
+
325
+ ## Logging
326
+
327
+
328
+ For now we enable all tensorboard features; later we might decide not to log all of it.
329
+
330
+ We are logging:
331
+
332
+ - lr (enabled by default)
333
+ - bs (enabled)
334
+ - loss (always)
335
+ - loss-scale (log_loss) (enabled by default)
336
+ - grad-norm (always)
337
+ - num-zeros (always)
338
+ - param-norm (always)
339
+ - timers (enabled)
340
+ - validation loss (always)
341
+ - validation ppl (perplexity) (enabled)
342
+
343
+ Almost all of these are also logged against `consumed_train_samples` for comparison.
344
+
345
+ XXX: nice to have:
346
+ - throughput - Tflops/gpu or tokens
347
+
348
+
349
+ **Tensorboard config**:
350
+
351
+ ```
352
+ TENSORBOARD_PATH=$DATA_OUTPUT_PATH/tensorboard
353
+
354
+ --tensorboard-dir $TENSORBOARD_PATH \
355
+ --tensorboard-queue-size 5 \
356
+ --log-timers-to-tensorboard \
357
+ --log-batch-size-to-tensorboard \
358
+ --log-validation-ppl-to-tensorboard \
359
+ ```
360
+
361
+ **CodeCarbon config**:
362
+
363
+ ```
364
+ CODECARBON_PATH=$DATA_OUTPUT_PATH/codecarbon
365
+
366
+ --codecarbon-dir $CODECARBON_PATH \
367
+ ```
368
+
369
+
370
+
371
+ **Training logs**
372
+
373
+ All training logs are piped into `$six_ALL_CCFRSCRATCH/checkpoints/tr1-13B/logs/main_log.txt`.
374
+
375
+
376
+ ## Exporting
377
+
378
+ Before starting training create cloned git repos to where output data will go.
379
+
380
+ The last 4 should all be git repo clones
381
+ ```
382
+ DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr1-13B
383
+ CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints
384
+ TENSORBOARD_PATH=$DATA_OUTPUT_PATH/tensorboard
385
+ CODECARBON_PATH=$DATA_OUTPUT_PATH/codecarbon
386
+ LOGS_PATH=$DATA_OUTPUT_PATH/logs
387
+ ```
388
+
389
+ I created 4 repos at https://huggingface.co/bigscience/ and now we can clone those as the dirs data will be output into:
390
+
391
+ ```
392
+ cd $six_ALL_CCFRSCRATCH/checkpoints/tr1-13B
393
+ git clone https://huggingface.co/bigscience/tr1-13B-checkpoints checkpoints
394
+ git clone https://huggingface.co/bigscience/tr1-13B-tensorboard tensorboard
395
+ git clone https://huggingface.co/bigscience/tr1-13B-codecarbon codecarbon
396
+ git clone https://huggingface.co/bigscience/tr1-13B-logs logs
397
+ ```
398
+
399
+ If this is your first time running git-lfs on this system, you need to init it once:
400
+ ```
401
+ module load git-lfs
402
+ git lfs install
403
+ ```
404
+
405
+ Most of the data types we are going to sync will be large or huge, and most are already lfs-tracked by default, so no setup is required. The exception is our log file, which can also grow large, so we need to set it up:
406
+
407
+ ```
408
+ cd logs
409
+ git lfs track "*.txt"  # quote the pattern so the shell doesn't expand it
410
+ git commit -m "large text files" .gitattributes
411
+ git push
412
+ ```
413
+
414
+ ### Cronjobs to auto-sync the hub
415
+
416
+ Now we just need a cronjob to automatically do the following for each type of data we export:
417
+
418
+ ```
419
+ cd checkpoints
420
+ git add */*.pt
421
+ git commit -am "new data"
422
+ git push
423
+ ```
424
+
425
+ This job is performed automatically by `hub-sync.py`. For full details see: [Automated upload to the hub](../../data/export.md#automated-upload-to-the-hub).
426
+
427
+ **Weights checkpoints**
428
+
429
+ Currently, we aren't exporting checkpoints.
430
+
431
+ **Tensorboard**
432
+
433
+ Here is the slurm script to sync the tensorboard data: [tr1-13B-hub-sync-tensorboard.slurm](./tr1-13B-hub-sync-tensorboard.slurm)
434
+
435
+ **CodeCarbon**
436
+
437
+ Currently the feature is not enabled, so there is nothing to log.
438
+
439
+ **Log of logs**
440
+
441
+ Let's also create a log of logs. We will pipe all the logs in there and also the various status reports - e.g. while SLURM has the training queued and it's not yet running.
442
+
443
+ Here is the slurm script to sync the raw logs data: [tr1-13B-hub-sync-logs.slurm](./tr1-13B-hub-sync-logs.slurm)
444
+
445
+ The main source of logs is the training scripts. The logs are gathered via
446
+ ```
447
+ $CMD ... 2>&1 | tee -a $LOGS_PATH/main_log.txt
448
+ ```
449
+ in the training slurm script.
450
+
451
+ XXX: we could also add various other diagnostics appended to the main log file. e.g. shared memory, etc.
452
+
453
+
454
+
455
+
456
+ ## Deepspeed config
457
+
458
+ We use Deepspeed's activation checkpointing to use a lot less GPU memory:
459
+
460
+ ```
461
+ --deepspeed-activation-checkpointing \
462
+ ```
463
+
464
+ Possible extras:
465
+
466
+ - Enabling `"contiguous_memory_optimization": true,` can help to reduce memory fragmentation, but it requires setting `number_checkpoints`. This should be set equal to the number of transformer blocks per pipeline stage times the number of pipeline parallel stages. Samyam says: Full disclaimer: I have only used this with ZeRO but not with pipeline parallelism. But by setting the number_checkpoints as described, it should work for PP too. The benefit of using it is usually only apparent when running very close to the memory limit.
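+
+ As a sketch only (we have not enabled this for tr1-13B), the corresponding section inside the DeepSpeed config json would look roughly like this, with `number_checkpoints = NLAYERS/PP_SIZE * PP_SIZE = 40/4 * 4 = 40` per the rule above:
+
+ ```
+ "activation_checkpointing": {
+     "partition_activations": false,
+     "contiguous_memory_optimization": true,
+     "number_checkpoints": 40
+ },
+ ```
+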
467
+
468
+
469
+
470
+ ## Dataset
471
+
472
+ - Full 304.2M version (529GB) : `$six_ALL_CCFRWORK/datasets-custom/oscar-en`
473
+ - Tiny 10K version (56M): `$six_ALL_CCFRWORK/datasets-custom/oscar-en-10k`
474
+
475
+ We are using the English-only subset of [the OSCAR dataset](https://huggingface.co/datasets/oscar) with full documents (*not* individual sentences).
476
+
477
+ We have about 300M records in 1.2TB of jsonl data (about 3/4 of which are smaller than 1K tokens), which amounts to about 280B tokens (estimated at about 4.5 chars/word).
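+
+ As a rough sanity check of that figure (a sketch in the same style as the run time estimate below, treating 1 byte as 1 char and using the 4.5 ratio directly):
+
+ ```
+ python -c 'TB=1.2; ratio=4.5; print(f"{TB*1e12/ratio/1e9:0.0f}B tokens")'
+ 267B tokens
+ ```
+ which is in the same ballpark as the ~280B figure above.
+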
478
+
479
+ Megatron's preprocessing tool indexes everything and then at training time the Dataloader serves chunks of the desired fixed sequence length (2048 tokens in our case).
480
+
481
+ For more information on the pre-processing process and various estimations see: [OSCAR](../../data/oscar/README.md).
482
+
483
+
484
+
485
+ ## Dealing with 20h SLURM limit
486
+
487
+ First, let's ensure we save a checkpoint just before SLURM kills the job
488
+
489
+ Let's try 19:50, i.e. `1190 = 60*20 - 10` minutes:
490
+
491
+ ```
492
+ --exit-duration-in-mins 1190 \
493
+ ```
494
+
495
+ For the bigger models 10min might not be long enough to finish an iteration (assume the limit hits right as one starts) and write out a checkpoint.
496
+
497
+ Then we need to figure out how to schedule the next slurm job as soon as the currently running one is over in 20h.
498
+
499
+ We will use job arrays to solve this. Let's start with just 10 such jobs:
500
+
501
+ ```
502
+ sbatch --array=1-10%1 tr1-13B-round1.slurm
503
+ ```
504
+
505
+ `%1` limits the number of simultaneously running tasks from this job array to 1, since we want them to run in a sequence.
506
+
507
+ Alternatively, as always this param can be part of the script:
508
+ ```
509
+ #SBATCH --array=1-10%1
510
+ ```
511
+
512
+ ## Crontab
513
+
514
+ JZ doesn't have a user-accessible crontab facility, so we have to emulate it with a self-restarting slurm job that polls some dir for new jobs to run. For full details on how this works please see [Crontab Jobs](../../jz/crontab/).
515
+
516
+ But to use it simply put your slurm scripts into either:
517
+ ```
518
+ $six_ALL_CCFRWORK/cron/cron.hourly
519
+ $six_ALL_CCFRWORK/cron/cron.daily
520
+ ```
521
+
522
+ and the jobs will be run on an hourly or daily basis. This is similar to Linux's `/etc/cron.*` setup, except the jobs aren't guaranteed to start exactly on the hour, but should run around that time.
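+
+ For example, to (re-)install the hourly hub log sync job (a sketch; run from the dir containing the slurm scripts):
+
+ ```
+ cp tr1-13B-hub-sync-logs.slurm $six_ALL_CCFRWORK/cron/cron.hourly/
+ ```
+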
523
+
524
+ Currently we have:
525
+
526
+ ```
527
+ ls -1 $six_ALL_CCFRWORK/cron/cron.hourly/*slurm
528
+ tr1-13B-hub-sync-logs.slurm
529
+ tr1-13B-hub-sync-tensorboard.slurm
530
+ tr1-13B-slurm-status.slurm
531
+ ```
532
+
533
+ The first 2 sync log files to the hub and the last one monitors the health of the training and alerts about any problems.
534
+
535
+
536
+ ## Estimated run time
537
+
538
+ Best case scenario when training 24/7 on 64 nodes with 4 gpus each:
539
+ ```
540
+ $ python -c 'Btokens=300; Bmodel=13; n_gpus=256; Tflops=45; \
541
+ print(f"{Btokens*1e9*8*Bmodel*1e9/(n_gpus*Tflops*1e12*60*60*24):0.2f} days")'
542
+ 31.35 days
543
+ ```
544
+
545
+ You will find the detailed explanation of the estimation formula [here](../../math/README.md#estimate-model-training-time).
546
+
547
+ The training was much slower in the first 10k steps because of the batch size rampup, where the pipeline was very inefficient.
548
+
549
+ And then we were only able to use 20h slurm jobs, with unpredictable gaps of wait time in between (1-30 hours!), so it was impossible to predict when the training would reach the finish line.
550
+
551
+
552
+ ## Memory usage
553
+
554
+ During training we currently use 256GB (8x 32GB gpus) per each full replica (TP=2 * PP=4), the rest of the gpus are ZeRO-DP. So if we throw x times more GPUs we just speed things up by having more 2-node replicas.
555
+ The required memory breakdown:
556
+
557
+ 1. 4B for fp32 weights
558
+ 2. 2B for fp16 weights
559
+ 3. 8B for optimizer states.
560
+ 4. 4B for gradients (we don't save these in the checkpoint)
561
+ 5. plus memory for activations and temps, whose total depends mainly on the seqlen and mini batch size - and since we use activation checkpointing this memory need is quite small.
562
+
563
+ Total: 234GB (18 bytes/param * 13B params) plus activations and temps memory. So we are close to 256GB here.
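+
+ As a quick sanity check in the same style as the run time estimate above (a sketch; 18 bytes/param as itemized above):
+
+ ```
+ python -c 'Bparams=13; bytes_per_param=4+2+8+4; print(f"{Bparams*bytes_per_param}GB per replica, before activations and temps")'
+ 234GB per replica, before activations and temps
+ ```
+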
564
+
565
+ Activation memory would have been much much bigger if it weren't for activation checkpointing.
566
+
567
+
568
+ ## Checkpoint Back Up
569
+
570
+ To copy multiple checkpoints excluding optimizer states: first move the desired checkpoints to back up into some dedicated dir, e.g. `tr1-13B-round2/checkpoints`, then copy just the needed files:
571
+
572
+ ```
573
+ srun -p prepost -A six@cpu --time=20:00:00 --pty bash
574
+ mkdir to-upload
575
+ rsync -acvhu --no-compress --info=progress2 --exclude "zero*pt" tr1-13B-round2/checkpoints/ to-upload
576
+ ```
577
+
578
+ then to back those up:
579
+
580
+ ```
581
+ cp -arun $six_ALL_CCFRSCRATCH/checkpoints/to-upload/* $six_ALL_CCFRSTORE/checkpoints/tr1-13B
582
+ ```
583
+
584
+
585
+ **Final checkpoint with optimizer states:**
586
+
587
+ ```
588
+ mkdir $six_ALL_CCFRSTORE/checkpoints/tr1-13B-with-optim
589
+ cp -arun $six_ALL_CCFRSCRATCH/checkpoints/tr1-13B/checkpoints/global_step168000 $six_ALL_CCFRSTORE/checkpoints/tr1-13B-with-optim/
590
+ ```
591
+
592
+ This is the final checkpoint, which can be resumed from at will:
593
+
594
+ ```
595
+ $six_ALL_CCFRSTORE/checkpoints/tr1-13B-with-optim/global_step168000
596
+ ```
597
+
598
+ Here is the corresponding log:
599
+ ```
600
+ iteration 168000/ 311541 | consumed samples: 153013584 | elapsed time per iteration (ms): 13248.2 | learning rate: 1.000E-05 | global batch size: 1024 | lm loss: 2.376641E+00 | loss scale: 131072.0 | grad norm: 19767.052 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 |
601
+ time (ms)
602
+ --------------------------------------------------------------------------------------------------
603
+ validation loss at iteration 168000 | lm loss value: 2.342049E+00 | lm loss PPL: 1.040253E+01 |
604
+ --------------------------------------------------------------------------------------------------
605
+ ```
606
+
607
+ ## Checkpoint Conversion and Upload
608
+
609
+
610
+ **Important**: there was a bug in the converter on the transformers side, so we need this fix:
611
+ https://github.com/huggingface/transformers/pull/13735
612
+ if it's not merged yet, install this branch first. If it's already merged just make sure you use `transformers@master` - XXX: I will update the script to require a specific version once a new version of transformers is released.
613
+
614
+
615
+ Open a long running interactive shell:
616
+ ```
617
+ srun -p compil --cpus-per-task=40 -A six@cpu --time=6:00:00 --pty bash
618
+ ```
619
+ then convert:
620
+
621
+ ```
622
+ cd $six_ALL_CCFRSCRATCH/checkpoints/to-upload
623
+ time find * -maxdepth 0 -type d -name "global_step*" -exec $six_ALL_CCFRWORK/code/Megatron-DeepSpeed/tools/convert_checkpoint/deepspeed_to_transformers.py --input_folder {} --output_folder hf-fixed/{} \;
624
+ ```
625
+
626
+ It takes about 100sec per 26GB checkpoint.
627
+
628
+ The results will be all under `hf/`.
629
+
630
+ Now on to uploading to the hub.
631
+
632
+ Prepare the target dir:
633
+
634
+ ```
635
+ #git -c http.extraHeader="Authorization: Basic " clone https://huggingface.co/bigscience/tr1-13B-checkpoints/
636
+
637
+ cd tr1-13B-checkpoints
638
+
639
+
640
+ huggingface-cli lfs-enable-largefiles .
641
+
642
+ git config --unset user.email
643
+ ~/prod/code/bigscience/tools/hub-sync.py --repo-path . --patterns '*bogus*'
644
+ ```
645
+ We are going to put each checkpoint into its own branch with the same name.
646
+
647
+ ```
648
+ mv ../hf/global_step* .
649
+ time find * -maxdepth 0 -type d -name "global_step*" -exec git checkout main \; -exec git checkout -b {} \; -exec git add {} \; -exec git commit -m "add {}" \; -exec git push --set-upstream origin {} \;
650
+ git checkout main
651
+ ```
652
+
653
+ Fixing up failed pushes / verifying that all pushes went through, re-pushing if needed:
654
+
655
+ ```
656
+ git branch | perl -lne 'm|(global_step\d+)| && print qx[git checkout $1; git push --set-upstream origin $1]'
657
+ ```
658
+
659
+ If `git push` fails re-run with: `GIT_TRACE=1 GIT_TRANSFER_TRACE=1 GIT_CURL_VERBOSE=1 git push` to see what the actual error is.
660
+
661
+
662
+ OK, the branch-per-checkpoint hub repo proved to be very difficult to upload to, and even more difficult to use after the upload.
663
+
664
+ So let's try GCS bucket:
665
+
666
+ ```
667
+ gcloud auth login
668
+ gcloud config set project bigscience
669
+ gsutil cp -r hf-fixed/* gs://bigscience-backups/tr1-13B/checkpoints/
670
+
671
+ ```
672
+ or via rsync:
673
+ ```
674
+ gsutil -m rsync -r hf-fixed/* gs://bigscience-backups/tr1-13B/checkpoints/
675
+ ```
676
+
677
+ ```
678
+ start-prod
679
+ cd /gpfsssd/scratch/rech/six/commun/checkpoints/to-upload/
680
+ gsutil -m rsync -r hf-fixed1/* gs://bigscience-backups/tr1-13B/checkpoints/
681
+
682
+ ```
683
+
684
+ Or, if needed, to speed up the upload via multiple parallel copies, open 2 `srun` instances and run in one:
685
+ ```
686
+ gsutil cp -r hf-fixed1/* gs://bigscience-backups/tr1-13B/checkpoints/
687
+ ```
688
+ and in another:
689
+ ```
690
+ gsutil cp -r hf-fixed2/* gs://bigscience-backups/tr1-13B/checkpoints/
691
+ ```
692
+
693
+ We can't use `rsync` with multiple sources - it can only rsync a single dir.
694
+
695
+ Later we fixed `config.json` to use the correct `gelu_fast` activation and re-synced the GCS bucket.
696
+
697
+ (moved all the hf-fixed sub-dirs into a new folder `checkpoints`)
698
+
699
+ ```
700
+ start-prod
701
+ cd /gpfsssd/scratch/rech/six/commun/checkpoints/to-upload/
702
+ perl -pi -e 's|gelu|gelu_fast|' checkpoints/*/config.json
703
+ gsutil -m rsync -x ".*bin$" -r checkpoints gs://bigscience-backups/tr1-13B/checkpoints
704
+ ```
705
+ This is really fast since we exclude the checkpoint weight files (`-x ".*bin$"`).
706
+
707
+
708
+ ## Other backups
709
+
710
+ Logs:
711
+
712
+ ```
713
+ mkdir $six_ALL_CCFRSTORE/checkpoints/tr1-13B-logs/
714
+ tar -zcvf $six_ALL_CCFRSTORE/checkpoints/tr1-13B-logs/tensorboard.tgz $six_ALL_CCFRSCRATCH/checkpoints/tr1-13B/tensorboard
715
+ tar -zcvf $six_ALL_CCFRSTORE/checkpoints/tr1-13B-logs/logs.tgz $six_ALL_CCFRSCRATCH/checkpoints/tr1-13B/logs
716
+ ```
717
+
718
+ note: codecarbon wasn't ready during this training, so nothing to back up there.
719
+
720
+
721
+ ## Exports
722
+
723
+ - GCS https://console.cloud.google.com/storage/browser/bigscience
724
+ - The Hub https://huggingface.co/bigscience
725
+
726
+
727
+ ## Training scripts
728
+
729
+ The training script is:
730
+
731
+ - [tr1-13B-round1.slurm](./tr1-13B-round1.slurm)
732
+
733
+ We also have:
734
+
735
+ - [tr1-13B-short.slurm](./tr1-13B-short.slurm)
736
+
737
+ which is a very small model to do quick testing and debug, but otherwise the same as the main script.
738
+
739
+ The scripts are located at:
740
+
741
+ ```
742
+ cd $six_ALL_CCFRWORK/code/tr1-13B/bigscience/train/tr1-13B-base
743
+ ```
744
+
745
+ When no jobs are scheduled, currently we launch the main training script using:
746
+
747
+ ```
748
+ sbatch --array=1-5%1 tr1-13B-round1.slurm
749
+ ```
750
+ This will schedule 5 20h-trainings which will run one at a time, once the scheduler yields to the request, with unknown wait time in between each job.
751
+
752
+ If there is a job running already, **do not use the above command** as we can't have 2 trainings overlap. If there is a training already running you can:
753
+
754
+ 1. either tell `sbatch` to start the new job once the currently running job succeeds, using:
755
+
756
+ ```
757
+ sbatch --dependency=CURRENTLY_RUNNING_JOB_ID --array=1-5%1 tr1-13B-round1.slurm
758
+ ```
759
+
760
+ Where `CURRENTLY_RUNNING_JOB_ID` is the job being reported running. For example if the report of the last job is:
761
+ ```
762
+ [2021-08-16 22:08:01] tr1-13B-round3 is running for 18:15:59 since 2021-08-16T03:52:02 (711114_4 on 'gpu_p13' partition (r7i4n[1-7],r7i7n[1-8],r8i0n0,r8i5n[3-8],r8i6n[0-8],r9i0n8,r9i1n[0-8],r9i2n[7-8],r9i3n[0-8],r9i4n[0-8],r9i5n[0-2])
763
+ ```
764
+ then the currently running job ID is `711114_4`. You can also gather the same info about the current scheduler status using `squeue`:
765
+
766
+ ```
767
+ squeue --user=$(getent group six | cut -d: -f4) | grep tr1-13B
768
+ ```
769
+
770
+ 2. you could also see how much time is left before the current job finishes (based on training log files) and then pass that many hours to `sbatch`. For example, if the job has **less** than 2 hours to run, but more than 1 hour, you want to launch it `now+2hours` from now:
771
+
772
+ ```
773
+ sbatch --begin now+2hours --array=1-5%1 tr1-13B-round1.slurm
774
+ ```
775
+
776
+ Using `--dependency` may lead to shorter wait times, since if the time passed to `--begin` allows even for a few minutes of delay since the stopping of the last job, the scheduler may already start some other jobs even if their priority is lower than our job. That's because the scheduler ignores any jobs with `--begin` until the specified time arrives.
777
+
778
+
779
+ ## On Call
780
+
781
+ When a person is on call, they need to watch that the training is either running or scheduled to run. If neither is happening they need to schedule a new training. When this situation occurs the log file will report:
782
+
783
+ ```
784
+ ***ALERT: tr1-13B-round3.slurm is not RUNNING or SCHEDULED! Alert someone at Eng WG***
785
+ ```
786
+
787
+ An email alert is sent as well to `[email protected]`.
788
+
789
+
790
+ The next section explains how to watch the logs.
791
+
792
+
793
+ Other than waiting for the watchdog which runs once an hour, one can immediately see if anything is scheduled with:
794
+
795
+ ```
796
+ $six_ALL_CCFRWORK/code/tr1-13B/bigscience/tools/slurm-status.py --job-name tr1-13B-round3
797
+ ```
798
+
799
+ If for some reason the training is not scheduled or running, to schedule a new training:
800
+
801
+ ```
802
+ cd $six_ALL_CCFRWORK/code/tr1-13B/bigscience/train/tr1-13B-base
803
+ sbatch --array=1-5%1 tr1-13B-round1.slurm
804
+ ```
805
+
806
+ This will schedule a job array of 5 jobs of 20h each, so if all goes well, that's at least 4 days of not needing to do anything other than being on the lookout for potential crashes.
807
+
808
+ XXX: need a troubleshooting section, but elsewhere in the docs since it is not specific to this training.
809
+
810
+ 1. if one of the nodes gets a corrupted gpu and the training crashes, there is a risk that the next job in the training will get allocated the same node, in which case it'll crash again. We need a method to identify which node is corrupted and report that to [email protected] so they know to fix it, and to exclude this node from the slurm job by adding a list of nodes to exclude as follows:
811
+
812
+ ```
813
+ sbatch --exclude=r7i5n2,r7i5n6 ...
814
+ ```
815
+ but we currently have no way to identify which node is faulty. This may become possible if we switch to pt-1.9.0 or higher, where torch elastic replaces the usual launcher. Otherwise we have to use dedicated log files per node via: `#SBATCH --output=%x-%j-%N.out`.
816
+
817
+
818
+ ## Watching the training logs
819
+
820
+ On JZ:
821
+ ```
822
+ tail -f $six_ALL_CCFRSCRATCH/checkpoints/tr1-13B/logs/main_log.txt
823
+ ```
824
+
825
+ Outside of JZ:
826
+ ```
827
+ perl -e '$u=shift; $b=0; while(1){($e)=qx[curl -sI $u]=~/content-length: (\d+)/; \
828
+ print qx[curl -sr $b-$e -L $u] if $e>$b; $b=$e; sleep 300}' \
829
+ https://huggingface.co/bigscience/tr1-13B-logs/resolve/main/main_log.txt
830
+ ```
831
+ Currently the updates happen hourly, so this is a delayed version of `tail -f`.
832
+
833
+
834
+ ## CodeCarbon
835
+
836
+
837
+ CodeCarbon wasn't ready until the training was over, so we only did an additional 10h run to measure with, and then extrapolated to the whole training.
838
+
839
+ https://huggingface.co/bigscience/tr1-13B-codecarbon
840
+
841
+ This set of records captures the startup time and 2499 iterations in 2 records per gpu, since there was also an intermediary checkpoint saved half-way and we flush the CC records on each checkpoint saving.
842
+
843
+ The training had 168000 iterations. Therefore multiply the reported data by 67. This would be quite approximate since we were using 16 nodes during the ramp up, then 64, and only for the last 3 weeks 128 nodes.
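+
+ The factor of 67 is simply (a sketch):
+
+ ```
+ python -c 'print(f"{168000/2499:0.1f}")'
+ 67.2
+ ```
+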
844
+
845
+ Caveat emptor: I'm not sure whether CC-reports overlap since each report is per gpu and I think they may be measuring the same thing, other than the gpu itself. So this requires research.
846
+
847
+ Each csv file contains a report for a single gpu/process. There are 512 reports.
848
+
849
+
850
+ ## Extras
train/tr1-13B-base/chronicles.md ADDED
@@ -0,0 +1,425 @@
1
+ # tr1-13B Chronicles
2
+
3
+ Notes on the training progress with a particular focus on any encountered problems and their diagnosis and solutions/prevention.
4
+
5
+ To follow the training progress charts, see: [tensorboard](https://huggingface.co/bigscience/tr1-13B-tensorboard/tensorboard).
6
+
7
+ To follow the raw training logs see: [logs](https://huggingface.co/bigscience/tr1-13B-logs/).
8
+
9
+
10
+ ## Round1 SAVE_INTERVAL=10
11
+
12
+ NNODES=16
13
+
14
+ saved checkpoint each 10 steps
15
+
16
+ `$six_ALL_CCFRSCRATCH/checkpoints/tr1-13B/tr1-13B-round1/checkpoints`
17
+
18
+ 10 checkpoints (Every 10 steps 1-100) - 4TB
19
+
20
+ ## Round2 SAVE_INTERVAL=18
21
+
22
+ NNODES=16
23
+
24
+ moved the round1's checkpoints away
25
+
26
+ rerun from scratch with the same seed
27
+
28
+ saved checkpoint each 18 steps
29
+
30
+ `$six_ALL_CCFRSCRATCH/checkpoints/tr1-13B/tr1-13B-round2/checkpoints`
31
+
32
+ 51 checkpoints (Every 18 steps 101-1000) - 20TB
33
+
34
+
35
+ ## Round3 SAVE_INTERVAL=1500 NNODES=16
36
+
37
+ NNODES=16
38
+
39
+ moved the round2's checkpoints away
40
+
41
+ rerun from scratch with the same seed
42
+
43
+ saved checkpoint each 1500 steps
44
+
45
+ I did the full re-run because otherwise I couldn't separate the tensorboard logs - it is not possible to restart from a checkpoint using a `TRAIN_ITER` or `EXIT_INTERVAL` which is not fixed.
46
+
47
+ now we started uploading tensorboard logs
48
+
49
+
50
+ ## Round3 SAVE_INTERVAL=1500 NNODES=32
51
+
52
+ Tried to switch to 64 nodes, but the training failed because during the batch size rampup GBS gets incremented by 16, which limits us to DP_SIZE=16 (with MBS=1), so we can use at most 32 nodes (128 gpus).
53
+
54
+ ```
55
+ DP_SIZE=$NNODES*$GPUS_PER_NODE/($PP_SIZE*$TP_SIZE)
56
+ 16 = 32*4/(4*2)
57
+ ```
58
+
59
+ will switch to 64 nodes once GBS reaches 1024.
60
+
61
+
62
+ The training then crashed with shared memory error after some 10h+ of training:
63
+ ```
64
+ ERROR: Unexpected bus error encountered in worker. This might be caused by insufficient shared memory (shm).
65
+ ERROR: Unexpected bus error encountered in worker. This might be caused by insufficient shared memory (shm).
66
+ ERROR: Unexpected bus error encountered in worker. This might be caused by insufficient shared memory (shm).
67
+ ERROR: Unexpected bus error encountered in worker. This might be caused by insufficient shared memory (shm).
68
+ Traceback (most recent call last):
69
+ File "/gpfswork/rech/six/commun/conda/hf-prod/lib/python3.8/site-packages/torch/utils/data/dataloader.py", line 986, in _try_get_data
70
+ Traceback (most recent call last):
71
+ File "/gpfswork/rech/six/commun/conda/hf-prod/lib/python3.8/site-packages/torch/utils/data/dataloader.py", line 986, in _try_get_data
72
+ File "/gpfswork/rech/six/commun/conda/hf-prod/lib/python3.8/queue.py", line 179, in get
73
+ File "/gpfswork/rech/six/commun/conda/hf-prod/lib/python3.8/queue.py", line 179, in get
74
+ File "/gpfswork/rech/six/commun/conda/hf-prod/lib/python3.8/threading.py", line 306, in wait
75
+ File "/gpfswork/rech/six/commun/conda/hf-prod/lib/python3.8/threading.py", line 306, in wait
76
+ File "/gpfswork/rech/six/commun/conda/hf-prod/lib/python3.8/site-packages/torch/utils/data/_utils/signal_handling.py", line 66, in handler
77
+ File "/gpfswork/rech/six/commun/conda/hf-prod/lib/python3.8/site-packages/torch/utils/data/_utils/signal_handling.py", line 66, in handler
78
+ RuntimeError: DataLoader worker (pid 30882) is killed by signal: Bus error. It is possible that dataloader's workers are out of shared memory. Please try to raise your shared memory limit.
79
+ RuntimeError
80
+ The above exception was the direct cause of the following exception:
81
+ : Traceback (most recent call last):
82
+ DataLoader worker (pid 30801) is killed by signal: Bus error. It is possible that dataloader's workers are out of shared memory. Please try to raise your shared memory limit. File "/gpfswork/rech/six/commun/code/Megatron-DeepSpeed/pretrain_gpt.py", line 215, in <module>
83
+ The above exception was the direct cause of the following exception:
84
+ Traceback (most recent call last):
85
+ File "/gpfswork/rech/six/commun/code/Megatron-DeepSpeed/pretrain_gpt.py", line 215, in <module>
86
+ pretrain(train_valid_test_datasets_provider, model_provider, forward_step,
87
+ File "/gpfsssd/worksf/projects/rech/six/commun/code/Megatron-DeepSpeed/megatron/training.py", line 144, in pretrain
88
+ pretrain(train_valid_test_datasets_provider, model_provider, forward_step,
89
+ File "/gpfsssd/worksf/projects/rech/six/commun/code/Megatron-DeepSpeed/megatron/training.py", line 144, in pretrain
90
+ iteration = train(forward_step_func,iteration = train(forward_step_func,
91
+ File "/gpfsssd/worksf/projects/rech/six/commun/code/Megatron-DeepSpeed/megatron/training.py", line 675, in train
92
+ File "/gpfsssd/worksf/projects/rech/six/commun/code/Megatron-DeepSpeed/megatron/training.py", line 675, in train
93
+ train_step(forward_step_func,
94
+ train_step(forward_step_func, File "/gpfsssd/worksf/projects/rech/six/commun/code/Megatron-DeepSpeed/megatron/training.py", line 381, in train_step
95
+ File "/gpfsssd/worksf/projects/rech/six/commun/code/Megatron-DeepSpeed/megatron/training.py", line 381, in train_step
96
+ loss = model[0].train_batch(data_iter=data_iterator)
97
+ loss = model[0].train_batch(data_iter=data_iterator)
98
+ ```
99
+
100
+ Each node has 94GB of /dev/shm, so it's very strange that this happened.
101
+
102
+ ```
103
+ df -h | grep shm
104
+ tmpfs 94G 336K 94G 1% /dev/shm
105
+ ```
106
+ This is after 2h of training on one node. I wonder if the problem was on some specific node.
107
+
108
+ Though Remi checked that all nodes used by the training that crashed had this exact setup. And all reported 1% usage.
109
+
110
+
111
+
112
+ To continually diagnose the running nodes' shm memory usage:
113
+ ```
114
+ for ((;;)) { (srun --jobid 637799 --gres=gpu:0 $six_ALL_CCFRWORK/bin/report_shm_usage | grep -v "1%"); sleep 10; }
115
+ ```
116
+ after adjusting the jobid number.
117
+
118
+ where:
119
+ ```
120
+ cat $six_ALL_CCFRWORK/bin/report_shm_usage
121
+ #!/usr/bin/bash
122
+
123
+ # print shared memory usage with the host
124
+
125
+ echo $(hostname) $(df -h | grep /dev/shm)
126
+ ```
127
+
128
+ The shared memory is used by `DataLoader` workers. We just use the default `args.num_workers==2` and 94GB of shm available on each node is a huge amount of shared memory.
129
+
130
+ And given that we use TP+PP, a single node doesn't have DDP on it, so no multiproc on the local host. Currently one full model replica uses 2 full nodes (`TP*PP = 2*4 = 8` gpus), so it's really a single Dataloader call per each 2 nodes, i.e. tiny tiny needs.
131
+
132
+ If this happens again, setting `args.num_workers==0` will stop using shared memory, but it'll impact the data loading speed.
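+
+ A sketch of what that would look like in the training slurm script (assuming Megatron's `--num-workers` flag, which maps to `args.num_workers`):
+
+ ```
+     --num-workers 0 \
+ ```
+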
133
+
134
+ Jared hasn't seen this problem in his experience.
135
+
136
+ So at the moment we don't know what happened.
137
+
138
+ 2 more 20h trainings have been run since then w/o any problems.
139
+
140
+ ## Checking the progress
141
+
142
+ Someone asked when the current training will complete:
143
+
144
+ Let's do math:
145
+
146
+ 1. we are currently going at 784 samples in 32 seconds, or 24.5 samples / sec
147
+ 2. roughly we have 145M samples to go, so at the current speed on 32 nodes, if we manage to have a 20h allocation every 24 hours, we get about 82 days: `145_000_000/(20*60*60*24.5)` (see the sketch after this list)
148
+ 3. we should reach GBS=1024 hopefully today and then we can crank up to 64 nodes, which should roughly double the speed, so it'll take 41 days to complete if all goes well and we don't sit in the queue for more than 4 hours.
149
+ 4. we can dare to try 128 nodes, which would quadruple the speed and we should be done in about 20 days. It's hard to tell how quickly the SLURM scheduler will provide such a large allocation - if more than half-day of wait time, we are probably better off with 64 nodes.
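+
+ The 82-day estimate as a one-liner (a sketch):
+
+ ```
+ python -c 'print(f"{145_000_000/(24.5*20*60*60):0.0f} days")'
+ 82 days
+ ```
+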
150
+
151
+
152
+ ## Round3 SAVE_INTERVAL=1500 NNODES=64
153
+
154
+ Finally GBS is at 1024, so we can do 64 nodes. Clocking about 23-26 secs / iteration - the performance jumps around quite a lot from run to run. But we already know that about JZ - it's very unsteady and depends on network usage by others.
155
+
156
+ Created a dedicated branch `tr1-13B`, which allows further development w/o the risk of breaking the current training.
157
+
158
+ ## A huge lm loss spike
159
+
160
+ The training loss just jumped from ~3 to ~9
161
+ ```
162
+ iteration 29020/ 311541 | consumed samples: 10698064 | elapsed time per iteration (ms): 22306.6 | learning rate: 9.850E-05 | global batch size: 1024 | lm loss: 2.775923E+00 | loss scale: 32768.0 | grad norm: 0.000 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 |
163
+ time (ms)
164
+ iteration 29030/ 311541 | consumed samples: 10708304 | elapsed time per iteration (ms): 22336.4 | learning rate: 9.849E-05 | global batch size: 1024 | lm loss: 2.772822E+00 | loss scale: 32768.0 | grad norm: 0.000 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 |
165
+ time (ms)
166
+ iteration 29040/ 311541 | consumed samples: 10718544 | elapsed time per iteration (ms): 22332.6 | learning rate: 9.849E-05 | global batch size: 1024 | lm loss: 2.768131E+00 | loss scale: 65536.0 | grad norm: 0.000 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 |
167
+ time (ms)
168
+ iteration 29050/ 311541 | consumed samples: 10728784 | elapsed time per iteration (ms): 22148.5 | learning rate: 9.849E-05 | global batch size: 1024 | lm loss: 7.343709E+00 | loss scale: 8192.0 | grad norm: 0.000 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 |
169
+ time (ms)
170
+ iteration 29060/ 311541 | consumed samples: 10739024 | elapsed time per iteration (ms): 22181.7 | learning rate: 9.849E-05 | global batch size: 1024 | lm loss: 8.715872E+00 | loss scale: 4096.0 | grad norm: 0.000 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 |
171
+ time (ms)
172
+ iteration 29070/ 311541 | consumed samples: 10749264 | elapsed time per iteration (ms): 22107.1 | learning rate: 9.848E-05 | global batch size: 1024 | lm loss: 7.654131E+00 | loss scale: 4096.0 | grad norm: 0.000 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 |
173
+ time (ms)
174
+ iteration 29080/ 311541 | consumed samples: 10759504 | elapsed time per iteration (ms): 22131.2 | learning rate: 9.848E-05 | global batch size: 1024 | lm loss: 7.192470E+00 | loss scale: 4096.0 | grad norm: 0.000 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 |
175
+ time (ms)
176
+ iteration 29090/ 311541 | consumed samples: 10769744 | elapsed time per iteration (ms): 22119.2 | learning rate: 9.848E-05 | global batch size: 1024 | lm loss: 6.849044E+00 | loss scale: 4096.0 | grad norm: 0.000 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 |
177
+ ```
178
+
179
+ You can see the spike at https://huggingface.co/bigscience/tr1-13B-tensorboard/tensorboard
180
+
181
+ It took some 500 iterations to recover.
182
+
183
+ There was a second spike a bit later, half the first one this time and recovered very quickly.
184
+
185
+ We discussed why it may have happened, but we don't have any definitive answer.
186
+
187
+
188
+ ## Checkpoint bloat issue
189
+
190
+ We have an issue with per-layer checkpoints that are 10x bigger than they should be. After some research we discovered that `torch.save()` doesn't save the current view, but the whole tensor with its original tensor storage. So that's why we were getting 10x bigger files than the actual data in the per-layer checkpoints.
191
+
192
+ We need to `.clone()` the tensors before saving them, and then the checkpoint for layers is just modelsize*2 bytes. The reason they were bloated is that ZeRO-1 pre-allocated large tensor buffers for run-time optimization. So this needs to be fixed in Deepspeed's pipe checkpoint saving.
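+
+ A minimal illustration of the underlying behaviour (a sketch, not the actual Deepspeed code): saving a small view of a large tensor serializes the whole storage, while saving a `.clone()` of it serializes only the view's data.
+
+ ```
+ python -c 'import torch, io; \
+ big = torch.zeros(10_000_000); view = big[:10]; \
+ b1 = io.BytesIO(); torch.save(view, b1); \
+ b2 = io.BytesIO(); torch.save(view.clone(), b2); \
+ print(b1.getbuffer().nbytes, b2.getbuffer().nbytes)'
+ ```
+ The first number is ~40MB (the whole 10M-element storage gets serialized), the second is tiny by comparison.
+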
193
+
194
+ Also will write a script to fix the already-saved checkpoints to `clone` and re-save.
195
+
196
+
197
+ ## old NCCL
198
+
199
+ Discovered that NCCL is statically linked into the pytorch distributed module and it's a really old version, 2.7.9. Supposedly newer NCCL should help with OPA interlink performance. But that means we either need to switch to a more recent pytorch or build our own. This is not resolved yet.
200
+
201
+
202
+ ## Watchdog
203
+
204
+ We created a watchdog, that reports if we are running/scheduled and alerts if neither is happening. E.g. the recent log in the main log file was:
205
+
206
+ ```
207
+ iteration 33240/ 311541 | consumed samples: 15019344 | elapsed time per iteration (ms): 23491.4 | learning rate: 9.702E-05 | global batch size: 1024 | lm loss: 2.722675E+00 | loss scale: 32768.0 | grad norm: 0.000 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 |
208
+ time (ms)
209
+ saving checkpoint at iteration 33241 to /gpfsscratch/rech/six/commun/checkpoints/tr1-13B/checkpoints
210
+ [2021-08-08 01:00:44,221] [INFO] [logging.py:68:log_dist] [Rank 0] Saving model checkpoint: /gpfsscratch/rech/six/commun/checkpoints/tr1-13B/checkpoints/global_step33241/mp_rank_00_model_states.pt
211
+ successfully saved checkpoint at iteration 33241 to /gpfsscratch/rech/six/commun/checkpoints/tr1-13B/checkpoints
212
+ time (ms) | save-checkpoint: 57514.53
213
+ [exiting program after 1190.0357275923093 minutes] datetime: 2021-08-08 01:00:51
214
+ [2021-08-08 01:49:40] ***ALERT: tr1-13B-round3.slurm is not RUNNING or SCHEDULED! Alert someone at Eng WG***
215
+ [2021-08-08 02:49:44] ***ALERT: tr1-13B-round3.slurm is not RUNNING or SCHEDULED! Alert someone at Eng WG***
216
+ [2021-08-08 03:56:54] tr1-13B-round3 is scheduled to start in 3 days, 7:24:19 (at 2021-08-11T11:21:14) (682842_[1-5%1] on 'gpu_p13' partition)
217
+ ```
218
+
219
+ ## NNODES=96
220
+
221
+ We thought that trying more nodes would be a good idea, but 96 nodes proved to be unacceptable, since
222
+
223
+ GBS=1024 is not divisible by the 48 DP replicas we would get with 384 gpus (96*4 gpus / (TP=2 * PP=4)), so there is no way to spread data evenly across all replicas.
224
+
225
+ We can only have either 256, 512 or 1024 gpus (64, 128, 256 nodes)
226
+
227
+ ## Corrupt GPU crashes the training multiple times
228
+
229
+ One of the array job trainings crashes after many hours of training:
230
+
231
+ ```
232
+ iteration 43680/ 311541 | consumed samples: 25709904 | elapsed time per iteration (ms): 25593.4 | learning rate: 9.135E-05 | global batch size: 1024 | lm loss: 2.635663E+00 | loss scale: 131072.0 | grad norm: 17224.723 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 |
233
+ time (ms)
234
+ Traceback (most recent call last):
235
+ File "/gpfswork/rech/six/commun/code/tr1-13B/Megatron-DeepSpeed-tr1-13B/pretrain_gpt.py", line 222, in <module>
236
+ pretrain(train_valid_test_datasets_provider, model_provider, forward_step,
237
+ File "/gpfsssd/worksf/projects/rech/six/commun/code/tr1-13B/Megatron-DeepSpeed-tr1-13B/megatron/training.py", line 144, in pretrain
238
+ iteration = train(forward_step_func,
239
+ File "/gpfsssd/worksf/projects/rech/six/commun/code/tr1-13B/Megatron-DeepSpeed-tr1-13B/megatron/training.py", line 677, in train
240
+ train_step(forward_step_func,
241
+ File "/gpfsssd/worksf/projects/rech/six/commun/code/tr1-13B/Megatron-DeepSpeed-tr1-13B/megatron/training.py", line 381, in train_step
242
+ loss = model[0].train_batch(data_iter=data_iterator)
243
+ File "/gpfsssd/worksf/projects/rech/six/commun/code/tr1-13B/DeepSpeed-big-science/deepspeed/runtime/pipe/engine.py", line 291, in train_batch
244
+ self._exec_schedule(sched)
245
+ File "/gpfsssd/worksf/projects/rech/six/commun/code/tr1-13B/DeepSpeed-big-science/deepspeed/runtime/pipe/engine.py", line 1237, in _exec_schedule
246
+ self._exec_instr(**cmd.kwargs)
247
+ File "/gpfsssd/worksf/projects/rech/six/commun/code/tr1-13B/DeepSpeed-big-science/deepspeed/runtime/pipe/engine.py", line 679, in _exec_backward_pass
248
+ torch.autograd.backward(tensors=(outputs, ), grad_tensors=(grad_tensors, ))
249
+ File "/gpfswork/rech/six/commun/conda/tr1-13B/lib/python3.8/site-packages/torch/autograd/__init__.py", line 145, in backward
250
+ Variable._execution_engine.run_backward(
251
+ RuntimeError: transform: failed to synchronize: cudaErrorECCUncorrectable: uncorrectable ECC error encountered
252
+ terminate called after throwing an instance of 'c10::Error'
253
+ what(): CUDA error: uncorrectable ECC error encountered
254
+ Exception raised from create_event_internal at /opt/conda/conda-bld/pytorch_1616554793803/work/c10/cuda/CUDACachingAllocator.cpp:733 (most recent call first):
255
+ frame #0: c10::Error::Error(c10::SourceLocation, std::string) + 0x42 (0x1500fb4d42f2 in /gpfswork/rech/six/commun/conda/tr1-13B/lib/python3.8/site-packages/torch/lib/libc10.so)
256
+ frame #1: c10::detail::torchCheckFail(char const*, char const*, unsigned int, std::string const&) + 0x5b (0x1500fb4d167b in /gpfswork/rech/six/commun/conda/tr1-13B/lib/python3.8/site-packages/torch/lib/libc10.so)
257
+ frame #2: c10::cuda::CUDACachingAllocator::raw_delete(void*) + 0x809 (0x1500fb72d219 in /gpfswork/rech/six/commun/conda/tr1-13B/lib/python3.8/site-packages/torch/lib/libc10_cuda.so)
258
+ frame #3: c10::TensorImpl::release_resources() + 0x54 (0x1500fb4bc3a4 in /gpfswork/rech/six/commun/conda/tr1-13B/lib/python3.8/site-packages/torch/lib/libc10.so)
259
+ frame #4: <unknown function> + 0x6e0e5a (0x150152432e5a in /gpfswork/rech/six/commun/conda/tr1-13B/lib/python3.8/site-packages/torch/lib/libtorch_python.so)
260
+ frame #5: <unknown function> + 0x6e0ef1 (0x150152432ef1 in /gpfswork/rech/six/commun/conda/tr1-13B/lib/python3.8/site-packages/torch/lib/libtorch_python.so)
261
+ frame #6: <unknown function> + 0x1a6b5a (0x56434fce9b5a in /gpfswork/rech/six/commun/conda/tr1-13B/bin/python)
262
+ frame #7: <unknown function> + 0x110b7c (0x56434fc53b7c in /gpfswork/rech/six/commun/conda/tr1-13B/bin/python)
263
+ frame #8: <unknown function> + 0x1105b9 (0x56434fc535b9 in /gpfswork/rech/six/commun/conda/tr1-13B/bin/python)
264
+ frame #9: <unknown function> + 0x1105a3 (0x56434fc535a3 in /gpfswork/rech/six/commun/conda/tr1-13B/bin/python)
265
+ frame #10: <unknown function> + 0x1105a3 (0x56434fc535a3 in /gpfswork/rech/six/commun/conda/tr1-13B/bin/python)
266
+ frame #11: <unknown function> + 0x177917 (0x56434fcba917 in /gpfswork/rech/six/commun/conda/tr1-13B/bin/python)
267
+ frame #12: PyDict_SetItemString + 0x4c (0x56434fcbd86c in /gpfswork/rech/six/commun/conda/tr1-13B/bin/python)
268
+ frame #13: PyImport_Cleanup + 0xac (0x56434fd2f0ec in /gpfswork/rech/six/commun/conda/tr1-13B/bin/python)
269
+ frame #14: Py_FinalizeEx + 0x79 (0x56434fd95589 in /gpfswork/rech/six/commun/conda/tr1-13B/bin/python)
270
+ frame #15: Py_RunMain + 0x1bc (0x56434fd988fc in /gpfswork/rech/six/commun/conda/tr1-13B/bin/python)
271
+ frame #16: Py_BytesMain + 0x39 (0x56434fd98ce9 in /gpfswork/rech/six/commun/conda/tr1-13B/bin/python)
272
+ frame #17: __libc_start_main + 0xf3 (0x150183467873 in /lib64/libc.so.6)
273
+ frame #18: <unknown function> + 0x1f7847 (0x56434fd3a847 in /gpfswork/rech/six/commun/conda/tr1-13B/bin/python)
274
+ ```
275
+
276
+ Nobody was around to notice, and the slurm scheduler started the next training job in the array, which crashed too, this time right away on:
277
+
278
+ ```
279
+ > initializing tensor model parallel with size 2
280
+ > initializing pipeline model parallel with size 4
281
+ > setting random seeds to 42 ...
282
+ [2021-08-12 08:19:28,225] [INFO] [checkpointing.py:226:model_parallel_cuda_manual_seed] > initializing model parallel cuda seeds on global rank 0, model parallel rank 0, and data parallel rank 0 with model parallel seed: 2760 and data parallel seed: 42
283
+ > compiling dataset index builder ...
284
+ make: Entering directory '/gpfsssd/worksf/projects/rech/six/commun/code/tr1-13B/Megatron-DeepSpeed-tr1-13B/megatron/data'
285
+ make: Nothing to be done for 'default'.
286
+ make: Leaving directory '/gpfsssd/worksf/projects/rech/six/commun/code/tr1-13B/Megatron-DeepSpeed-tr1-13B/megatron/data'
287
+ >>> done with dataset index builder. Compilation time: 0.338 seconds
288
+ > compiling and loading fused kernels ...
289
+ Traceback (most recent call last):
290
+ File "/gpfswork/rech/six/commun/code/tr1-13B/Megatron-DeepSpeed-tr1-13B/pretrain_gpt.py", line 222, in <module>
291
+ pretrain(train_valid_test_datasets_provider, model_provider, forward_step,
292
+ File "/gpfsssd/worksf/projects/rech/six/commun/code/tr1-13B/Megatron-DeepSpeed-tr1-13B/megatron/training.py", line 95, in pretrain
293
+ initialize_megatron(extra_args_provider=extra_args_provider,
294
+ File "/gpfsssd/worksf/projects/rech/six/commun/code/tr1-13B/Megatron-DeepSpeed-tr1-13B/megatron/initialize.py", line 89, in initialize_megatron
295
+ _compile_dependencies()
296
+ File "/gpfsssd/worksf/projects/rech/six/commun/code/tr1-13B/Megatron-DeepSpeed-tr1-13B/megatron/initialize.py", line 140, in _compile_dependencies
297
+ torch.distributed.barrier()
298
+ File "/gpfswork/rech/six/commun/conda/tr1-13B/lib/python3.8/site-packages/torch/distributed/distributed_c10d.py", line 2420, in barrier
299
+ work = default_pg.barrier(opts=opts)
300
+ RuntimeError: CUDA error: out of memory
301
+ ```
302
+
303
+ We figured one of the gpus had a hardware problem. So it crashed the first time. And then the scheduler allocated the same node and of course, we crashed again.
304
+
305
+ We contacted JZ admins and indeed one of the nodes was faulty. The next training didn't hit this node and the training continued.
306
+
307
+ Unfortunately we currently don't have a way to correlate the exceptions to the hostname of the node they happened on. It's really important to have this feature available, since without it we can keep on hitting the faulty node and it'll continue crashing the training. If we know the node's hostname we can exclude it via `sbatch --exclude=node1,node2,...`.
308
+
309
+ update: At the moment we have to add `%N` to `#SBATCH --output=%x-%j-%N.out`, and then each node will have its own log file and we can tell which node has a corrupt GPU.
310
+
311
+ ## Really long wait time to get allocation
312
+
313
+ When a job gets queued we often see a 3-day expected wait time before it starts, but most of the time the job comes through in several hours. Sometimes we have to wait for a really long time, like 30h, with the scheduler bumping our job down multiple times. This is a big problem as it pushes the finish line away continuously. We aren't anywhere close to being able to train 24/7 despite having many hours allocated to us for this project.
314
+
315
+ Another problem is that within a project we don't have a way to give the main training job a higher priority than other jobs that we run in parallel on various experiments and small trainings. There really should be a way for a user to say, this is a high priority job amongst all other jobs of the same group. But we didn't find a way to do that.
316
+
317
+ ## Test suite added
318
+
319
+ A `Megatron-Deepspeed` test suite was finally added. It was odd Megatron-LM didn't have one in the first place, so we had to create our own.
320
+
321
+ Now need to find some hardware with 2 gpus to create a CI.
322
+
323
+ ## Reduced evaluation iterations
324
+
325
+ Noticed that somehow it was configured to run eval for 100 iterations; after discussion we reduced it to 5, thus saving some resources. While validation iterations are much faster than training iterations, running that many wasn't really needed.
326
+
327
+ ## NNODES=128
328
+
329
+ Taking advantage of the August holiday in France we were able to switch to 128 nodes.
330
+
331
+ Observed a further drop in TFLOPs, since now we had even fewer microbatches to go around. This is because Global BS remained the same (GBS=1024) and we currently use 2 nodes for a single replica (TP=2 * PP=4). So with 128 nodes, we have 64 replicas, which leaves only GAS=16 per replica, and that's too little for an efficient pipeline. The idle bubble is too big.
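+
+ The arithmetic (a sketch):
+
+ ```
+ python -c 'GBS=1024; MBS=1; DP=128*4//(2*4); print(f"DP={DP} GAS={GBS//(MBS*DP)}")'
+ DP=64 GAS=16
+ ```
+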
332
+
333
+ The benchmarking/tune up was done with GAS=128 (GBS=1024/8) and that's where we were getting high TFLops.
334
+
335
+ Nevertheless, the training is going much faster now and we will catch up lost time quickly.
336
+
337
+ ## NCCL experiments
338
+
339
+ It was suggested that newer NCCL will lead to faster inter-node communication.
340
+
341
+
342
+ The hypothesis was that newer nccl should be faster on JZ, but the short experiments I ran didn't support it. I got the same throughput with:
343
+
344
+ 1. pt=1.8.1, cuda=11.1, nccl=2708
345
+ 2. pt=1.9.0, cuda=11.1, nccl=2708
346
+ 3. pt=1.10.0.dev20210821, cuda=11.3, nccl=(2, 10, 3)
347
+
348
+ The experiment was run on the same 4-node allocation with GBS=64, but otherwise everything else was the same as the current training script. The speed was 17-17.5 secs per iteration. Did about 100 iterations.
349
+ So we will stick to pt=1.8.1 for now until a need arises to change that.
350
+
351
+ ## SLURM Job Arrays and Dependency
352
+
353
+ Switched to using SLURM Job Arrays and Dependency to schedule jobs. Since our account has a huge allocation we were able to start new 20h jobs with no delay.
354
+
355
+ If this approach is not used, even a tiny delay between finishing one job and scheduling the next one often leads to 1-30 hours of wait time in the queue. This is because the scheduler is quick to allocate other jobs in the first few seconds after the currently running job finishes.
356
+
357
+ The problem remained that if something went wrong - e.g. a mistake in a script or some hardware issue - it would lead to a delay in starting new jobs and a very long wait time.
358
+
359
+ This training was getting its software updated a lot as missing features were added, so it wasn't a super-stable polished production environment.
360
+
361
+ So as long as we had a stable setup using SLURM Job Arrays and Dependency chaining things went well. When we couldn't use those SLURM was delaying our training sometimes by a lot.
362
+
363
+ Also, since we run secondary trainings, we learned to use `--nice=10000` for those trainings. Without this all slurm jobs of the same account have the same priority.
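+
+ For example (a sketch, with a hypothetical secondary experiment script name):
+
+ ```
+ sbatch --nice=10000 tr-some-secondary-experiment.slurm
+ ```
+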
364
+
365
+ ## Added an alert email notification
366
+
367
+ Previously implemented watchdog now got hooked up to email notifications, so if it detected that no job was running or scheduled it'd let the group know.
368
+
369
+ ## Checkpoint bloat fixed
370
+
371
+ The Deepspeed team fixed the bloat in the checkpoints, so new checkpoints were taking 10x less space for layer weights.
372
+
373
+ I then processed all the old checkpoints to remove the bloat using:
374
+
375
+ ```
376
+ srun -p prepost -A six@cpu --time=20:00:00 --pty bash
377
+ wget https://raw.githubusercontent.com/stas00/toolbox/master/pytorch/pt-checkpoint-shrink.py
378
+ chmod a+x pt-checkpoint-shrink.py
379
+ cd checkpoints
380
+ find -type d -name "global_step*" -exec pt-checkpoint-shrink.py --checkpoint_dir {} --patterns "layer*pt" \;
381
+ ```
382
+
383
+ ## CI was added
384
+
385
+ A CI was implemented using an on-demand EC2 instance, with the help of https://github.com/machulav/ec2-github-runner
386
+
387
+ Eventually it proved to be unusable for PRs made from forks, as EC2 needs secrets that github actions won't give to PRs not originating from the origin repo. So this CI is not very useful.
388
+
389
+
390
+ ## Training completed
391
+
392
+ On Sep 6th we reached the 300B tokens mark and on Sep 7th we stopped the training - it took some ~5 weeks to complete.
393
+
394
+
395
+ ## Checkpoint conversion
396
+
397
+ We still need to figure out how to make the checkpoint available in the HF `transformers` format. This is a work in progress.
398
+
399
+ Update: This has been done. All checkpoints converted to HF format and uploaded to HUB.
400
+
401
+ See [README.md](README.md) for nuances of the conversion.
402
+
403
+ We made a mistake in the activation function setting when writing the HF model after the conversion. It proved to be a complex situation, but it needs to be `gelu_fast` on the HF side since we are using `args.openai_gelu = False; args.bias_gelu_res = True`. So we applied fixes to the models on the HUB using the following:
404
+
405
+ ```
406
+ cd /gpfsssd/scratch/rech/six/commun/experiments/fix-config/
407
+ export GIT_LFS_SKIP_SMUDGE=1
408
+ git clone https://huggingface.co/bigscience/tr3e-1B3-c4-checkpoints
409
+ cd tr3e-1B3-c4-checkpoints
410
+ ~/prod/code/bigscience/tools/hub-sync.py --repo-path . --patterns '*bogus*'
411
+ set +H
412
+ git branch -a | sort -V | perl -lne 'm|(global_step\d+)| && print qx[git checkout $1; perl -pi -e "s/gelu(?!_)/gelu_fast/" $1/config.json; git commit -m "gelu_fast is the correct activation_function" .; git push --set-upstream origin $1]'
413
+ export GIT_LFS_SKIP_SMUDGE=0
414
+ ```
415
+ Note the trick of not checking out LFS files, since we only need to modify `config.json`, which is a normal file - this is thousands of times faster than a normal checkout.
416
+
417
+
418
+
419
+ and for GCS:
420
+ ```
421
+ start-prod
422
+ cd /gpfsssd/scratch/rech/six/commun/checkpoints/to-upload/
423
+ perl -pi -e 's|gelu|gelu_fast|' checkpoints/*/config.json
424
+ gsutil -m rsync -x ".*bin$" -r checkpoints gs://bigscience-backups/tr1-13B/checkpoints
425
+ ```
train/tr1-13B-base/start-tr1-13B ADDED
@@ -0,0 +1,57 @@
1
+ # This is a python production script for JZ / tr1-13B training
2
+ #
3
+ # Activate with:
4
+ #
5
+ # source ./start-tr1-13B
6
+ #
7
+ #
8
+
9
+ # # if this session isn't run via a login shell, which is the case when running a
10
+ # # command which is not shell via ssh, the bash function `module` will be missing.
11
+ # # so work around it by emulating part of the login shell that loads modules environment
12
+ # if [ -z $(type -t module) ]
13
+ # then
14
+ # . /etc/profile.d/z_modules.sh
15
+ # fi
16
+ module purge
17
+ module load pytorch-gpu/py3/1.8.1
18
+ module load nvtop git-lfs github-cli mc
19
+
20
+ # git prompt
21
+ export GIT_PROMPT_ONLY_IN_REPO=0;
22
+ export GIT_PROMPT_THEME="JZPRod"
23
+ source $six_ALL_CCFRWORK/envs/.bash-git-prompt/gitprompt.sh
24
+
25
+ # We are using common disk spaces for datasets, caches, and experiment dumps:
26
+ #
27
+ #- Code, cache and datasets -> `$six_ALL_CCFRWORK/cache_dir` and ``$six_ALL_CCFRWORK/datasets`
28
+ #- Experiment dumps -> `$six_ALL_CCFRWORK/experiments`
29
+
30
+ # specific caches
31
+
32
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
33
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
34
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
35
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
36
+
37
+ export DATASETS_CUSTOM=$six_ALL_CCFRWORK/datasets-custom
38
+
39
+ ### CONDA ###
40
+
41
+ # >>> conda initialize >>>
42
+ # !! Contents within this block are managed by 'conda init' !!
43
+ __conda_setup="$('/gpfslocalsup/pub/anaconda-py3/2020.02/bin/conda' 'shell.bash' 'hook' 2> /dev/null)"
44
+ if [ $? -eq 0 ]; then
45
+ eval "$__conda_setup"
46
+ else
47
+ if [ -f "/gpfslocalsup/pub/anaconda-py3/2020.02/etc/profile.d/conda.sh" ]; then
48
+ . "/gpfslocalsup/pub/anaconda-py3/2020.02/etc/profile.d/conda.sh"
49
+ else
50
+ export PATH="/gpfslocalsup/pub/anaconda-py3/2020.02/bin:$PATH"
51
+ fi
52
+ fi
53
+ unset __conda_setup
54
+ # <<< conda initialize <<<
55
+
56
+ export CONDA_ENVS_PATH=$six_ALL_CCFRWORK/conda
57
+ conda activate tr1-13B
train/tr1-13B-base/tr1-13B-hub-sync-logs.slurm ADDED
@@ -0,0 +1,23 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=tr1-13B-hub-sync-logs # job name
3
+ #SBATCH --ntasks=1 # number of MP tasks
4
+ #SBATCH --nodes=1 # number of nodes
5
+ #SBATCH --cpus-per-task=1 # number of cores per task
6
+ #SBATCH --hint=nomultithread # we get physical cores not logical
7
+ #SBATCH --time=1:00:00 # maximum execution time (HH:MM:SS)
8
+ #SBATCH --output=%x-%j.out # output file name
9
+ #SBATCH --partition=compil
10
+ #SBATCH --account=six@cpu
11
+
12
+ echo "START TIME: $(date)"
13
+
14
+ source $six_ALL_CCFRWORK/code/tr1-13B/bigscience/train/tr1-13B-base/start-tr1-13B
15
+ module load git-lfs
16
+
17
+ DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr1-13B
18
+ LOGS_PATH=$DATA_OUTPUT_PATH/logs
19
+ BIG_SCIENCE_REPO_PATH=$six_ALL_CCFRWORK/code/tr1-13B/bigscience
20
+
21
+ $BIG_SCIENCE_REPO_PATH/tools/hub-sync.py --repo-path $LOGS_PATH --patterns '*.txt' -d
22
+
23
+ echo "END TIME: $(date)"
train/tr1-13B-base/tr1-13B-hub-sync-tensorboard.slurm ADDED
@@ -0,0 +1,23 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=tr1-13B-hub-sync-tensorboard # job name
3
+ #SBATCH --ntasks=1 # number of MP tasks
4
+ #SBATCH --nodes=1 # number of nodes
5
+ #SBATCH --cpus-per-task=1 # number of cores per task
6
+ #SBATCH --hint=nomultithread # we get physical cores not logical
7
+ #SBATCH --time=2:00:00 # maximum execution time (HH:MM:SS)
8
+ #SBATCH --output=%x-%j.out # output file name
9
+ #SBATCH --partition=compil
10
+ #SBATCH --account=six@cpu
11
+
12
+ echo "START TIME: $(date)"
13
+
14
+ source $six_ALL_CCFRWORK/code/tr1-13B/bigscience/train/tr1-13B-base/start-tr1-13B
15
+ module load git-lfs
16
+
17
+ DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr1-13B
18
+ TENSORBOARD_PATH=$DATA_OUTPUT_PATH/tensorboard
19
+ BIG_SCIENCE_REPO_PATH=$six_ALL_CCFRWORK/code/tr1-13B/bigscience
20
+
21
+ $BIG_SCIENCE_REPO_PATH/tools/hub-sync.py --repo-path $TENSORBOARD_PATH --patterns '*tfevents*' -d
22
+
23
+ echo "END TIME: $(date)"
train/tr1-13B-base/tr1-13B-round1.slurm ADDED
@@ -0,0 +1,174 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=tr1-13B-round3
3
+ #SBATCH --constraint=v100-32g
4
+ #SBATCH --nodes=128
5
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
6
+ #SBATCH --cpus-per-task=40 # number of cores per tasks
7
+ #SBATCH --hint=nomultithread # we get physical cores not logical
8
+ #SBATCH --gres=gpu:4 # number of gpus
9
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
10
+ #SBATCH --output=%x-%j.out # output file name
11
+ #SBATCH --account=six@v100
12
+
13
+ set -x -e
14
+
15
+ source $six_ALL_CCFRWORK/code/tr1-13B/bigscience/train/tr1-13B-base/start-tr1-13B
16
+
17
+ echo "START TIME: $(date)"
18
+
19
+ #ROUND=3
20
+
21
+ DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr1-13B
22
+ CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints
23
+ TENSORBOARD_PATH=$DATA_OUTPUT_PATH/tensorboard
24
+ CODECARBON_PATH=$DATA_OUTPUT_PATH/codecarbon
25
+ LOGS_PATH=$DATA_OUTPUT_PATH/logs
26
+
27
+ MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRWORK/code/tr1-13B/Megatron-DeepSpeed-tr1-13B/
28
+
29
+ VOCAB_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-vocab.json
30
+ MERGE_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-merges.txt
31
+ DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/oscar-en/meg-gpt2_text_document
32
+
33
+ cd $MEGATRON_DEEPSPEED_REPO
34
+
35
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
36
+ MASTER_PORT=6000
37
+
38
+ GPUS_PER_NODE=4
39
+ NNODES=128 # switch to 128
40
+ TP_SIZE=2 # always fixed to the size of a single node
41
+ PP_SIZE=4 # NLAYERS must be a multiple of PP_SIZE here
42
+ #DP_SIZE=$NNODES*$GPUS_PER_NODE/($PP_SIZE*$TP_SIZE) # will get derived automatically by trainer
43
+
44
+ # GLOBAL_BATCH_SIZE has to be divisible by MICRO_BATCH_SIZE*DP_size
45
+ # GLOBAL_BATCH_SIZE=$(($MICRO_BATCH_SIZE*$GAS*$DP_SIZE)) - GAS is auto-derived by deepspeed
46
+ MICRO_BATCH_SIZE=1
47
+ GLOBAL_BATCH_SIZE=1024
48
+
49
+ NLAYERS=40
50
+ NHIDDEN=5120
51
+ NHEADS=32
52
+ FFN_HIDDEN_SIZE=20480
53
+ SEQ_LEN=2048
54
+ VOCAB_SIZE=50257
55
+
56
+ SAVE_INTERVAL=1500
57
+
58
+ OPTIMIZER_ARGS=" \
59
+ --optimizer adam \
60
+ --adam-beta1 0.9 \
61
+ --adam-beta2 0.999 \
62
+ --adam-eps 1e-8 \
63
+ --lr 1e-4 \
64
+ --min-lr 1e-5 \
65
+ --lr-decay-style cosine \
66
+ --lr-decay-samples 126_953_125 \
67
+ --lr-warmup-samples 216_320 \
68
+ --clip-grad 1.0 \
69
+ --weight-decay 1e-1 \
70
+ "
71
+
72
+ EXIT_OPTS=" \
73
+ --exit-duration-in-mins 1190 \
74
+ "
75
+
76
+ GPT_ARGS=" \
77
+ --num-layers $NLAYERS \
78
+ --hidden-size $NHIDDEN \
79
+ --ffn-hidden-size $FFN_HIDDEN_SIZE \
80
+ --num-attention-heads $NHEADS \
81
+ --seq-length $SEQ_LEN \
82
+ --max-position-embeddings $SEQ_LEN \
83
+ --micro-batch-size $MICRO_BATCH_SIZE \
84
+ --rampup-batch-size 16 16 5_000_000 \
85
+ --global-batch-size $GLOBAL_BATCH_SIZE \
86
+ --train-samples 300_000_000 \
87
+ --vocab-file $VOCAB_FILE \
88
+ --merge-file $MERGE_FILE \
89
+ --loss-scale 12 \
90
+ --clip-grad 1.0 \
91
+ --fp16 \
92
+ --checkpoint-activations \
93
+ --seed 42
94
+ $OPTIMIZER_ARGS \
95
+ $EXIT_OPTS \
96
+ "
97
+
98
+ OUTPUT_ARGS=" \
99
+ --log-interval 10 \
100
+ --save-interval $SAVE_INTERVAL \
101
+ --eval-interval 1000 \
102
+ --eval-iters 5 \
103
+ --codecarbon-dir $CODECARBON_PATH \
104
+ --tensorboard-dir $TENSORBOARD_PATH \
105
+ --tensorboard-queue-size 5 \
106
+ --log-timers-to-tensorboard \
107
+ --log-batch-size-to-tensorboard \
108
+ --log-validation-ppl-to-tensorboard \
109
+ "
110
+
111
+ ZERO_STAGE=1
112
+
113
+ config_json="./ds_config.$SLURM_JOBID.json"
114
+
115
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
116
+ cat <<EOT > $config_json
117
+ {
118
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
119
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
120
+ "gradient_clipping": 1.0,
121
+ "zero_optimization": {
122
+ "stage": $ZERO_STAGE
123
+ },
124
+ "fp16": {
125
+ "enabled": true,
126
+ "loss_scale": 0,
127
+ "loss_scale_window": 500,
128
+ "hysteresis": 2,
129
+ "min_loss_scale": 1,
130
+ "initial_scale_power": 12
131
+ },
132
+ "steps_per_print": 2000,
133
+ "wall_clock_breakdown": false
134
+ }
135
+ EOT
136
+
137
+
138
+ DEEPSPEED_ARGS=" \
139
+ --deepspeed \
140
+ --deepspeed_config ${config_json} \
141
+ --zero-stage ${ZERO_STAGE} \
142
+ --deepspeed-activation-checkpointing \
143
+ "
144
+
145
+ export LAUNCHER="python -u -m torch.distributed.launch \
146
+ --nproc_per_node $GPUS_PER_NODE \
147
+ --nnodes $NNODES \
148
+ --master_addr $MASTER_ADDR \
149
+ --master_port $MASTER_PORT \
150
+ "
151
+
152
+ export CMD=" \
153
+ `pwd`/pretrain_gpt.py \
154
+ --tensor-model-parallel-size $TP_SIZE \
155
+ --pipeline-model-parallel-size $PP_SIZE \
156
+ $GPT_ARGS \
157
+ $OUTPUT_ARGS \
158
+ --save $CHECKPOINT_PATH \
159
+ --load $CHECKPOINT_PATH \
160
+ --data-path $DATA_PATH \
161
+ --data-impl mmap \
162
+ --split 949,50,1 \
163
+ --distributed-backend nccl \
164
+ $DEEPSPEED_ARGS \
165
+ "
166
+
167
+ echo $CMD
168
+
169
+ # to debug - add echo (it exits and prints what it would have launched)
170
+ clear; srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' 2>&1 | tee -a $LOGS_PATH/main_log.txt
171
+
172
+ echo "END TIME: $(date)"
173
+
174
+ #
train/tr1-13B-base/tr1-13B-short.slurm ADDED
@@ -0,0 +1,189 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=tr1-13B-short
3
+ #SBATCH --constraint=v100-32g
4
+ #SBATCH --nodes=2
5
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
6
+ #SBATCH --cpus-per-task=40 # number of cores per tasks
7
+ #SBATCH --hint=nomultithread # we get physical cores not logical
8
+ #SBATCH --gres=gpu:4 # number of gpus
9
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
10
+ #SBATCH --output=%x-%j.out # output file name
11
+ #SBATCH --account=six@v100
12
+
13
+
14
+
15
+ # This is the same as the main script but pre-configured for a small model that can be easily tested
16
+ # on 1 or 2 nodes, which is handy to verify everything works before using it on the main training.
17
+ #
18
+ # It's also useful for pre-building megatron CUDA kernels if and when things get borked and it gets stuck in building kernels.
19
+ #
20
+ # Change to NNODES=1 if needed
21
+ #
22
+ # to allocate (change to 2 for NNODES=2)
23
+ # salloc --constraint=v100-32g --account=six@v100 --nodes=1 --ntasks=1 --cpus-per-task=40 --gres=gpu:4 --hint=nomultithread --time=6:00:00 bash
24
+
25
+ source $six_ALL_CCFRWORK/code/tr1-13B/bigscience/train/tr1-13B-base/start-tr1-13B
26
+
27
+ set -x -e
28
+
29
+ echo "START TIME: $(date)"
30
+
31
+ #ROUND=3
32
+
33
+ DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr1-13B-test
34
+ CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints
35
+ TENSORBOARD_PATH=$DATA_OUTPUT_PATH/tensorboard
36
+ CODECARBON_PATH=$DATA_OUTPUT_PATH/codecarbon
37
+ LOGS_PATH=$DATA_OUTPUT_PATH/logs
38
+
39
+ MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRWORK/code/tr1-13B/Megatron-DeepSpeed-tr1-13B/
40
+
41
+ VOCAB_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-vocab.json
42
+ MERGE_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-merges.txt
43
+ DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/oscar-en/meg-gpt2_text_document
44
+
45
+ cd $MEGATRON_DEEPSPEED_REPO
46
+
47
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
48
+ MASTER_PORT=6000
49
+
50
+ GPUS_PER_NODE=4
51
+ NNODES=2 # switch to 64
52
+ TP_SIZE=2 # always fixed to the size of a single node
53
+ PP_SIZE=2 # NLAYERS must be a multiple of PP_SIZE here
54
+ #DP_SIZE=$NNODES*$GPUS_PER_NODE/($PP_SIZE*$TP_SIZE) # will get derived automatically by trainer
55
+
56
+ # GLOBAL_BATCH_SIZE has to be divisible by MICRO_BATCH_SIZE*DP_size
57
+ # GLOBAL_BATCH_SIZE=$(($MICRO_BATCH_SIZE*$GAS*$DP_SIZE)) - GAS is auto-derived by deepspeed
58
+ MICRO_BATCH_SIZE=1
59
+ GLOBAL_BATCH_SIZE=64
60
+
61
+ NLAYERS=8
62
+ NHIDDEN=512
63
+ NHEADS=8
64
+ FFN_HIDDEN_SIZE=2048
65
+ SEQ_LEN=512
66
+ VOCAB_SIZE=50257
67
+
68
+ SAVE_INTERVAL=2000
69
+
70
+
71
+ OPTIMIZER_ARGS=" \
72
+ --optimizer adam \
73
+ --adam-beta1 0.9 \
74
+ --adam-beta2 0.999 \
75
+ --adam-eps 1e-8 \
76
+ --lr 1e-4 \
77
+ --min-lr 1e-5 \
78
+ --lr-decay-style cosine \
79
+ --lr-decay-samples 126_953_125 \
80
+ --lr-warmup-samples 216_320 \
81
+ --clip-grad 1.0 \
82
+ --weight-decay 1e-1 \
83
+ "
84
+
85
+ EXIT_OPTS=" \
86
+ --exit-duration-in-mins 1190 \
87
+ "
88
+
89
+ GPT_ARGS=" \
90
+ --num-layers $NLAYERS \
91
+ --hidden-size $NHIDDEN \
92
+ --ffn-hidden-size $FFN_HIDDEN_SIZE \
93
+ --num-attention-heads $NHEADS \
94
+ --seq-length $SEQ_LEN \
95
+ --max-position-embeddings $SEQ_LEN \
96
+ --micro-batch-size $MICRO_BATCH_SIZE \
97
+ --rampup-batch-size 16 16 5_000_000 \
98
+ --global-batch-size $GLOBAL_BATCH_SIZE \
99
+ --train-samples 300_000_000 \
100
+ --vocab-file $VOCAB_FILE \
101
+ --merge-file $MERGE_FILE \
102
+ --loss-scale 12 \
103
+ --clip-grad 1.0 \
104
+ --fp16 \
105
+ --checkpoint-activations \
106
+ --seed 42
107
+ $OPTIMIZER_ARGS \
108
+ $EXIT_OPTS \
109
+ "
110
+
111
+ OUTPUT_ARGS=" \
112
+ --log-interval 10 \
113
+ --save-interval $SAVE_INTERVAL \
114
+ --eval-interval 1000 \
115
+ --eval-iters 5 \
116
+ --codecarbon-dir $CODECARBON_PATH \
117
+ --tensorboard-dir $TENSORBOARD_PATH \
118
+ --tensorboard-queue-size 5 \
119
+ --log-timers-to-tensorboard \
120
+ --log-batch-size-to-tensorboard \
121
+ --log-validation-ppl-to-tensorboard \
122
+ "
123
+
124
+ ZERO_STAGE=1
125
+
126
+ config_json="./ds_config.$SLURM_JOBID.json"
127
+
128
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
129
+ cat <<EOT > $config_json
130
+ {
131
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
132
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
133
+ "gradient_clipping": 1.0,
134
+ "zero_optimization": {
135
+ "stage": $ZERO_STAGE
136
+ },
137
+ "fp16": {
138
+ "enabled": true,
139
+ "loss_scale": 0,
140
+ "loss_scale_window": 500,
141
+ "hysteresis": 2,
142
+ "min_loss_scale": 1,
143
+ "initial_scale_power": 12
144
+ },
145
+ "steps_per_print": 2000,
146
+ "wall_clock_breakdown": false
147
+ }
148
+ EOT
149
+
150
+
151
+ DEEPSPEED_ARGS=" \
152
+ --deepspeed \
153
+ --deepspeed_config ${config_json} \
154
+ --zero-stage ${ZERO_STAGE} \
155
+ --deepspeed-activation-checkpointing \
156
+ "
157
+
158
+ export LAUNCHER="python -u -m torch.distributed.launch \
159
+ --nproc_per_node $GPUS_PER_NODE \
160
+ --nnodes $NNODES \
161
+ --master_addr $MASTER_ADDR \
162
+ --master_port $MASTER_PORT \
163
+ "
164
+
165
+ # /usr/bin/env PYTHONPATH="." `pwd`/pretrain_gpt.py \
166
+ export CMD=" \
167
+ `pwd`/pretrain_gpt.py \
168
+ --tensor-model-parallel-size $TP_SIZE \
169
+ --pipeline-model-parallel-size $PP_SIZE \
170
+ $GPT_ARGS \
171
+ $OUTPUT_ARGS \
172
+ --save $CHECKPOINT_PATH \
173
+ --load $CHECKPOINT_PATH \
174
+ --data-path $DATA_PATH \
175
+ --data-impl mmap \
176
+ --split 949,50,1 \
177
+ --distributed-backend nccl \
178
+ $DEEPSPEED_ARGS \
179
+ "
180
+
181
+ echo $CMD
182
+
183
+ # to debug - add echo (it exits and prints what it would have launched)
184
+ clear; srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD'
185
+ #2>&1 | tee -a $LOGS_PATH/main_log.txt
186
+
187
+ echo "END TIME: $(date)"
188
+
189
+ #
train/tr1-13B-base/tr1-13B-slurm-status.slurm ADDED
@@ -0,0 +1,23 @@
+ #!/bin/bash
+ #SBATCH --job-name=tr1-13B-slurm-status    # job name
+ #SBATCH --ntasks=1                         # number of MP tasks
+ #SBATCH --nodes=1                          # number of nodes
+ #SBATCH --cpus-per-task=1                  # number of cores per task
+ #SBATCH --hint=nomultithread               # we get physical cores not logical
+ #SBATCH --time=0:30:00                     # maximum execution time (HH:MM:SS)
+ #SBATCH --output=%x-%j.out                 # output file name
+ #SBATCH --partition=compil
+ #SBATCH --account=six@cpu
+
+ echo "START TIME: $(date)"
+
+ DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr1-13B
+ LOGS_PATH=$DATA_OUTPUT_PATH/logs
+ MAIN_LOG_FILE=$LOGS_PATH/main_log.txt
+ BIG_SCIENCE_REPO_PATH=$six_ALL_CCFRWORK/code/tr1-13B/bigscience
+ # XXX: fix me on the next schedule when the name is fixed to remove .slurm
+ WATCH_SLURM_NAME=tr1-13B-round3
+
+ $BIG_SCIENCE_REPO_PATH/tools/slurm-status.py --job-name $WATCH_SLURM_NAME 2>&1 | tee -a $MAIN_LOG_FILE
+
+ echo "END TIME: $(date)"
train/tr10-13B-ml/README.md ADDED
@@ -0,0 +1,111 @@
+ # tr10 13B ML
+
+ ## setup/tune up
+
+ To interactively tune up the setup:
+
+ ```
+ salloc --constraint=v100-32g --account=six@v100 --nodes=4 --ntasks=4 --cpus-per-task=40 --gres=gpu:4 --hint=nomultithread --time=120 bash --rcfile $six_ALL_CCFRWORK/code/tr10-13B/bigscience/train/tr10-13B-ml/start-tr10-13B
+ ```
+
+ Conda setup:
+
+ ```
+ export CONDA_ENVS_PATH=$six_ALL_CCFRWORK/conda
+
+ conda create -y -n tr10-13B python=3.8
+ conda activate tr10-13B
+
+ pip3 install torch==1.10.0+cu113 torchvision==0.11.1+cu113 torchaudio==0.10.0+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html
+
+ mkdir $six_ALL_CCFRWORK/code/tr10-13B
+ cd $six_ALL_CCFRWORK/code/tr10-13B
+
+ cd $six_ALL_CCFRWORK/code/tr10-13B/apex
+ ./build.sh
+
+ pip install deepspeed
+
+ cd $six_ALL_CCFRWORK/code/tr10-13B/DeepSpeed
+ ./build.sh
+
+ pip install transformers
+
+ cd $six_ALL_CCFRWORK/code/tr10-13B/transformers
+ pip install -e .
+
+ cd $six_ALL_CCFRWORK/code/tr10-13B/megatron-lm
+ pip install -r requirements.txt
+ ```
+
+ The environment setup script is [start-tr10-13B](./start-tr10-13B); activate it with `source start-tr10-13B`.
+
+ ### Configs
+
+ This configuration works (the derived data-parallel size and gradient-accumulation steps are sketched right below):
+ ```
+ NNODES=4
+ TP_SIZE=4
+ PP_SIZE=4
+ ```
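+
+ For orientation, here is a quick sketch of how the data-parallel size and the gradient-accumulation steps (GAS) fall out of that config. It only re-applies the bookkeeping already used in the accompanying slurm scripts (`DP_SIZE = NNODES*GPUS_PER_NODE/(PP_SIZE*TP_SIZE)` and `GLOBAL_BATCH_SIZE = MICRO_BATCH_SIZE*GAS*DP_SIZE`, with GAS auto-derived by DeepSpeed), using the `MICRO_BATCH_SIZE=1` / `GLOBAL_BATCH_SIZE=2048` values from [tr10-13B.slurm](./tr10-13B.slurm):
+
+ ```
+ # illustrative recomputation only - the trainer/DeepSpeed derive these themselves
+ NNODES, GPUS_PER_NODE = 4, 4
+ TP_SIZE, PP_SIZE = 4, 4
+ MICRO_BATCH_SIZE, GLOBAL_BATCH_SIZE = 1, 2048
+
+ world_size = NNODES * GPUS_PER_NODE                       # 16 GPUs
+ dp_size = world_size // (TP_SIZE * PP_SIZE)               # 1 data-parallel replica
+ # GLOBAL_BATCH_SIZE has to be divisible by MICRO_BATCH_SIZE * dp_size
+ assert GLOBAL_BATCH_SIZE % (MICRO_BATCH_SIZE * dp_size) == 0
+ gas = GLOBAL_BATCH_SIZE // (MICRO_BATCH_SIZE * dp_size)   # 2048 accumulation steps
+ print(dp_size, gas)
+ ```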
+
+ ### Tokenizer
+
+ The tokenizer lives at https://huggingface.co/teven/test_150k_vocab_tokenizer/tree/main
+
+ So instead of running with:
+ ```
+ --vocab-file $VOCAB_FILE \
+ --merge-file $MERGE_FILE \
+ ```
+
+ You should run with:
+ ```
+ --tokenizer-type PretrainedFromHF \
+ --tokenizer-name-or-path teven/test_150k_vocab_tokenizer \
+ ```
+
+ A 10k-sample C4 subset has been preprocessed with this tokenizer; you can use it with:
+ ```
+ DATA_PATH=$six_ALL_CCFRSCRATCH/datasets-custom/150k_vocab_size_test/c4_10k_samples_150k_vocab_size
+ ```
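+
+ To sanity-check the tokenizer locally, a minimal sketch (assuming `transformers` is installed as in the conda setup above, and the hub repo is reachable or already cached given the offline env vars used on JZ):
+
+ ```
+ from transformers import AutoTokenizer
+
+ # load the multilingual test tokenizer referenced above
+ tok = AutoTokenizer.from_pretrained("teven/test_150k_vocab_tokenizer")
+ print(len(tok))  # expected to be ~150k, matching VOCAB_SIZE=150000 in tr10-13B.slurm
+ print(tok("Un petit test multilingue.")["input_ids"][:10])
+ ```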
+
+ ## Config
+
+ Julien Launay:
+
+ (1) The main difference will be multilinguality, and the larger vocabulary.
+ (2) For PrefixLM, we are not sure yet, as for now prefix is underperforming the vanilla model and it has some quirks. Thomas is working on a potential fix. We will keep you updated, but I think you can start working without prefix.
+ (3) Embeddings. ALiBi is still underperforming all others. Maybe we could consider going with rotary? @Iz Beltagy what's your opinion on this? Rotary probably won't change your benchmark significantly, but will degrade performance by a few percent across the board.
+ We don't have a conclusive answer yet, but neither should affect model size. If anything, they will make the model a tiny bit smaller.
+ (4) Activation. We need to evaluate the GeGLU run. GeGLU would bring a significant change to the size of the MLPs, which would be significant for your benchmark.
+ It shouldn't change the overall model size but will change the size of some of the FF layers, so it might change how TP works.
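+
+ To make the GeGLU point concrete, here is an illustrative back-of-the-envelope comparison of feed-forward shapes at `NHIDDEN=5120`. This is not the tr10 decision, just the usual "GLU Variants Improve Transformer" convention in which the FFN width is shrunk to roughly `8/3*h` so the parameter count stays about the same; the exact width (and how it divides across TP ranks) is precisely the open question above.
+
+ ```
+ # illustrative only - assumes the common "iso-parameter" GeGLU sizing
+ h = 5120
+
+ # vanilla GeLU MLP: W_in (h x 4h) + W_out (4h x h)
+ gelu_params = h * 4 * h + 4 * h * h
+
+ # GeGLU MLP: W_in and W_gate (h x ffn) + W_out (ffn x h), with ffn ~ 8/3 * h
+ ffn_geglu = int(8 * h / 3)           # 13653 - note: not a multiple of typical TP sizes
+ geglu_params = 2 * h * ffn_geglu + ffn_geglu * h
+
+ print(gelu_params, geglu_params)     # ~210M in both cases, but different layer shapes
+ ```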
+
+ ### `--init-method-std`
+
+ `--init-method-std 0.00884`
+
+ We derived this from `NHIDDEN=5120`:
+
+ `0.00884 = sqrt(2/(5120*5))` (from the ScaleNorm paper https://arxiv.org/abs/1910.05895)
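+
+ As a quick check of that arithmetic (the `5*NHIDDEN` factor is the ScaleNorm-style heuristic quoted above):
+
+ ```
+ import math
+ print(math.sqrt(2 / (5120 * 5)))  # 0.008838..., rounded to the 0.00884 used above
+ ```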
+
+ ### `NHEADS`
+
+ `NHEADS=40`; the rationale for this choice still needs to be written up.
+
+ ### `--embed-layernorm`
+
+ We want this because it solved the instability problems of the 104B training.
+
+ If we choose to train with it, we will need to add the corresponding code to the HF GPT2 conversion of the model.
+
+ ### `--partition-activations`
+
+ Can be used to shard activations across GPUs to save more GPU memory.
train/tr10-13B-ml/start-tr10-13B ADDED
@@ -0,0 +1,58 @@
+ # This is the production environment setup script for JZ / tr10-13B training
+ #
+ # Activate with:
+ #
+ # source ./start-tr10-13B
+ #
+ #
+
+ # # if this session isn't run via a login shell, which is the case when running a
+ # # command which is not a shell via ssh, the bash function `module` will be missing.
+ # # so work around it by emulating the part of the login shell that loads the modules environment
+ # if [ -z $(type -t module) ]
+ # then
+ #     . /etc/profile.d/z_modules.sh
+ # fi
+ module purge
+ module load pytorch-gpu/py3/1.9.0
+ module load nvtop git-lfs github-cli mc
+
+ # git prompt
+ export GIT_PROMPT_ONLY_IN_REPO=0;
+ export GIT_PROMPT_THEME="JZPRod"
+ source $six_ALL_CCFRWORK/envs/.bash-git-prompt/gitprompt.sh
+
+ # We are using common disk spaces for datasets, caches, and experiment dumps:
+ #
+ # - Code, cache and datasets -> `$six_ALL_CCFRWORK/cache_dir` and `$six_ALL_CCFRWORK/datasets`
+ # - Experiment dumps -> `$six_ALL_CCFRWORK/experiments`
+
+ # specific caches
+
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
+
+ export DATASETS_CUSTOM=$six_ALL_CCFRWORK/datasets-custom
+
+ ### CONDA ###
+
+ # >>> conda initialize >>>
+ # !! Contents within this block are managed by 'conda init' !!
+ __conda_setup="$('/gpfslocalsup/pub/anaconda-py3/2020.02/bin/conda' 'shell.bash' 'hook' 2> /dev/null)"
+ if [ $? -eq 0 ]; then
+     eval "$__conda_setup"
+ else
+     if [ -f "/gpfslocalsup/pub/anaconda-py3/2020.02/etc/profile.d/conda.sh" ]; then
+         . "/gpfslocalsup/pub/anaconda-py3/2020.02/etc/profile.d/conda.sh"
+     else
+         export PATH="/gpfslocalsup/pub/anaconda-py3/2020.02/bin:$PATH"
+     fi
+ fi
+ unset __conda_setup
+ # <<< conda initialize <<<
+
+ export CONDA_ENVS_PATH=$six_ALL_CCFRWORK/conda
+ conda activate base
+ conda activate tr10-13B
train/tr10-13B-ml/tr10-13B.slurm ADDED
@@ -0,0 +1,182 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=tr10-13B
3
+ #SBATCH --constraint=v100-32g
4
+ #SBATCH --nodes=4
5
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
6
+ #SBATCH --cpus-per-task=40 # number of cores per tasks
7
+ #SBATCH --hint=nomultithread # we get physical cores not logical
8
+ #SBATCH --gres=gpu:4 # number of gpus
9
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
10
+ #SBATCH --output=%x-%j.out # output file name
11
+ #SBATCH --account=six@v100
12
+
13
+ set -x -e
14
+
15
+ source $six_ALL_CCFRWORK/code/tr10-13B/bigscience/train/tr10-13B-ml/start-tr10-13B
16
+
17
+ echo "START TIME: $(date)"
18
+
19
+ export HF_DATASETS_OFFLINE=1
20
+ export TRANSFORMERS_OFFLINE=1
21
+
22
+ DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr10-13B
23
+ CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints
24
+ REPO_PATH=$DATA_OUTPUT_PATH/tr10-13B-logs
25
+ TENSORBOARD_PATH=$REPO_PATH/tensorboard
26
+ LOGS_PATH=$REPO_PATH/logs
27
+ mkdir -p $LOGS_PATH
28
+
29
+ MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRWORK/code/tr10-13B/Megatron-DeepSpeed
30
+
31
+ TOKENIZER_NAME=teven/test_150k_vocab_tokenizer
32
+ DATA_PATH=$six_ALL_CCFRSCRATCH/datasets-custom/150k_vocab_size_test/c4_10k_samples_150k_vocab_size
33
+
34
+ cd $MEGATRON_DEEPSPEED_REPO
35
+
36
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
37
+ MASTER_PORT=6000
38
+
39
+ GPUS_PER_NODE=4
40
+ NNODES=4 # switch to 128
41
+ TP_SIZE=2 # always fixed to the size of a single node
42
+ PP_SIZE=4 # NLAYERS must be a multiple of PP_SIZE here
43
+
44
+ MICRO_BATCH_SIZE=1
45
+ GLOBAL_BATCH_SIZE=2048
46
+
47
+ NLAYERS=40
48
+ NHIDDEN=5120
49
+ NHEADS=32
50
+ SEQ_LEN=2048
51
+ VOCAB_SIZE=150000
52
+
53
+ SAVE_INTERVAL=300
54
+
55
+ OPTIMIZER_ARGS=" \
56
+ --optimizer adam \
57
+ --adam-beta1 0.9 \
58
+ --adam-beta2 0.95 \
59
+ --adam-eps 1e-8 \
60
+ --lr 6e-5 \
61
+ --min-lr 6e-6 \
62
+ --lr-decay-style cosine \
63
+ --lr-decay-samples 126_953_125 \
64
+ --lr-warmup-samples 216_320 \
65
+ --clip-grad 1.0 \
66
+ --weight-decay 1e-1 \
67
+ "
68
+
69
+ EXIT_OPTS=" \
70
+ --exit-duration-in-mins 1190 \
71
+ "
72
+
73
+
74
+ GPT_ARGS=" \
75
+ --num-layers $NLAYERS \
76
+ --hidden-size $NHIDDEN \
77
+ --num-attention-heads $NHEADS \
78
+ --seq-length $SEQ_LEN \
79
+ --max-position-embeddings $SEQ_LEN \
80
+ --micro-batch-size $MICRO_BATCH_SIZE \
81
+ --rampup-batch-size 16 16 6_000_000 \
82
+ --global-batch-size $GLOBAL_BATCH_SIZE \
83
+ --train-samples 300_000_000 \
84
+ --tokenizer-type PretrainedFromHF \
85
+ --tokenizer-name-or-path $TOKENIZER_NAME \
86
+ --loss-scale 12 \
87
+ --init-method-std 0.00884 \
88
+ --fp16 \
89
+ --checkpoint-activations \
90
+ --seed 43 \
91
+ $OPTIMIZER_ARGS \
92
+ $EXIT_OPTS \
93
+ "
94
+
95
+ OUTPUT_ARGS=" \
96
+ --log-interval 1 \
97
+ --save-interval $SAVE_INTERVAL \
98
+ --eval-interval 1000 \
99
+ --eval-iters 5 \
100
+ --tensorboard-dir $TENSORBOARD_PATH \
101
+ --tensorboard-queue-size 5 \
102
+ --log-timers-to-tensorboard \
103
+ --log-batch-size-to-tensorboard \
104
+ --log-validation-ppl-to-tensorboard \
105
+ --log-level info \
106
+ --log-level-replica error \
107
+ "
108
+
109
+ ZERO_STAGE=1
110
+
111
+ config_json="./ds_config.$SLURM_JOBID.json"
112
+
113
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
114
+ cat <<EOT > $config_json
115
+ {
116
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
117
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
118
+ "gradient_clipping": 1.0,
119
+ "zero_optimization": {
120
+ "stage": $ZERO_STAGE
121
+ },
122
+ "fp16": {
123
+ "enabled": true,
124
+ "loss_scale": 0,
125
+ "loss_scale_window": 500,
126
+ "hysteresis": 2,
127
+ "min_loss_scale": 1,
128
+ "initial_scale_power": 12
129
+ },
130
+ "steps_per_print": 2000,
131
+ "wall_clock_breakdown": false
132
+ }
133
+ EOT
134
+
135
+
136
+ DEEPSPEED_ARGS=" \
137
+ --deepspeed \
138
+ --deepspeed_config ${config_json} \
139
+ --zero-stage ${ZERO_STAGE} \
140
+ --deepspeed-activation-checkpointing \
141
+ "
142
+
143
+ # export LAUNCHER="python -u -m torch.distributed.launch \
144
+ # --nproc_per_node $GPUS_PER_NODE \
145
+ # --nnodes $NNODES \
146
+ # --master_addr $MASTER_ADDR \
147
+ # --master_port $MASTER_PORT \
148
+ # "
149
+
150
+ export LAUNCHER="python -u -m torch.distributed.run \
151
+ --nproc_per_node $GPUS_PER_NODE \
152
+ --nnodes $NNODES \
153
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
154
+ --rdzv_backend c10d \
155
+ --max_restarts 0 \
156
+ "
157
+
158
+ export CMD=" \
159
+ `pwd`/pretrain_gpt.py \
160
+ --tensor-model-parallel-size $TP_SIZE \
161
+ --pipeline-model-parallel-size $PP_SIZE \
162
+ $GPT_ARGS \
163
+ $OUTPUT_ARGS \
164
+ --save $CHECKPOINT_PATH \
165
+ --load $CHECKPOINT_PATH \
166
+ --data-path $DATA_PATH \
167
+ --data-impl mmap \
168
+ --split 900,100,0 \
169
+ --distributed-backend nccl \
170
+ $DEEPSPEED_ARGS \
171
+ "
172
+
173
+ export OMP_NUM_THREADS=1 # shut up the launcher warnings
174
+
175
+ echo $CMD
176
+
177
+ # to debug - add echo (it exits and prints what it would have launched)
178
+ clear; srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' 2>&1 | tee -a $LOGS_PATH/main_log.txt
179
+
180
+ echo "END TIME: $(date)"
181
+
182
+ #
train/tr12-1B3-oscar/tr12a-1B3-oscar-en-filtered.slurm ADDED
@@ -0,0 +1,191 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=1B3-oscar-en-filtered.slurm
3
+ #SBATCH --qos=qos_gpu-t3
4
+ #SBATCH --nodes=16
5
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
6
+ #SBATCH --cpus-per-task=40 # number of cores per tasks
7
+ #SBATCH --hint=nomultithread # we get physical cores not logical
8
+ #SBATCH --gres=gpu:4 # number of gpus
9
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
10
+ #SBATCH --output=%x-%a-%j.out # output file name
11
+ #SBATCH --error=%x-%a-%j.out # error file name (same to watch just one file)
12
+ #SBATCH --account=six@v100
13
+ #SBATCH --mail-type=ALL
14
+ #SBATCH [email protected]
15
+
16
+ set -x -e
17
+
18
+
19
+ ROUND=2
20
+ TESTING=0
21
+
22
+ EXPERIMENT_NAME=tr12a-1B3-oscar-en-filtered
23
+ OUTPUT_PATH=$six_ALL_CCFRSCRATCH/synched_exps/$EXPERIMENT_NAME/
24
+ CHECKPOINT_PATH=$OUTPUT_PATH/checkpoints
25
+ REPO_PATH=$OUTPUT_PATH/$EXPERIMENT_NAME-logs
26
+ TENSORBOARD_PATH=$REPO_PATH/tensorboard
27
+ CODECARBON_PATH=$REPO_PATH/codecarbon
28
+ LOGS_PATH=$REPO_PATH/logs
29
+
30
+ MEGATRON_DEEPSPEED_REPO=$ALL_CCFRWORK/code/Megatron-DeepSpeed
31
+
32
+ VOCAB_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-vocab.json
33
+ MERGE_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-merges.txt
34
+ DATA_PATH=/gpfsscratch/rech/six/urd43gx/oscar_en_v2_filtered/preprocessed_data_megatron/oscar_filtered_en_text_document
35
+
36
+ # defining the right environment variables
37
+ source $six_ALL_CCFRWORK/start-prod
38
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
39
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
40
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
41
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
42
+ export HF_DATASETS_OFFLINE=1
43
+ export TRANSFORMERS_OFFLINE=1
44
+ cd $MEGATRON_DEEPSPEED_REPO
45
+
46
+ # so processes know who to talk to
47
+ MASTER_ADDR=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1`
48
+ MASTER_PORT=6000
49
+
50
+ # adjust depending on the number of the nodes
51
+
52
+ # XXX: edit me
53
+ GPUS_PER_NODE=4
54
+ NNODES=16
55
+ PP_SIZE=2 # NLAYERS must be a multiple of PP_SIZE here
56
+ TP_SIZE=1 # always fixed to the size of a single node
57
+ DP_SIZE=$((NNODES*GPUS_PER_NODE/(PP_SIZE*TP_SIZE))) # will get derived automatically by trainer
58
+
59
+ MICRO_BATCH_SIZE=1
60
+ GLOBAL_BATCH_SIZE=512
61
+ TRAIN_ITER=146_484_375
62
+
63
+ NLAYERS=24
64
+ NHIDDEN=2048
65
+ NHEADS=16
66
+ FFN_HIDDEN_SIZE=8192
67
+ SEQ_LEN=2048
68
+
69
+ if [[ ${ROUND} == 1 ]]; then EXIT_INTERVAL=100 SAVE_INTERVAL=10
70
+ elif [[ ${ROUND} == 2 ]]; then SAVE_INTERVAL=1500
71
+ else echo "invalid ROUND: $ROUND"
72
+ fi
73
+
74
+ OPTIMIZER_ARGS=" \
75
+ --optimizer adam \
76
+ --adam-beta1 0.9 \
77
+ --adam-beta2 0.999 \
78
+ --adam-eps 1e-8 \
79
+ --lr 2e-4 \
80
+ --min-lr 1e-5 \
81
+ --lr-decay-style cosine \
82
+ --lr-decay-samples 146_484_375 \
83
+ --lr-warmup-samples 183_105 \
84
+ --clip-grad 1.0 \
85
+ --weight-decay 1e-1 \
86
+ "
87
+
88
+ EXIT_OPTS=" \
89
+ --exit-duration-in-mins 1190 \
90
+ "
91
+
92
+ GPT_ARGS=" \
93
+ --num-layers $NLAYERS \
94
+ --hidden-size $NHIDDEN \
95
+ --num-attention-heads $NHEADS \
96
+ --ffn-hidden-size $FFN_HIDDEN_SIZE \
97
+ --seq-length $SEQ_LEN \
98
+ --max-position-embeddings $SEQ_LEN \
99
+ --micro-batch-size $MICRO_BATCH_SIZE \
100
+ --global-batch-size $GLOBAL_BATCH_SIZE \
101
+ --rampup-batch-size 32 32 2_000_000 \
102
+ --train-samples $TRAIN_ITER \
103
+ --vocab-file $VOCAB_FILE \
104
+ --merge-file $MERGE_FILE \
105
+ --loss-scale 12 \
106
+ --clip-grad 1.0 \
107
+ --fp16 \
108
+ --checkpoint-activations \
109
+ $OPTIMIZER_ARGS \
110
+ $EXIT_OPTS \
111
+ "
112
+
113
+ OUTPUT_ARGS=" \
114
+ --log-interval 200 \
115
+ --save-interval $SAVE_INTERVAL \
116
+ --eval-interval 1000 \
117
+ --eval-iters 100 \
118
+ --tensorboard-dir $TENSORBOARD_PATH \
119
+ --tensorboard-queue-size 5 \
120
+ --log-timers-to-tensorboard \
121
+ --log-batch-size-to-tensorboard \
122
+ --log-validation-ppl-to-tensorboard \
123
+ "
124
+
125
+ ZERO_STAGE=1
126
+
127
+ config_json="./ds_config.$SLURM_JOBID.json"
128
+
129
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
130
+ cat <<EOT > $config_json
131
+ {
132
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
133
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
134
+ "gradient_clipping": 1.0,
135
+ "zero_optimization": {
136
+ "stage": $ZERO_STAGE
137
+ },
138
+ "fp16": {
139
+ "enabled": true,
140
+ "loss_scale": 0,
141
+ "loss_scale_window": 500,
142
+ "hysteresis": 2,
143
+ "min_loss_scale": 1,
144
+ "initial_scale_power": 12
145
+ },
146
+ "steps_per_print": 2000,
147
+ "wall_clock_breakdown": false
148
+ }
149
+ EOT
150
+
151
+
152
+ DEEPSPEED_ARGS=" \
153
+ --deepspeed \
154
+ --deepspeed_config ${config_json} \
155
+ --zero-stage ${ZERO_STAGE} \
156
+ --deepspeed-activation-checkpointing \
157
+ "
158
+
159
+ export LAUNCHER="python -u -m torch.distributed.launch \
160
+ --nproc_per_node $GPUS_PER_NODE \
161
+ --nnodes $NNODES \
162
+ --master_addr $MASTER_ADDR \
163
+ --master_port $MASTER_PORT \
164
+ "
165
+
166
+ export CMD=" \
167
+ `pwd`/pretrain_gpt.py \
168
+ --tensor-model-parallel-size $TP_SIZE \
169
+ --pipeline-model-parallel-size $PP_SIZE \
170
+ $GPT_ARGS \
171
+ $OUTPUT_ARGS \
172
+ --save $CHECKPOINT_PATH \
173
+ --load $CHECKPOINT_PATH \
174
+ --data-path $DATA_PATH \
175
+ --data-impl mmap \
176
+ --split 949,50,1 \
177
+ --distributed-backend nccl \
178
+ $DEEPSPEED_ARGS \
179
+ "
180
+
181
+
182
+ # # clear old checkpoint as it'd mismatch while we sort things out
183
+ # rm -rf $SAVE_CHECKPOINT_PATH
184
+
185
+
186
+ echo $CMD
187
+
188
+ # We create the folder where the logs and codecarbon will be stored.
189
+ mkdir -p $LOGS_PATH
190
+ # to debug - add echo (it exits and prints what it would have launched)
191
+ srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' 2>&1 | tee -a $LOGS_PATH/main_log.txt
train/tr12-1B3-oscar/tr12b-1B3-oscar-en-filtered-dedup.slurm ADDED
@@ -0,0 +1,191 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=1B3-oscar-en-filtered-dedup.slurm
3
+ #SBATCH --qos=qos_gpu-t3
4
+ #SBATCH --nodes=16
5
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
6
+ #SBATCH --cpus-per-task=40 # number of cores per tasks
7
+ #SBATCH --hint=nomultithread # we get physical cores not logical
8
+ #SBATCH --gres=gpu:4 # number of gpus
9
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
10
+ #SBATCH --output=%x-%a-%j.out # output file name
11
+ #SBATCH --error=%x-%a-%j.out # error file name (same to watch just one file)
12
+ #SBATCH --account=six@v100
13
+ #SBATCH --mail-type=ALL
14
+ #SBATCH [email protected]
15
+
16
+ set -x -e
17
+
18
+
19
+ ROUND=2
20
+ TESTING=0
21
+
22
+ EXPERIMENT_NAME=tr12b-1B3-oscar-en-filtered-dedup
23
+ OUTPUT_PATH=$six_ALL_CCFRSCRATCH/synched_exps/$EXPERIMENT_NAME/
24
+ CHECKPOINT_PATH=$OUTPUT_PATH/checkpoints
25
+ REPO_PATH=$OUTPUT_PATH/$EXPERIMENT_NAME-logs
26
+ TENSORBOARD_PATH=$REPO_PATH/tensorboard
27
+ CODECARBON_PATH=$REPO_PATH/codecarbon
28
+ LOGS_PATH=$REPO_PATH/logs
29
+
30
+ MEGATRON_DEEPSPEED_REPO=$ALL_CCFRWORK/code/Megatron-DeepSpeed
31
+
32
+ VOCAB_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-vocab.json
33
+ MERGE_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-merges.txt
34
+ DATA_PATH=/gpfsscratch/rech/six/urd43gx/oscar_en_v2_filtered/preprocessed_data_megatron_filtered_dedup_en/oscar_filtered_dedup_en_text_document
35
+
36
+ # defining the right environment variables
37
+ source $six_ALL_CCFRWORK/start-prod
38
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
39
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
40
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
41
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
42
+ export HF_DATASETS_OFFLINE=1
43
+ export TRANSFORMERS_OFFLINE=1
44
+ cd $MEGATRON_DEEPSPEED_REPO
45
+
46
+ # so processes know who to talk to
47
+ MASTER_ADDR=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1`
48
+ MASTER_PORT=6000
49
+
50
+ # adjust depending on the number of the nodes
51
+
52
+ # XXX: edit me
53
+ GPUS_PER_NODE=4
54
+ NNODES=16
55
+ PP_SIZE=2 # NLAYERS must be a multiple of PP_SIZE here
56
+ TP_SIZE=1 # always fixed to the size of a single node
57
+ DP_SIZE=$((NNODES*GPUS_PER_NODE/(PP_SIZE*TP_SIZE))) # will get derived automatically by trainer
58
+
59
+ MICRO_BATCH_SIZE=1
60
+ GLOBAL_BATCH_SIZE=512
61
+ TRAIN_ITER=146_484_375
62
+
63
+ NLAYERS=24
64
+ NHIDDEN=2048
65
+ NHEADS=16
66
+ FFN_HIDDEN_SIZE=8192
67
+ SEQ_LEN=2048
68
+
69
+ if [[ ${ROUND} == 1 ]]; then EXIT_INTERVAL=100 SAVE_INTERVAL=10
70
+ elif [[ ${ROUND} == 2 ]]; then SAVE_INTERVAL=1500
71
+ else echo "invalid ROUND: $ROUND"
72
+ fi
73
+
74
+ OPTIMIZER_ARGS=" \
75
+ --optimizer adam \
76
+ --adam-beta1 0.9 \
77
+ --adam-beta2 0.999 \
78
+ --adam-eps 1e-8 \
79
+ --lr 2e-4 \
80
+ --min-lr 1e-5 \
81
+ --lr-decay-style cosine \
82
+ --lr-decay-samples 146_484_375 \
83
+ --lr-warmup-samples 183_105 \
84
+ --clip-grad 1.0 \
85
+ --weight-decay 1e-1 \
86
+ "
87
+
88
+ EXIT_OPTS=" \
89
+ --exit-duration-in-mins 1190 \
90
+ "
91
+
92
+ GPT_ARGS=" \
93
+ --num-layers $NLAYERS \
94
+ --hidden-size $NHIDDEN \
95
+ --num-attention-heads $NHEADS \
96
+ --ffn-hidden-size $FFN_HIDDEN_SIZE \
97
+ --seq-length $SEQ_LEN \
98
+ --max-position-embeddings $SEQ_LEN \
99
+ --micro-batch-size $MICRO_BATCH_SIZE \
100
+ --global-batch-size $GLOBAL_BATCH_SIZE \
101
+ --rampup-batch-size 32 32 2_000_000 \
102
+ --train-samples $TRAIN_ITER \
103
+ --vocab-file $VOCAB_FILE \
104
+ --merge-file $MERGE_FILE \
105
+ --loss-scale 12 \
106
+ --clip-grad 1.0 \
107
+ --fp16 \
108
+ --checkpoint-activations \
109
+ $OPTIMIZER_ARGS \
110
+ $EXIT_OPTS \
111
+ "
112
+
113
+ OUTPUT_ARGS=" \
114
+ --log-interval 200 \
115
+ --save-interval $SAVE_INTERVAL \
116
+ --eval-interval 1000 \
117
+ --eval-iters 100 \
118
+ --tensorboard-dir $TENSORBOARD_PATH \
119
+ --tensorboard-queue-size 5 \
120
+ --log-timers-to-tensorboard \
121
+ --log-batch-size-to-tensorboard \
122
+ --log-validation-ppl-to-tensorboard \
123
+ "
124
+
125
+ ZERO_STAGE=1
126
+
127
+ config_json="./ds_config.$SLURM_JOBID.json"
128
+
129
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
130
+ cat <<EOT > $config_json
131
+ {
132
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
133
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
134
+ "gradient_clipping": 1.0,
135
+ "zero_optimization": {
136
+ "stage": $ZERO_STAGE
137
+ },
138
+ "fp16": {
139
+ "enabled": true,
140
+ "loss_scale": 0,
141
+ "loss_scale_window": 500,
142
+ "hysteresis": 2,
143
+ "min_loss_scale": 1,
144
+ "initial_scale_power": 12
145
+ },
146
+ "steps_per_print": 2000,
147
+ "wall_clock_breakdown": false
148
+ }
149
+ EOT
150
+
151
+
152
+ DEEPSPEED_ARGS=" \
153
+ --deepspeed \
154
+ --deepspeed_config ${config_json} \
155
+ --zero-stage ${ZERO_STAGE} \
156
+ --deepspeed-activation-checkpointing \
157
+ "
158
+
159
+ export LAUNCHER="python -u -m torch.distributed.launch \
160
+ --nproc_per_node $GPUS_PER_NODE \
161
+ --nnodes $NNODES \
162
+ --master_addr $MASTER_ADDR \
163
+ --master_port $MASTER_PORT \
164
+ "
165
+
166
+ export CMD=" \
167
+ `pwd`/pretrain_gpt.py \
168
+ --tensor-model-parallel-size $TP_SIZE \
169
+ --pipeline-model-parallel-size $PP_SIZE \
170
+ $GPT_ARGS \
171
+ $OUTPUT_ARGS \
172
+ --save $CHECKPOINT_PATH \
173
+ --load $CHECKPOINT_PATH \
174
+ --data-path $DATA_PATH \
175
+ --data-impl mmap \
176
+ --split 949,50,1 \
177
+ --distributed-backend nccl \
178
+ $DEEPSPEED_ARGS \
179
+ "
180
+
181
+
182
+ # # clear old checkpoint as it'd mismatch while we sort things out
183
+ # rm -rf $SAVE_CHECKPOINT_PATH
184
+
185
+
186
+ echo $CMD
187
+
188
+ # We create the folder where the logs and codecarbon will be stored.
189
+ mkdir -p $LOGS_PATH
190
+ # to debug - add echo (it exits and prints what it would have launched)
191
+ srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' 2>&1 | tee -a $LOGS_PATH/main_log.txt
train/tr12-1B3-oscar/tr12c-1B3-oscar-en-overfiltered.slurm ADDED
@@ -0,0 +1,191 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=1B3-oscar-en-overfiltered.slurm
3
+ #SBATCH --qos=qos_gpu-t3
4
+ #SBATCH --nodes=16
5
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
6
+ #SBATCH --cpus-per-task=40 # number of cores per tasks
7
+ #SBATCH --hint=nomultithread # we get physical cores not logical
8
+ #SBATCH --gres=gpu:4 # number of gpus
9
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
10
+ #SBATCH --output=%x-%a-%j.out # output file name
11
+ #SBATCH --error=%x-%a-%j.out # error file name (same to watch just one file)
12
+ #SBATCH --account=six@v100
13
+ #SBATCH --mail-type=ALL
14
+ #SBATCH [email protected]
15
+
16
+ set -x -e
17
+
18
+
19
+ ROUND=2
20
+ TESTING=0
21
+
22
+ EXPERIMENT_NAME=tr12c-1B3-oscar-en-overfiltered
23
+ OUTPUT_PATH=$six_ALL_CCFRSCRATCH/synched_exps/$EXPERIMENT_NAME/
24
+ CHECKPOINT_PATH=$OUTPUT_PATH/checkpoints
25
+ REPO_PATH=$OUTPUT_PATH/$EXPERIMENT_NAME-logs
26
+ TENSORBOARD_PATH=$REPO_PATH/tensorboard
27
+ CODECARBON_PATH=$REPO_PATH/codecarbon
28
+ LOGS_PATH=$REPO_PATH/logs
29
+
30
+ MEGATRON_DEEPSPEED_REPO=$ALL_CCFRWORK/code/Megatron-DeepSpeed
31
+
32
+ VOCAB_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-vocab.json
33
+ MERGE_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-merges.txt
34
+ DATA_PATH=/gpfsscratch/rech/six/urd43gx/oscar_en_v2_overfiltered/preprocessed_data_megatron/oscar_overfiltered_en_text_document
35
+
36
+ # defining the right environment variables
37
+ source $six_ALL_CCFRWORK/start-prod
38
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
39
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
40
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
41
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
42
+ export HF_DATASETS_OFFLINE=1
43
+ export TRANSFORMERS_OFFLINE=1
44
+ cd $MEGATRON_DEEPSPEED_REPO
45
+
46
+ # so processes know who to talk to
47
+ MASTER_ADDR=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1`
48
+ MASTER_PORT=6000
49
+
50
+ # adjust depending on the number of the nodes
51
+
52
+ # XXX: edit me
53
+ GPUS_PER_NODE=4
54
+ NNODES=16
55
+ PP_SIZE=2 # NLAYERS must be a multiple of PP_SIZE here
56
+ TP_SIZE=1 # always fixed to the size of a single node
57
+ DP_SIZE=$((NNODES*GPUS_PER_NODE/(PP_SIZE*TP_SIZE))) # will get derived automatically by trainer
58
+
59
+ MICRO_BATCH_SIZE=1
60
+ GLOBAL_BATCH_SIZE=512
61
+ TRAIN_ITER=146_484_375
62
+
63
+ NLAYERS=24
64
+ NHIDDEN=2048
65
+ NHEADS=16
66
+ FFN_HIDDEN_SIZE=8192
67
+ SEQ_LEN=2048
68
+
69
+ if [[ ${ROUND} == 1 ]]; then EXIT_INTERVAL=100 SAVE_INTERVAL=10
70
+ elif [[ ${ROUND} == 2 ]]; then SAVE_INTERVAL=1500
71
+ else echo "invalid ROUND: $ROUND"
72
+ fi
73
+
74
+ OPTIMIZER_ARGS=" \
75
+ --optimizer adam \
76
+ --adam-beta1 0.9 \
77
+ --adam-beta2 0.999 \
78
+ --adam-eps 1e-8 \
79
+ --lr 2e-4 \
80
+ --min-lr 1e-5 \
81
+ --lr-decay-style cosine \
82
+ --lr-decay-samples 146_484_375 \
83
+ --lr-warmup-samples 183_105 \
84
+ --clip-grad 1.0 \
85
+ --weight-decay 1e-1 \
86
+ "
87
+
88
+ EXIT_OPTS=" \
89
+ --exit-duration-in-mins 1190 \
90
+ "
91
+
92
+ GPT_ARGS=" \
93
+ --num-layers $NLAYERS \
94
+ --hidden-size $NHIDDEN \
95
+ --num-attention-heads $NHEADS \
96
+ --ffn-hidden-size $FFN_HIDDEN_SIZE \
97
+ --seq-length $SEQ_LEN \
98
+ --max-position-embeddings $SEQ_LEN \
99
+ --micro-batch-size $MICRO_BATCH_SIZE \
100
+ --global-batch-size $GLOBAL_BATCH_SIZE \
101
+ --rampup-batch-size 32 32 2_000_000 \
102
+ --train-samples $TRAIN_ITER \
103
+ --vocab-file $VOCAB_FILE \
104
+ --merge-file $MERGE_FILE \
105
+ --loss-scale 12 \
106
+ --clip-grad 1.0 \
107
+ --fp16 \
108
+ --checkpoint-activations \
109
+ $OPTIMIZER_ARGS \
110
+ $EXIT_OPTS \
111
+ "
112
+
113
+ OUTPUT_ARGS=" \
114
+ --log-interval 200 \
115
+ --save-interval $SAVE_INTERVAL \
116
+ --eval-interval 1000 \
117
+ --eval-iters 100 \
118
+ --tensorboard-dir $TENSORBOARD_PATH \
119
+ --tensorboard-queue-size 5 \
120
+ --log-timers-to-tensorboard \
121
+ --log-batch-size-to-tensorboard \
122
+ --log-validation-ppl-to-tensorboard \
123
+ "
124
+
125
+ ZERO_STAGE=1
126
+
127
+ config_json="./ds_config.$SLURM_JOBID.json"
128
+
129
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
130
+ cat <<EOT > $config_json
131
+ {
132
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
133
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
134
+ "gradient_clipping": 1.0,
135
+ "zero_optimization": {
136
+ "stage": $ZERO_STAGE
137
+ },
138
+ "fp16": {
139
+ "enabled": true,
140
+ "loss_scale": 0,
141
+ "loss_scale_window": 500,
142
+ "hysteresis": 2,
143
+ "min_loss_scale": 1,
144
+ "initial_scale_power": 12
145
+ },
146
+ "steps_per_print": 2000,
147
+ "wall_clock_breakdown": false
148
+ }
149
+ EOT
150
+
151
+
152
+ DEEPSPEED_ARGS=" \
153
+ --deepspeed \
154
+ --deepspeed_config ${config_json} \
155
+ --zero-stage ${ZERO_STAGE} \
156
+ --deepspeed-activation-checkpointing \
157
+ "
158
+
159
+ export LAUNCHER="python -u -m torch.distributed.launch \
160
+ --nproc_per_node $GPUS_PER_NODE \
161
+ --nnodes $NNODES \
162
+ --master_addr $MASTER_ADDR \
163
+ --master_port $MASTER_PORT \
164
+ "
165
+
166
+ export CMD=" \
167
+ `pwd`/pretrain_gpt.py \
168
+ --tensor-model-parallel-size $TP_SIZE \
169
+ --pipeline-model-parallel-size $PP_SIZE \
170
+ $GPT_ARGS \
171
+ $OUTPUT_ARGS \
172
+ --save $CHECKPOINT_PATH \
173
+ --load $CHECKPOINT_PATH \
174
+ --data-path $DATA_PATH \
175
+ --data-impl mmap \
176
+ --split 949,50,1 \
177
+ --distributed-backend nccl \
178
+ $DEEPSPEED_ARGS \
179
+ "
180
+
181
+
182
+ # # clear old checkpoint as it'd mismatch while we sort things out
183
+ # rm -rf $SAVE_CHECKPOINT_PATH
184
+
185
+
186
+ echo $CMD
187
+
188
+ # We create the folder where the logs and codecarbon will be stored.
189
+ mkdir -p $LOGS_PATH
190
+ # to debug - add echo (it exits and prints what it would have launched)
191
+ srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' 2>&1 | tee -a $LOGS_PATH/main_log.txt
train/tr13-mtf/smaller_models/tr13-6b3-mtf-xp3mt.slurm ADDED
@@ -0,0 +1,211 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=xp3mt
3
+ #SBATCH --partition=gpu_p5
4
+ #SBATCH --constraint=a100
5
+ #SBATCH --reservation=hug
6
+ #SBATCH --qos=qos_gpu-gc # up to 100h
7
+ #SBATCH --nodes=8
8
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
9
+ #SBATCH --cpus-per-task=64 # number of cores per tasks
10
+ #SBATCH --hint=nomultithread # we get physical cores not logical
11
+ #SBATCH --gres=gpu:8 # number of gpus
12
+ #SBATCH --time 8:00:00 # maximum execution time (HH:MM:SS)
13
+ #SBATCH --output=%x-%j.out # output file name
14
+ #SBATCH --account=six@a100
15
+
16
+ set -x -e
17
+
18
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
19
+ echo "START TIME: $(date)"
20
+
21
+ variant=xp3mt
22
+
23
+ DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13f-6B3-ml-t0
24
+ CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant
25
+ REPO_PATH=$DATA_OUTPUT_PATH/tr13f-6B3-ml-t0-logs
26
+ TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant
27
+ LOGS_PATH=$REPO_PATH/logs/$variant
28
+ mkdir -p $LOGS_PATH
29
+ mkdir -p $TENSORBOARD_PATH
30
+
31
+ MEGATRON_DEEPSPEED_REPO=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0/megdslossseqnew/Megatron-DeepSpeed
32
+ cd $MEGATRON_DEEPSPEED_REPO
33
+
34
+ KILL_SWITCH_PATH=$MEGATRON_DEEPSPEED_REPO/kill-switch-tr13f-6B3-mtf
35
+
36
+ TRAIN_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3mt_train.txt
37
+ VALID_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3mt_validation.txt
38
+ TOKENIZER_NAME_OR_PATH=bigscience/tokenizer
39
+
40
+ # defining the right environment variables
41
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
42
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
43
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
44
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
45
+ export HF_DATASETS_OFFLINE=1
46
+ export TRANSFORMERS_OFFLINE=1
47
+
48
+ # testing for potential faulty nodes
49
+ # srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"'
50
+
51
+ # so processes know who to talk to
52
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
53
+ MASTER_PORT=6001
54
+
55
+ GPUS_PER_NODE=8
56
+ NNODES=$SLURM_NNODES
57
+
58
+ PP_SIZE=1
59
+ TP_SIZE=1
60
+
61
+ # T0 paper:
62
+ # ...truncate input and target sequences to 1024 and 256 tokens...
63
+ # ...use a batch size of 1024 sequences ... 2^20 total input tokens per batch...
64
+ # We use 2048 total tokens and 512 batch size = 2**20
65
+ MICRO_BATCH_SIZE=4
66
+ GLOBAL_BATCH_SIZE=2048
67
+
68
+ NLAYERS=30
69
+ NHIDDEN=4096
70
+ NHEADS=32
71
+ SEQ_LEN=2048
72
+ # 250
73
+ SAVE_INTERVAL=2
74
+
75
+ TRAIN_SAMPLES=6_348_800
76
+
77
+ # T0 paper:
78
+ # "...we use a learning rate of 1e-3..."
79
+ # However, they use Adafactor, which adapts the LR
80
+ # For Adam we likely want a lower one
81
+ # FLAN:
82
+ # "...decay of 1e-4..""
83
+
84
+ # Uncomment for the first step
85
+ # --no-load-optim \
86
+ OPTIMIZER_ARGS=" \
87
+ --optimizer adam \
88
+ --adam-beta1 0.9 \
89
+ --adam-beta2 0.95 \
90
+ --adam-eps 1e-8 \
91
+ --lr 2e-5 \
92
+ --lr-decay-style constant \
93
+ --lr-warmup-samples 0 \
94
+ --clip-grad 1.0 \
95
+ --weight-decay 1e-4 \
96
+ --no-load-optim \
97
+ --norm-target-loss \
98
+ "
99
+ # for 20h 1190, for 100h 5990
100
+ # --exit-duration-in-mins 1190 \
101
+ EXIT_OPTS=" \
102
+ --exit-duration-in-mins 5990 \
103
+ "
104
+
105
+ GPT_ARGS=" \
106
+ --pp-partition-method 'type:transformer|embedding' \
107
+ --num-layers $NLAYERS \
108
+ --hidden-size $NHIDDEN \
109
+ --num-attention-heads $NHEADS \
110
+ --seq-length $SEQ_LEN \
111
+ --max-position-embeddings $SEQ_LEN \
112
+ --micro-batch-size $MICRO_BATCH_SIZE \
113
+ --global-batch-size $GLOBAL_BATCH_SIZE \
114
+ --train-samples $TRAIN_SAMPLES \
115
+ --tokenizer-type PretrainedFromHF \
116
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
117
+ --init-method-std 0.0048 \
118
+ --embed-layernorm \
119
+ --fp16 \
120
+ --seed 42 \
121
+ --position-embedding-type alibi \
122
+ --checkpoint-activations \
123
+ --abort-on-unmet-fused-kernel-constraints \
124
+ --kill-switch-path $KILL_SWITCH_PATH \
125
+ --pad-vocab-size-to 250880 \
126
+ $OPTIMIZER_ARGS \
127
+ $EXIT_OPTS \
128
+ "
129
+
130
+ OUTPUT_ARGS=" \
131
+ --log-interval 1 \
132
+ --save-interval $SAVE_INTERVAL \
133
+ --eval-interval 125 \
134
+ --eval-iters 10 \
135
+ --tensorboard-dir $TENSORBOARD_PATH \
136
+ --tensorboard-queue-size 5 \
137
+ --log-timers-to-tensorboard \
138
+ --log-batch-size-to-tensorboard \
139
+ --log-validation-ppl-to-tensorboard \
140
+ "
141
+
142
+ ZERO_STAGE=1
143
+
144
+ config_json="./ds_config.$SLURM_JOBID.json"
145
+
146
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
147
+ cat <<EOT > $config_json
148
+ {
149
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
150
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
151
+ "gradient_clipping": 1.0,
152
+ "zero_optimization": {
153
+ "stage": $ZERO_STAGE
154
+ },
155
+ "fp16": {
156
+ "enabled": true,
157
+ "loss_scale": 0,
158
+ "loss_scale_window": 500,
159
+ "hysteresis": 2,
160
+ "min_loss_scale": 1,
161
+ "initial_scale_power": 12
162
+ },
163
+ "steps_per_print": 2000,
164
+ "wall_clock_breakdown": false
165
+ }
166
+ EOT
167
+
168
+
169
+ DEEPSPEED_ARGS=" \
170
+ --deepspeed \
171
+ --deepspeed_config ${config_json} \
172
+ --zero-stage ${ZERO_STAGE} \
173
+ --deepspeed-activation-checkpointing \
174
+ "
175
+
176
+ export LAUNCHER="python -u -m torch.distributed.run \
177
+ --nproc_per_node $GPUS_PER_NODE \
178
+ --nnodes $NNODES \
179
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
180
+ --rdzv_backend c10d \
181
+ --max_restarts 0 \
182
+ --tee 3 \
183
+ "
184
+
185
+ export CMD=" \
186
+ `pwd`/finetune_t0.py \
187
+ --tensor-model-parallel-size $TP_SIZE \
188
+ --pipeline-model-parallel-size $PP_SIZE \
189
+ $GPT_ARGS \
190
+ $OUTPUT_ARGS \
191
+ --save $CHECKPOINT_PATH \
192
+ --load $CHECKPOINT_PATH \
193
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
194
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
195
+ --dataloader-type single \
196
+ --data-impl mmap \
197
+ --distributed-backend nccl \
198
+ $DEEPSPEED_ARGS \
199
+ "
200
+
201
+ echo $CMD
202
+
203
+ # do not remove or the training will hang and nodes will be lost w/o this workaround
204
+ export CUDA_LAUNCH_BLOCKING=1
205
+
206
+ # hide duplicated errors using this hack - will be properly fixed in pt-1.12
207
+ export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json
208
+
209
+ clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt
210
+
211
+ echo "END TIME: $(date)"
train/tr13-mtf/smaller_models/tr13b-1b3-ml-xp3capmixnewcodelonglossseq-a100.slurm ADDED
@@ -0,0 +1,211 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=1b3xp3capmixnewcodelong
3
+ #SBATCH --partition=gpu_p5
4
+ #SBATCH --constraint=a100
5
+ #SBATCH --nodes=8
6
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
7
+ #SBATCH --cpus-per-task=64 # number of cores per tasks
8
+ #SBATCH --hint=nomultithread # we get physical cores not logical
9
+ #SBATCH --gres=gpu:8 # number of gpus
10
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
11
+ #SBATCH --output=%x-%j.out # output file name
12
+ #SBATCH --account=six@a100
13
+
14
+ set -x -e
15
+
16
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
17
+ echo "START TIME: $(date)"
18
+
19
+ variant=xp3capmixnewcodelonglossseq
20
+
21
+ DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13b-1B3-ml-t0
22
+ CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant
23
+ REPO_PATH=$DATA_OUTPUT_PATH/tr13b-1B3-ml-logs
24
+ TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant
25
+ LOGS_PATH=$REPO_PATH/logs/$variant
26
+ mkdir -p $LOGS_PATH
27
+ mkdir -p $TENSORBOARD_PATH
28
+
29
+ MEGATRON_DEEPSPEED_REPO=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0/megdslossseqnew/Megatron-DeepSpeed
30
+ cd $MEGATRON_DEEPSPEED_REPO
31
+
32
+ KILL_SWITCH_PATH=$MEGATRON_DEEPSPEED_REPO/kill-switch-tr13b-1B3-mtf
33
+
34
+ TRAIN_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixnewcodelong_train.txt
35
+ VALID_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixnewcodelong_validation.txt
36
+ TOKENIZER_NAME_OR_PATH=bigscience/tokenizer
37
+
38
+ # defining the right environment variables
39
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
40
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
41
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
42
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
43
+ export HF_DATASETS_OFFLINE=1
44
+ export TRANSFORMERS_OFFLINE=1
45
+
46
+ # testing for potential faulty nodes
47
+ # srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"'
48
+
49
+ # so processes know who to talk to
50
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
51
+ MASTER_PORT=6001
52
+
53
+ GPUS_PER_NODE=8
54
+ NNODES=$SLURM_NNODES
55
+
56
+ PP_SIZE=2
57
+ TP_SIZE=2
58
+
59
+ # T0 paper:
60
+ # ...truncate input and target sequences to 1024 and 256 tokens...
61
+ # ...use a batch size of 1024 sequences ... 2^20 total input tokens per batch...
62
+ # We use 2048 total tokens and 512 batch size = 2**20
63
+ MICRO_BATCH_SIZE=1
64
+ GLOBAL_BATCH_SIZE=2048
65
+
66
+ NLAYERS=24
67
+ NHIDDEN=2048
68
+ NHEADS=16
69
+ SEQ_LEN=2048
70
+ # 250
71
+ SAVE_INTERVAL=2
72
+
73
+ TRAIN_SAMPLES=6_348_800
74
+
75
+ # T0 paper:
76
+ # "...we use a learning rate of 1e-3..."
77
+ # However, they use Adafactor, which adapts the LR
78
+ # For Adam we likely want a lower one
79
+ # FLAN:
80
+ # "...decay of 1e-4..""
81
+
82
+ # Uncomment for the first step
83
+ # --no-load-optim \
84
+ # --reset-progress \
85
+ OPTIMIZER_ARGS=" \
86
+ --optimizer adam \
87
+ --adam-beta1 0.9 \
88
+ --adam-beta2 0.95 \
89
+ --adam-eps 1e-8 \
90
+ --lr 2e-5 \
91
+ --lr-decay-style constant \
92
+ --lr-warmup-samples 0 \
93
+ --clip-grad 1.0 \
94
+ --weight-decay 1e-4 \
95
+ --no-load-optim \
96
+ --reset-progress \
97
+ --norm-target-loss \
98
+ "
99
+ # for 20h 1190, for 100h 5990
100
+ # --exit-duration-in-mins 1190 \
101
+ EXIT_OPTS=" \
102
+ --exit-duration-in-mins 5990 \
103
+ "
104
+
105
+ GPT_ARGS=" \
106
+ --pp-partition-method 'type:transformer|embedding' \
107
+ --num-layers $NLAYERS \
108
+ --hidden-size $NHIDDEN \
109
+ --num-attention-heads $NHEADS \
110
+ --seq-length $SEQ_LEN \
111
+ --max-position-embeddings $SEQ_LEN \
112
+ --micro-batch-size $MICRO_BATCH_SIZE \
113
+ --global-batch-size $GLOBAL_BATCH_SIZE \
114
+ --train-samples $TRAIN_SAMPLES \
115
+ --tokenizer-type PretrainedFromHF \
116
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
117
+ --init-method-std 0.0048 \
118
+ --embed-layernorm \
119
+ --fp16 \
120
+ --seed 42 \
121
+ --position-embedding-type alibi \
122
+ --checkpoint-activations \
123
+ --abort-on-unmet-fused-kernel-constraints \
124
+ --kill-switch-path $KILL_SWITCH_PATH \
125
+ --pad-vocab-size-to 250880 \
126
+ $OPTIMIZER_ARGS \
127
+ $EXIT_OPTS \
128
+ "
129
+
130
+ OUTPUT_ARGS=" \
131
+ --log-interval 1 \
132
+ --save-interval $SAVE_INTERVAL \
133
+ --eval-interval 125 \
134
+ --eval-iters 2 \
135
+ --tensorboard-dir $TENSORBOARD_PATH \
136
+ --tensorboard-queue-size 5 \
137
+ --log-timers-to-tensorboard \
138
+ --log-batch-size-to-tensorboard \
139
+ --log-validation-ppl-to-tensorboard \
140
+ "
141
+
142
+ ZERO_STAGE=1
143
+
144
+ config_json="./ds_config.$SLURM_JOBID.json"
145
+
146
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
147
+ cat <<EOT > $config_json
148
+ {
149
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
150
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
151
+ "gradient_clipping": 1.0,
152
+ "zero_optimization": {
153
+ "stage": $ZERO_STAGE
154
+ },
155
+ "fp16": {
156
+ "enabled": true,
157
+ "loss_scale": 0,
158
+ "loss_scale_window": 500,
159
+ "hysteresis": 2,
160
+ "min_loss_scale": 1,
161
+ "initial_scale_power": 12
162
+ },
163
+ "steps_per_print": 2000,
164
+ "wall_clock_breakdown": false
165
+ }
166
+ EOT
167
+
168
+
169
+ DEEPSPEED_ARGS=" \
170
+ --deepspeed \
171
+ --deepspeed_config ${config_json} \
172
+ --zero-stage ${ZERO_STAGE} \
173
+ --deepspeed-activation-checkpointing \
174
+ "
175
+
176
+ export LAUNCHER="python -u -m torch.distributed.run \
177
+ --nproc_per_node $GPUS_PER_NODE \
178
+ --nnodes $NNODES \
179
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
180
+ --rdzv_backend c10d \
181
+ --max_restarts 0 \
182
+ --tee 3 \
183
+ "
184
+
185
+ export CMD=" \
186
+ `pwd`/finetune_t0.py \
187
+ --tensor-model-parallel-size $TP_SIZE \
188
+ --pipeline-model-parallel-size $PP_SIZE \
189
+ $GPT_ARGS \
190
+ $OUTPUT_ARGS \
191
+ --save $CHECKPOINT_PATH \
192
+ --load $CHECKPOINT_PATH \
193
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
194
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
195
+ --dataloader-type single \
196
+ --data-impl mmap \
197
+ --distributed-backend nccl \
198
+ $DEEPSPEED_ARGS \
199
+ "
200
+
201
+ echo $CMD
202
+
203
+ # do not remove or the training will hang and nodes will be lost w/o this workaround
204
+ export CUDA_LAUNCH_BLOCKING=1
205
+
206
+ # hide duplicated errors using this hack - will be properly fixed in pt-1.12
207
+ export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json
208
+
209
+ clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt
210
+
211
+ echo "END TIME: $(date)"
train/tr13-mtf/smaller_models/tr13b-1b3-ml-xp3capmixnewcodelonglossseq.slurm ADDED
@@ -0,0 +1,211 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=1b3xp3capmixnewcodelong
3
+ #SBATCH --qos=qos_gpu-t3
4
+ #SBATCH --nodes=16
5
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
6
+ #SBATCH --cpus-per-task=40 # number of cores per tasks
7
+ #SBATCH --hint=nomultithread # we get physical cores not logical
8
+ #SBATCH --gres=gpu:4 # number of gpus
9
+ #SBATCH -C v100-32g
10
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
11
+ #SBATCH --output=%x-%j.out # output file name
12
+ #SBATCH --account=six@v100
13
+
14
+ set -x -e
15
+
16
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
17
+ echo "START TIME: $(date)"
18
+
19
+ variant=xp3capmixnewcodelonglossseq
20
+
21
+ DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13b-1B3-ml-t0
22
+ CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant
23
+ REPO_PATH=$DATA_OUTPUT_PATH/tr13b-1B3-ml-logs
24
+ TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant
25
+ LOGS_PATH=$REPO_PATH/logs/$variant
26
+ mkdir -p $LOGS_PATH
27
+ mkdir -p $TENSORBOARD_PATH
28
+
29
+ MEGATRON_DEEPSPEED_REPO=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0/megdslossseqnew/Megatron-DeepSpeed
30
+ cd $MEGATRON_DEEPSPEED_REPO
31
+
32
+ KILL_SWITCH_PATH=$MEGATRON_DEEPSPEED_REPO/kill-switch-tr13b-1B3-mtf
33
+
34
+ TRAIN_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixnewcodelong_train.txt
35
+ VALID_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixnewcodelong_validation_pretr.txt
36
+ TOKENIZER_NAME_OR_PATH=bigscience/tokenizer
37
+
38
+ # defining the right environment variables
39
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
40
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
41
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
42
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
43
+ export HF_DATASETS_OFFLINE=1
44
+ export TRANSFORMERS_OFFLINE=1
45
+
46
+ # testing for potential faulty nodes
47
+ # srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"'
48
+
49
+ # so processes know who to talk to
50
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
51
+ MASTER_PORT=6001
52
+
53
+ GPUS_PER_NODE=4
54
+ NNODES=$SLURM_NNODES
55
+
56
+ PP_SIZE=2
57
+ TP_SIZE=2
58
+
59
+ # T0 paper:
60
+ # ...truncate input and target sequences to 1024 and 256 tokens...
61
+ # ...use a batch size of 1024 sequences ... 2^20 total input tokens per batch...
62
+ # We use 2048 total tokens and 512 batch size = 2**20
63
+ MICRO_BATCH_SIZE=1
64
+ GLOBAL_BATCH_SIZE=2048
65
+
66
+ NLAYERS=24
67
+ NHIDDEN=2048
68
+ NHEADS=16
69
+ SEQ_LEN=2048
70
+ # 250
71
+ SAVE_INTERVAL=2
72
+
73
+ TRAIN_SAMPLES=6_348_800
74
+
75
+ # T0 paper:
76
+ # "...we use a learning rate of 1e-3..."
77
+ # However, they use Adafactor, which adapts the LR
78
+ # For Adam we likely want a lower one
79
+ # FLAN:
80
+ # "...decay of 1e-4..""
81
+
82
+ # Uncomment for the first step
83
+ # --no-load-optim \
84
+ # --reset-progress \
85
+ OPTIMIZER_ARGS=" \
86
+ --optimizer adam \
87
+ --adam-beta1 0.9 \
88
+ --adam-beta2 0.95 \
89
+ --adam-eps 1e-8 \
90
+ --lr 2e-5 \
91
+ --lr-decay-style constant \
92
+ --lr-warmup-samples 0 \
93
+ --clip-grad 1.0 \
94
+ --weight-decay 1e-4 \
95
+ --no-load-optim \
96
+ --reset-progress \
97
+ --norm-target-loss \
98
+ "
99
+ # for 20h 1190, for 100h 5990
100
+ # --exit-duration-in-mins 1190 \
101
+ EXIT_OPTS=" \
102
+ --exit-duration-in-mins 5990 \
103
+ "
104
+
105
+ GPT_ARGS=" \
106
+ --pp-partition-method 'type:transformer|embedding' \
107
+ --num-layers $NLAYERS \
108
+ --hidden-size $NHIDDEN \
109
+ --num-attention-heads $NHEADS \
110
+ --seq-length $SEQ_LEN \
111
+ --max-position-embeddings $SEQ_LEN \
112
+ --micro-batch-size $MICRO_BATCH_SIZE \
113
+ --global-batch-size $GLOBAL_BATCH_SIZE \
114
+ --train-samples $TRAIN_SAMPLES \
115
+ --tokenizer-type PretrainedFromHF \
116
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
117
+ --init-method-std 0.0048 \
118
+ --embed-layernorm \
119
+ --fp16 \
120
+ --seed 42 \
121
+ --position-embedding-type alibi \
122
+ --checkpoint-activations \
123
+ --abort-on-unmet-fused-kernel-constraints \
124
+ --kill-switch-path $KILL_SWITCH_PATH \
125
+ --pad-vocab-size-to 250880 \
126
+ $OPTIMIZER_ARGS \
127
+ $EXIT_OPTS \
128
+ "
129
+
130
+ OUTPUT_ARGS=" \
131
+ --log-interval 1 \
132
+ --save-interval $SAVE_INTERVAL \
133
+ --eval-interval 125 \
134
+ --eval-iters 10 \
135
+ --tensorboard-dir $TENSORBOARD_PATH \
136
+ --tensorboard-queue-size 5 \
137
+ --log-timers-to-tensorboard \
138
+ --log-batch-size-to-tensorboard \
139
+ --log-validation-ppl-to-tensorboard \
140
+ "
141
+
142
+ ZERO_STAGE=1
143
+
144
+ config_json="./ds_config.$SLURM_JOBID.json"
145
+
146
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
147
+ cat <<EOT > $config_json
148
+ {
149
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
150
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
151
+ "gradient_clipping": 1.0,
152
+ "zero_optimization": {
153
+ "stage": $ZERO_STAGE
154
+ },
155
+ "fp16": {
156
+ "enabled": true,
157
+ "loss_scale": 0,
158
+ "loss_scale_window": 500,
159
+ "hysteresis": 2,
160
+ "min_loss_scale": 1,
161
+ "initial_scale_power": 12
162
+ },
163
+ "steps_per_print": 2000,
164
+ "wall_clock_breakdown": false
165
+ }
166
+ EOT
167
+
168
+
169
+ DEEPSPEED_ARGS=" \
170
+ --deepspeed \
171
+ --deepspeed_config ${config_json} \
172
+ --zero-stage ${ZERO_STAGE} \
173
+ --deepspeed-activation-checkpointing \
174
+ "
175
+
176
+ export LAUNCHER="python -u -m torch.distributed.run \
177
+ --nproc_per_node $GPUS_PER_NODE \
178
+ --nnodes $NNODES \
179
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
180
+ --rdzv_backend c10d \
181
+ --max_restarts 0 \
182
+ --tee 3 \
183
+ "
184
+
185
+ export CMD=" \
186
+ `pwd`/finetune_t0.py \
187
+ --tensor-model-parallel-size $TP_SIZE \
188
+ --pipeline-model-parallel-size $PP_SIZE \
189
+ $GPT_ARGS \
190
+ $OUTPUT_ARGS \
191
+ --save $CHECKPOINT_PATH \
192
+ --load $CHECKPOINT_PATH \
193
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
194
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
195
+ --dataloader-type single \
196
+ --data-impl mmap \
197
+ --distributed-backend nccl \
198
+ $DEEPSPEED_ARGS \
199
+ "
200
+
201
+ echo $CMD
202
+
203
+ # do not remove or the training will hang and nodes will be lost w/o this workaround
204
+ export CUDA_LAUNCH_BLOCKING=1
205
+
206
+ # hide duplicated errors using this hack - will be properly fixed in pt-1.12
207
+ export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json
208
+
209
+ clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt
210
+
211
+ echo "END TIME: $(date)"
train/tr13-mtf/smaller_models/tr13c-2b5-mtf-xp3capmixnewcodelonglossseq-a100.slurm ADDED
@@ -0,0 +1,210 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=2b5xp3capmixnewcodelong
3
+ #SBATCH --partition=gpu_p5
4
+ #SBATCH --constraint=a100
5
+ #SBATCH --nodes=16
6
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
7
+ #SBATCH --cpus-per-task=64 # number of cores per task
8
+ #SBATCH --hint=nomultithread # we get physical cores not logical
9
+ #SBATCH --gres=gpu:8 # number of gpus
10
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
11
+ #SBATCH --output=%x-%j.out # output file name
12
+ #SBATCH --account=six@a100
13
+
14
+
15
+ set -x -e
16
+
17
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
18
+ echo "START TIME: $(date)"
19
+
20
+ variant=xp3capmixnewcodelonglossseq
21
+
22
+ DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13c-2B5-ml-t0
23
+ CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant
24
+ REPO_PATH=$DATA_OUTPUT_PATH/tr13c-2B5-ml-t0-logs
25
+ TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant
26
+ LOGS_PATH=$REPO_PATH/logs/$variant
27
+ mkdir -p $LOGS_PATH
28
+ mkdir -p $TENSORBOARD_PATH
29
+
30
+ MEGATRON_DEEPSPEED_REPO=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0/megdslossseqnew/Megatron-DeepSpeed
31
+ cd $MEGATRON_DEEPSPEED_REPO
32
+
33
+ KILL_SWITCH_PATH=$MEGATRON_DEEPSPEED_REPO/kill-switch-tr13c-2B5-mtf
34
+
35
+ TRAIN_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixnewcodelong_train.txt
36
+ VALID_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixnewcodelong_validation.txt
37
+ TOKENIZER_NAME_OR_PATH=bigscience/tokenizer
38
+
39
+ # defining the right environment variables
40
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
41
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
42
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
43
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
44
+ export HF_DATASETS_OFFLINE=1
45
+ export TRANSFORMERS_OFFLINE=1
46
+
47
+ # testing for potential faulty nodes
48
+ # srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"'
49
+
50
+ # so processes know who to talk to
51
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
52
+ MASTER_PORT=6001
53
+
54
+ GPUS_PER_NODE=8
55
+ NNODES=$SLURM_NNODES
56
+
57
+ PP_SIZE=2
58
+ TP_SIZE=1
59
+
60
+ # T0 paper:
61
+ # ...truncate input and target sequences to 1024 and 256 tokens...
62
+ # ...use a batch size of 1024 sequences ... 2^20 total input tokens per batch...
63
+ # We use 2048 total tokens and 512 batch size = 2**20
64
+ MICRO_BATCH_SIZE=1
65
+ GLOBAL_BATCH_SIZE=2048
66
+
67
+ NLAYERS=30
68
+ NHIDDEN=2560
69
+ NHEADS=32
70
+ SEQ_LEN=2048
71
+ # 250
72
+ SAVE_INTERVAL=250
73
+
74
+ TRAIN_SAMPLES=6_348_800
75
+
76
+ # T0 paper:
77
+ # "...we use a learning rate of 1e-3..."
78
+ # However, they use Adafactor, which adapts the LR
79
+ # For Adam we likely want a lower one
80
+ # FLAN:
81
+ # "...decay of 1e-4..""
82
+
83
+ # Uncomment for the first step
84
+ # --no-load-optim \
85
+ # --reset-progress \
86
+ OPTIMIZER_ARGS=" \
87
+ --optimizer adam \
88
+ --adam-beta1 0.9 \
89
+ --adam-beta2 0.95 \
90
+ --adam-eps 1e-8 \
91
+ --lr 2e-5 \
92
+ --lr-decay-style constant \
93
+ --lr-warmup-samples 0 \
94
+ --clip-grad 1.0 \
95
+ --weight-decay 1e-4 \
96
+ --norm-target-loss \
97
+ "
98
+ # for 20h 1190, for 100h 5990
99
+ # --exit-duration-in-mins 1190 \
100
+ EXIT_OPTS=" \
101
+ --exit-duration-in-mins 5990 \
102
+ "
103
+
104
+ GPT_ARGS=" \
105
+ --pp-partition-method 'type:transformer|embedding' \
106
+ --num-layers $NLAYERS \
107
+ --hidden-size $NHIDDEN \
108
+ --num-attention-heads $NHEADS \
109
+ --seq-length $SEQ_LEN \
110
+ --max-position-embeddings $SEQ_LEN \
111
+ --micro-batch-size $MICRO_BATCH_SIZE \
112
+ --global-batch-size $GLOBAL_BATCH_SIZE \
113
+ --train-samples $TRAIN_SAMPLES \
114
+ --tokenizer-type PretrainedFromHF \
115
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
116
+ --init-method-std 0.0048 \
117
+ --embed-layernorm \
118
+ --fp16 \
119
+ --seed 42 \
120
+ --position-embedding-type alibi \
121
+ --checkpoint-activations \
122
+ --abort-on-unmet-fused-kernel-constraints \
123
+ --kill-switch-path $KILL_SWITCH_PATH \
124
+ --pad-vocab-size-to 250880 \
125
+ $OPTIMIZER_ARGS \
126
+ $EXIT_OPTS \
127
+ "
128
+
129
+ OUTPUT_ARGS=" \
130
+ --log-interval 1 \
131
+ --save-interval $SAVE_INTERVAL \
132
+ --eval-interval 125 \
133
+ --eval-iters 2 \
134
+ --tensorboard-dir $TENSORBOARD_PATH \
135
+ --tensorboard-queue-size 5 \
136
+ --log-timers-to-tensorboard \
137
+ --log-batch-size-to-tensorboard \
138
+ --log-validation-ppl-to-tensorboard \
139
+ "
140
+
141
+ ZERO_STAGE=1
142
+
143
+ config_json="./ds_config.$SLURM_JOBID.json"
144
+
145
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
146
+ cat <<EOT > $config_json
147
+ {
148
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
149
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
150
+ "gradient_clipping": 1.0,
151
+ "zero_optimization": {
152
+ "stage": $ZERO_STAGE
153
+ },
154
+ "fp16": {
155
+ "enabled": true,
156
+ "loss_scale": 0,
157
+ "loss_scale_window": 500,
158
+ "hysteresis": 2,
159
+ "min_loss_scale": 1,
160
+ "initial_scale_power": 12
161
+ },
162
+ "steps_per_print": 2000,
163
+ "wall_clock_breakdown": false
164
+ }
165
+ EOT
166
+
167
+
168
+ DEEPSPEED_ARGS=" \
169
+ --deepspeed \
170
+ --deepspeed_config ${config_json} \
171
+ --zero-stage ${ZERO_STAGE} \
172
+ --deepspeed-activation-checkpointing \
173
+ "
174
+
175
+ export LAUNCHER="python -u -m torch.distributed.run \
176
+ --nproc_per_node $GPUS_PER_NODE \
177
+ --nnodes $NNODES \
178
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
179
+ --rdzv_backend c10d \
180
+ --max_restarts 0 \
181
+ --tee 3 \
182
+ "
183
+
184
+ export CMD=" \
185
+ `pwd`/finetune_t0.py \
186
+ --tensor-model-parallel-size $TP_SIZE \
187
+ --pipeline-model-parallel-size $PP_SIZE \
188
+ $GPT_ARGS \
189
+ $OUTPUT_ARGS \
190
+ --save $CHECKPOINT_PATH \
191
+ --load $CHECKPOINT_PATH \
192
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
193
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
194
+ --dataloader-type single \
195
+ --data-impl mmap \
196
+ --distributed-backend nccl \
197
+ $DEEPSPEED_ARGS \
198
+ "
199
+
200
+ echo $CMD
201
+
202
+ # do not remove or the training will hang and nodes will be lost w/o this workaround
203
+ export CUDA_LAUNCH_BLOCKING=1
204
+
205
+ # hide duplicated errors using this hack - will be properly fixed in pt-1.12
206
+ export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json
207
+
208
+ clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt
209
+
210
+ echo "END TIME: $(date)"
train/tr13-mtf/smaller_models/tr13c-2b5-mtf-xp3capmixnewcodelonglossseq.slurm ADDED
@@ -0,0 +1,211 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=2b5xp3capmixnewcodelong
3
+ #SBATCH --qos=qos_gpu-t3
4
+ #SBATCH --nodes=32
5
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
6
+ #SBATCH --cpus-per-task=40 # number of cores per task
7
+ #SBATCH --hint=nomultithread # we get physical cores not logical
8
+ #SBATCH --gres=gpu:4 # number of gpus
9
+ #SBATCH -C v100-32g
10
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
11
+ #SBATCH --output=%x-%j.out # output file name
12
+ #SBATCH --account=six@v100
13
+
14
+ set -x -e
15
+
16
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
17
+ echo "START TIME: $(date)"
18
+
19
+ variant=xp3capmixnewcodelonglossseq
20
+
21
+ DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13c-2B5-ml-t0
22
+ CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant
23
+ REPO_PATH=$DATA_OUTPUT_PATH/tr13c-2B5-ml-t0-logs
24
+ TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant
25
+ LOGS_PATH=$REPO_PATH/logs/$variant
26
+ mkdir -p $LOGS_PATH
27
+ mkdir -p $TENSORBOARD_PATH
28
+
29
+ MEGATRON_DEEPSPEED_REPO=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0/megdslossseqnew/Megatron-DeepSpeed
30
+ cd $MEGATRON_DEEPSPEED_REPO
31
+
32
+ KILL_SWITCH_PATH=$MEGATRON_DEEPSPEED_REPO/kill-switch-tr13c-2B5-mtf
33
+
34
+ TRAIN_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixnewcodelong_train.txt
35
+ VALID_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixnewcodelong_validation_pretr.txt
36
+ TOKENIZER_NAME_OR_PATH=bigscience/tokenizer
37
+
38
+ # defining the right environment variables
39
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
40
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
41
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
42
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
43
+ export HF_DATASETS_OFFLINE=1
44
+ export TRANSFORMERS_OFFLINE=1
45
+
46
+ # testing for potential faulty nodes
47
+ # srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"'
48
+
49
+ # so processes know who to talk to
50
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
51
+ MASTER_PORT=6001
52
+
53
+ GPUS_PER_NODE=4
54
+ NNODES=$SLURM_NNODES
55
+
56
+ PP_SIZE=2
57
+ TP_SIZE=1
58
+
59
+ # T0 paper:
60
+ # ...truncate input and target sequences to 1024 and 256 tokens...
61
+ # ...use a batch size of 1024 sequences ... 2^20 total input tokens per batch...
62
+ # We use 2048 total tokens and 512 batch size = 2**20
63
+ MICRO_BATCH_SIZE=1
64
+ GLOBAL_BATCH_SIZE=2048
65
+
66
+ NLAYERS=30
67
+ NHIDDEN=2560
68
+ NHEADS=32
69
+ SEQ_LEN=2048
70
+ # 250
71
+ SAVE_INTERVAL=2
72
+
73
+ TRAIN_SAMPLES=6_348_800
74
+
75
+ # T0 paper:
76
+ # "...we use a learning rate of 1e-3..."
77
+ # However, they use Adafactor, which adapts the LR
78
+ # For Adam we likely want a lower one
79
+ # FLAN:
80
+ # "...decay of 1e-4..""
81
+
82
+ # Uncomment for the first step
83
+ # --no-load-optim \
84
+ # --reset-progress \
85
+ OPTIMIZER_ARGS=" \
86
+ --optimizer adam \
87
+ --adam-beta1 0.9 \
88
+ --adam-beta2 0.95 \
89
+ --adam-eps 1e-8 \
90
+ --lr 2e-5 \
91
+ --lr-decay-style constant \
92
+ --lr-warmup-samples 0 \
93
+ --clip-grad 1.0 \
94
+ --weight-decay 1e-4 \
95
+ --no-load-optim \
96
+ --reset-progress \
97
+ --norm-target-loss \
98
+ "
99
+ # for 20h 1190, for 100h 5990
100
+ # --exit-duration-in-mins 1190 \
101
+ EXIT_OPTS=" \
102
+ --exit-duration-in-mins 5990 \
103
+ "
104
+
105
+ GPT_ARGS=" \
106
+ --pp-partition-method 'type:transformer|embedding' \
107
+ --num-layers $NLAYERS \
108
+ --hidden-size $NHIDDEN \
109
+ --num-attention-heads $NHEADS \
110
+ --seq-length $SEQ_LEN \
111
+ --max-position-embeddings $SEQ_LEN \
112
+ --micro-batch-size $MICRO_BATCH_SIZE \
113
+ --global-batch-size $GLOBAL_BATCH_SIZE \
114
+ --train-samples $TRAIN_SAMPLES \
115
+ --tokenizer-type PretrainedFromHF \
116
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
117
+ --init-method-std 0.0048 \
118
+ --embed-layernorm \
119
+ --fp16 \
120
+ --seed 42 \
121
+ --position-embedding-type alibi \
122
+ --checkpoint-activations \
123
+ --abort-on-unmet-fused-kernel-constraints \
124
+ --kill-switch-path $KILL_SWITCH_PATH \
125
+ --pad-vocab-size-to 250880 \
126
+ $OPTIMIZER_ARGS \
127
+ $EXIT_OPTS \
128
+ "
129
+
130
+ OUTPUT_ARGS=" \
131
+ --log-interval 1 \
132
+ --save-interval $SAVE_INTERVAL \
133
+ --eval-interval 125 \
134
+ --eval-iters 2 \
135
+ --tensorboard-dir $TENSORBOARD_PATH \
136
+ --tensorboard-queue-size 5 \
137
+ --log-timers-to-tensorboard \
138
+ --log-batch-size-to-tensorboard \
139
+ --log-validation-ppl-to-tensorboard \
140
+ "
141
+
142
+ ZERO_STAGE=1
143
+
144
+ config_json="./ds_config.$SLURM_JOBID.json"
145
+
146
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
147
+ cat <<EOT > $config_json
148
+ {
149
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
150
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
151
+ "gradient_clipping": 1.0,
152
+ "zero_optimization": {
153
+ "stage": $ZERO_STAGE
154
+ },
155
+ "fp16": {
156
+ "enabled": true,
157
+ "loss_scale": 0,
158
+ "loss_scale_window": 500,
159
+ "hysteresis": 2,
160
+ "min_loss_scale": 1,
161
+ "initial_scale_power": 12
162
+ },
163
+ "steps_per_print": 2000,
164
+ "wall_clock_breakdown": false
165
+ }
166
+ EOT
167
+
168
+
169
+ DEEPSPEED_ARGS=" \
170
+ --deepspeed \
171
+ --deepspeed_config ${config_json} \
172
+ --zero-stage ${ZERO_STAGE} \
173
+ --deepspeed-activation-checkpointing \
174
+ "
175
+
176
+ export LAUNCHER="python -u -m torch.distributed.run \
177
+ --nproc_per_node $GPUS_PER_NODE \
178
+ --nnodes $NNODES \
179
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
180
+ --rdzv_backend c10d \
181
+ --max_restarts 0 \
182
+ --tee 3 \
183
+ "
184
+
185
+ export CMD=" \
186
+ `pwd`/finetune_t0.py \
187
+ --tensor-model-parallel-size $TP_SIZE \
188
+ --pipeline-model-parallel-size $PP_SIZE \
189
+ $GPT_ARGS \
190
+ $OUTPUT_ARGS \
191
+ --save $CHECKPOINT_PATH \
192
+ --load $CHECKPOINT_PATH \
193
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
194
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
195
+ --dataloader-type single \
196
+ --data-impl mmap \
197
+ --distributed-backend nccl \
198
+ $DEEPSPEED_ARGS \
199
+ "
200
+
201
+ echo $CMD
202
+
203
+ # do not remove or the training will hang and nodes will be lost w/o this workaround
204
+ export CUDA_LAUNCH_BLOCKING=1
205
+
206
+ # hide duplicated errors using this hack - will be properly fixed in pt-1.12
207
+ export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json
208
+
209
+ clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt
210
+
211
+ echo "END TIME: $(date)"
train/tr13-mtf/smaller_models/tr13e-350m-mtf-xp3capmixnewcodelonglossseq-val.slurm ADDED
@@ -0,0 +1,211 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=val350mxp3capmixnewcodelong
3
+ #SBATCH --qos=qos_gpu-t3
4
+ #SBATCH --nodes=8
5
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
6
+ #SBATCH --cpus-per-task=40 # number of cores per task
7
+ #SBATCH --hint=nomultithread # we get physical cores not logical
8
+ #SBATCH --gres=gpu:4 # number of gpus
9
+ #SBATCH -C v100-32g
10
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
11
+ #SBATCH --output=%x-%j.out # output file name
12
+ #SBATCH --account=six@v100
13
+
14
+ set -x -e
15
+
16
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
17
+ echo "START TIME: $(date)"
18
+
19
+ variant=xp3capmixnewcodelonglossseq
20
+
21
+ DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13e-350M-ml-t0
22
+ CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant
23
+ REPO_PATH=$DATA_OUTPUT_PATH/tr13e-350M-ml-logs
24
+ TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant
25
+ LOGS_PATH=$REPO_PATH/logs/$variant
26
+ mkdir -p $LOGS_PATH
27
+ mkdir -p $TENSORBOARD_PATH
28
+
29
+ MEGATRON_DEEPSPEED_REPO=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0/megdslossseqnew350m/Megatron-DeepSpeed
30
+ cd $MEGATRON_DEEPSPEED_REPO
31
+
32
+ KILL_SWITCH_PATH=$MEGATRON_DEEPSPEED_REPO/kill-switch-tr13e-350M-mtf
33
+
34
+ TRAIN_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixnewcodelong_train.txt
35
+ VALID_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixnewcodelong_validation_pretr.txt
36
+ TOKENIZER_NAME_OR_PATH=bigscience/tokenizer
37
+
38
+ # defining the right environment variables
39
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
40
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
41
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
42
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
43
+ export HF_DATASETS_OFFLINE=1
44
+ export TRANSFORMERS_OFFLINE=1
45
+
46
+ # testing for potential faulty nodes
47
+ # srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"'
48
+
49
+ # so processes know who to talk to
50
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
51
+ MASTER_PORT=6001
52
+
53
+ GPUS_PER_NODE=4
54
+ NNODES=$SLURM_NNODES
55
+
56
+ PP_SIZE=1
57
+ TP_SIZE=1
58
+
59
+ # T0 paper:
60
+ # ...truncate input and target sequences to 1024 and 256 tokens...
61
+ # ...use a batch size of 1024 sequences ... 2^20 total input tokens per batch...
62
+ # We use 2048 total tokens and 512 batch size = 2**20
63
+ MICRO_BATCH_SIZE=1
64
+ GLOBAL_BATCH_SIZE=1024
65
+
66
+ NLAYERS=24
67
+ NHIDDEN=1024
68
+ NHEADS=16
69
+ SEQ_LEN=2048
70
+ # 250
71
+ SAVE_INTERVAL=2
72
+
73
+ TRAIN_SAMPLES=6_348_800
74
+
75
+ # T0 paper:
76
+ # "...we use a learning rate of 1e-3..."
77
+ # However, they use Adafactor, which adapts the LR
78
+ # For Adam we likely want a lower one
79
+ # FLAN:
80
+ # "...decay of 1e-4..""
81
+
82
+ # Uncomment for the first step
83
+ # --no-load-optim \
84
+ OPTIMIZER_ARGS=" \
85
+ --optimizer adam \
86
+ --adam-beta1 0.9 \
87
+ --adam-beta2 0.95 \
88
+ --adam-eps 1e-8 \
89
+ --lr 5e-4 \
90
+ --lr-decay-style constant \
91
+ --lr-warmup-samples 0 \
92
+ --clip-grad 1.0 \
93
+ --weight-decay 1e-4 \
94
+ --no-load-optim \
95
+ --norm-target-loss \
96
+ --reset-progress \
97
+ "
98
+ # for 20h 1190, for 100h 5990
99
+ # --exit-duration-in-mins 1190 \
100
+ EXIT_OPTS=" \
101
+ --exit-duration-in-mins 5990 \
102
+ "
103
+
104
+ GPT_ARGS=" \
105
+ --pp-partition-method 'type:transformer|embedding' \
106
+ --num-layers $NLAYERS \
107
+ --hidden-size $NHIDDEN \
108
+ --num-attention-heads $NHEADS \
109
+ --seq-length $SEQ_LEN \
110
+ --max-position-embeddings $SEQ_LEN \
111
+ --micro-batch-size $MICRO_BATCH_SIZE \
112
+ --global-batch-size $GLOBAL_BATCH_SIZE \
113
+ --train-samples $TRAIN_SAMPLES \
114
+ --tokenizer-type PretrainedFromHF \
115
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
116
+ --init-method-std 0.0048 \
117
+ --embed-layernorm \
118
+ --fp16 \
119
+ --seed 42 \
120
+ --position-embedding-type alibi \
121
+ --checkpoint-activations \
122
+ --abort-on-unmet-fused-kernel-constraints \
123
+ --kill-switch-path $KILL_SWITCH_PATH \
124
+ --pad-vocab-size-to 250880 \
125
+ $OPTIMIZER_ARGS \
126
+ $EXIT_OPTS \
127
+ "
128
+
129
+ OUTPUT_ARGS=" \
130
+ --log-interval 1 \
131
+ --save-interval $SAVE_INTERVAL \
132
+ --eval-interval 125 \
133
+ --eval-iters 10 \
134
+ --eval-only True \
135
+ --tensorboard-dir $TENSORBOARD_PATH \
136
+ --tensorboard-queue-size 5 \
137
+ --log-timers-to-tensorboard \
138
+ --log-batch-size-to-tensorboard \
139
+ --log-validation-ppl-to-tensorboard \
140
+ "
141
+
142
+ ZERO_STAGE=1
143
+
144
+ config_json="./ds_config.$SLURM_JOBID.json"
145
+
146
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
147
+ cat <<EOT > $config_json
148
+ {
149
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
150
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
151
+ "gradient_clipping": 1.0,
152
+ "zero_optimization": {
153
+ "stage": $ZERO_STAGE
154
+ },
155
+ "fp16": {
156
+ "enabled": true,
157
+ "loss_scale": 0,
158
+ "loss_scale_window": 500,
159
+ "hysteresis": 2,
160
+ "min_loss_scale": 1,
161
+ "initial_scale_power": 12
162
+ },
163
+ "steps_per_print": 2000,
164
+ "wall_clock_breakdown": false
165
+ }
166
+ EOT
167
+
168
+
169
+ DEEPSPEED_ARGS=" \
170
+ --deepspeed \
171
+ --deepspeed_config ${config_json} \
172
+ --zero-stage ${ZERO_STAGE} \
173
+ --deepspeed-activation-checkpointing \
174
+ "
175
+
176
+ export LAUNCHER="python -u -m torch.distributed.run \
177
+ --nproc_per_node $GPUS_PER_NODE \
178
+ --nnodes $NNODES \
179
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
180
+ --rdzv_backend c10d \
181
+ --max_restarts 0 \
182
+ --tee 3 \
183
+ "
184
+
185
+ export CMD=" \
186
+ `pwd`/finetune_t0.py \
187
+ --tensor-model-parallel-size $TP_SIZE \
188
+ --pipeline-model-parallel-size $PP_SIZE \
189
+ $GPT_ARGS \
190
+ $OUTPUT_ARGS \
191
+ --save $CHECKPOINT_PATH \
192
+ --load $CHECKPOINT_PATH \
193
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
194
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
195
+ --dataloader-type single \
196
+ --data-impl mmap \
197
+ --distributed-backend nccl \
198
+ $DEEPSPEED_ARGS \
199
+ "
200
+
201
+ echo $CMD
202
+
203
+ # do not remove or the training will hang and nodes will be lost w/o this workaround
204
+ export CUDA_LAUNCH_BLOCKING=1
205
+
206
+ # hide duplicated errors using this hack - will be properly fixed in pt-1.12
207
+ export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json
208
+
209
+ clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt
210
+
211
+ echo "END TIME: $(date)"
train/tr13-mtf/smaller_models/tr13f-6B3-mtf-bos.slurm ADDED
@@ -0,0 +1,209 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=bostr13f-6B3-ml-t0
3
+ #SBATCH --partition=gpu_p5
4
+ #SBATCH --constraint=a100
5
+ #SBATCH --reservation=hug
6
+ #SBATCH --qos=qos_gpu-gc # up to 100h
7
+ #SBATCH --nodes=8
8
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
9
+ #SBATCH --cpus-per-task=64 # number of cores per task
10
+ #SBATCH --hint=nomultithread # we get physical cores not logical
11
+ #SBATCH --gres=gpu:8 # number of gpus
12
+ #SBATCH --time 100:00:00 # maximum execution time (HH:MM:SS)
13
+ #SBATCH --output=%x-%j.out # output file name
14
+ #SBATCH --account=six@a100
15
+
16
+ set -x -e
17
+
18
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
19
+ echo "START TIME: $(date)"
20
+
21
+ variant=bos
22
+
23
+ DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13f-6B3-ml-t0
24
+ CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant
25
+ REPO_PATH=$DATA_OUTPUT_PATH/tr13f-6B3-ml-t0-logs
26
+ TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant
27
+ LOGS_PATH=$REPO_PATH/logs/$variant
28
+ mkdir -p $LOGS_PATH
29
+ mkdir -p $TENSORBOARD_PATH
30
+
31
+ MEGATRON_DEEPSPEED_REPO=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed
32
+ cd $MEGATRON_DEEPSPEED_REPO
33
+
34
+ KILL_SWITCH_PATH=$MEGATRON_DEEPSPEED_REPO/kill-switch-tr13f-6B3-mtf
35
+
36
+ TRAIN_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/p31bos_train.txt
37
+ VALID_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/p31bos_validation.txt
38
+ TOKENIZER_NAME_OR_PATH=bigscience/tokenizer
39
+
40
+ # defining the right environment variables
41
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
42
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
43
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
44
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
45
+ export HF_DATASETS_OFFLINE=1
46
+ export TRANSFORMERS_OFFLINE=1
47
+
48
+ # testing for potential faulty nodes
49
+ # srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"'
50
+
51
+ # so processes know who to talk to
52
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
53
+ MASTER_PORT=6001
54
+
55
+ GPUS_PER_NODE=8
56
+ NNODES=$SLURM_NNODES
57
+
58
+ PP_SIZE=1
59
+ TP_SIZE=1
60
+
61
+ # T0 paper:
62
+ # ...truncate input and target sequences to 1024 and 256 tokens...
63
+ # ...use a batch size of 1024 sequences ... 2^20 total input tokens per batch...
64
+ # We use 2048 total tokens and 512 batch size = 2**20
65
+ MICRO_BATCH_SIZE=4
66
+ GLOBAL_BATCH_SIZE=2048
67
+
68
+ NLAYERS=30
69
+ NHIDDEN=4096
70
+ NHEADS=32
71
+ SEQ_LEN=2048
72
+
73
+ SAVE_INTERVAL=1000
74
+
75
+ TRAIN_SAMPLES=6_348_800
76
+
77
+ # T0 paper:
78
+ # "...we use a learning rate of 1e-3..."
79
+ # However, they use Adafactor, which adapts the LR
80
+ # For Adam we likely want a lower one
81
+ # FLAN:
82
+ # "...decay of 1e-4..""
83
+
84
+ # Uncomment for the first step
85
+ # --no-load-optim \
86
+ OPTIMIZER_ARGS=" \
87
+ --optimizer adam \
88
+ --adam-beta1 0.9 \
89
+ --adam-beta2 0.95 \
90
+ --adam-eps 1e-8 \
91
+ --lr 2e-5 \
92
+ --lr-decay-style constant \
93
+ --lr-warmup-samples 0 \
94
+ --clip-grad 1.0 \
95
+ --weight-decay 1e-4 \
96
+ "
97
+ # for 20h 1190, for 100h 5990
98
+ # --exit-duration-in-mins 1190 \
99
+ EXIT_OPTS=" \
100
+ --exit-duration-in-mins 5990 \
101
+ "
102
+
103
+ GPT_ARGS=" \
104
+ --pp-partition-method 'type:transformer|embedding' \
105
+ --num-layers $NLAYERS \
106
+ --hidden-size $NHIDDEN \
107
+ --num-attention-heads $NHEADS \
108
+ --seq-length $SEQ_LEN \
109
+ --max-position-embeddings $SEQ_LEN \
110
+ --micro-batch-size $MICRO_BATCH_SIZE \
111
+ --global-batch-size $GLOBAL_BATCH_SIZE \
112
+ --train-samples $TRAIN_SAMPLES \
113
+ --tokenizer-type PretrainedFromHF \
114
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
115
+ --init-method-std 0.0048 \
116
+ --embed-layernorm \
117
+ --fp16 \
118
+ --seed 42 \
119
+ --position-embedding-type alibi \
120
+ --checkpoint-activations \
121
+ --abort-on-unmet-fused-kernel-constraints \
122
+ --kill-switch-path $KILL_SWITCH_PATH \
123
+ --pad-vocab-size-to 250880 \
124
+ $OPTIMIZER_ARGS \
125
+ $EXIT_OPTS \
126
+ "
127
+
128
+ OUTPUT_ARGS=" \
129
+ --log-interval 1 \
130
+ --save-interval $SAVE_INTERVAL \
131
+ --eval-interval 250 \
132
+ --eval-iters 50 \
133
+ --tensorboard-dir $TENSORBOARD_PATH \
134
+ --tensorboard-queue-size 5 \
135
+ --log-timers-to-tensorboard \
136
+ --log-batch-size-to-tensorboard \
137
+ --log-validation-ppl-to-tensorboard \
138
+ "
139
+
140
+ ZERO_STAGE=1
141
+
142
+ config_json="./ds_config.$SLURM_JOBID.json"
143
+
144
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
145
+ cat <<EOT > $config_json
146
+ {
147
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
148
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
149
+ "gradient_clipping": 1.0,
150
+ "zero_optimization": {
151
+ "stage": $ZERO_STAGE
152
+ },
153
+ "fp16": {
154
+ "enabled": true,
155
+ "loss_scale": 0,
156
+ "loss_scale_window": 500,
157
+ "hysteresis": 2,
158
+ "min_loss_scale": 1,
159
+ "initial_scale_power": 12
160
+ },
161
+ "steps_per_print": 2000,
162
+ "wall_clock_breakdown": false
163
+ }
164
+ EOT
165
+
166
+
167
+ DEEPSPEED_ARGS=" \
168
+ --deepspeed \
169
+ --deepspeed_config ${config_json} \
170
+ --zero-stage ${ZERO_STAGE} \
171
+ --deepspeed-activation-checkpointing \
172
+ "
173
+
174
+ export LAUNCHER="python -u -m torch.distributed.run \
175
+ --nproc_per_node $GPUS_PER_NODE \
176
+ --nnodes $NNODES \
177
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
178
+ --rdzv_backend c10d \
179
+ --max_restarts 0 \
180
+ --tee 3 \
181
+ "
182
+
183
+ export CMD=" \
184
+ `pwd`/finetune_t0.py \
185
+ --tensor-model-parallel-size $TP_SIZE \
186
+ --pipeline-model-parallel-size $PP_SIZE \
187
+ $GPT_ARGS \
188
+ $OUTPUT_ARGS \
189
+ --save $CHECKPOINT_PATH \
190
+ --load $CHECKPOINT_PATH \
191
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
192
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
193
+ --dataloader-type single \
194
+ --data-impl mmap \
195
+ --distributed-backend nccl \
196
+ $DEEPSPEED_ARGS \
197
+ "
198
+
199
+ echo $CMD
200
+
201
+ # do not remove or the training will hang and nodes will be lost w/o this workaround
202
+ export CUDA_LAUNCH_BLOCKING=1
203
+
204
+ # hide duplicated errors using this hack - will be properly fixed in pt-1.12
205
+ export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json
206
+
207
+ clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt
208
+
209
+ echo "END TIME: $(date)"
train/tr13-mtf/smaller_models/tr13f-6B3-mtf-xp3.slurm ADDED
@@ -0,0 +1,210 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=xp3tr13f-6B3-ml-t0
3
+ #SBATCH --partition=gpu_p5
4
+ #SBATCH --constraint=a100
5
+ #SBATCH --reservation=hug
6
+ #SBATCH --qos=qos_gpu-gc # up to 100h
7
+ #SBATCH --nodes=8
8
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
9
+ #SBATCH --cpus-per-task=64 # number of cores per task
10
+ #SBATCH --hint=nomultithread # we get physical cores not logical
11
+ #SBATCH --gres=gpu:8 # number of gpus
12
+ #SBATCH --time 100:00:00 # maximum execution time (HH:MM:SS)
13
+ #SBATCH --output=%x-%j.out # output file name
14
+ #SBATCH --account=six@a100
15
+
16
+ set -x -e
17
+
18
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
19
+ echo "START TIME: $(date)"
20
+
21
+ variant=xp3
22
+
23
+ DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13f-6B3-ml-t0
24
+ CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant
25
+ REPO_PATH=$DATA_OUTPUT_PATH/tr13f-6B3-ml-t0-logs
26
+ TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant
27
+ LOGS_PATH=$REPO_PATH/logs/$variant
28
+ mkdir -p $LOGS_PATH
29
+ mkdir -p $TENSORBOARD_PATH
30
+
31
+ MEGATRON_DEEPSPEED_REPO=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed
32
+ cd $MEGATRON_DEEPSPEED_REPO
33
+
34
+ KILL_SWITCH_PATH=$MEGATRON_DEEPSPEED_REPO/kill-switch-tr13f-6B3-mtf
35
+
36
+ TRAIN_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3_train.txt
37
+ VALID_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3_validation.txt
38
+ TOKENIZER_NAME_OR_PATH=bigscience/tokenizer
39
+
40
+ # defining the right environment variables
41
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
42
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
43
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
44
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
45
+ export HF_DATASETS_OFFLINE=1
46
+ export TRANSFORMERS_OFFLINE=1
47
+
48
+ # testing for potential faulty nodes
49
+ # srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"'
50
+
51
+ # so processes know who to talk to
52
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
53
+ MASTER_PORT=6001
54
+
55
+ GPUS_PER_NODE=8
56
+ NNODES=$SLURM_NNODES
57
+
58
+ PP_SIZE=1
59
+ TP_SIZE=1
60
+
61
+ # T0 paper:
62
+ # ...truncate input and target sequences to 1024 and 256 tokens...
63
+ # ...use a batch size of 1024 sequences ... 2^20 total input tokens per batch...
64
+ # We use 2048 total tokens and 512 batch size = 2**20
65
+ MICRO_BATCH_SIZE=4
66
+ GLOBAL_BATCH_SIZE=2048
67
+
68
+ NLAYERS=30
69
+ NHIDDEN=4096
70
+ NHEADS=32
71
+ SEQ_LEN=2048
72
+
73
+ SAVE_INTERVAL=1000
74
+
75
+ TRAIN_SAMPLES=6_348_800
76
+
77
+ # T0 paper:
78
+ # "...we use a learning rate of 1e-3..."
79
+ # However, they use Adafactor, which adapts the LR
80
+ # For Adam we likely want a lower one
81
+ # FLAN:
82
+ # "...decay of 1e-4..""
83
+
84
+ # Uncomment for the first step
85
+ # --no-load-optim \
86
+ OPTIMIZER_ARGS=" \
87
+ --optimizer adam \
88
+ --adam-beta1 0.9 \
89
+ --adam-beta2 0.95 \
90
+ --adam-eps 1e-8 \
91
+ --lr 2e-5 \
92
+ --lr-decay-style constant \
93
+ --lr-warmup-samples 0 \
94
+ --clip-grad 1.0 \
95
+ --weight-decay 1e-4 \
96
+ --no-load-optim \
97
+ "
98
+ # for 20h 1190, for 100h 5990
99
+ # --exit-duration-in-mins 1190 \
100
+ EXIT_OPTS=" \
101
+ --exit-duration-in-mins 5990 \
102
+ "
103
+
104
+ GPT_ARGS=" \
105
+ --pp-partition-method 'type:transformer|embedding' \
106
+ --num-layers $NLAYERS \
107
+ --hidden-size $NHIDDEN \
108
+ --num-attention-heads $NHEADS \
109
+ --seq-length $SEQ_LEN \
110
+ --max-position-embeddings $SEQ_LEN \
111
+ --micro-batch-size $MICRO_BATCH_SIZE \
112
+ --global-batch-size $GLOBAL_BATCH_SIZE \
113
+ --train-samples $TRAIN_SAMPLES \
114
+ --tokenizer-type PretrainedFromHF \
115
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
116
+ --init-method-std 0.0048 \
117
+ --embed-layernorm \
118
+ --fp16 \
119
+ --seed 42 \
120
+ --position-embedding-type alibi \
121
+ --checkpoint-activations \
122
+ --abort-on-unmet-fused-kernel-constraints \
123
+ --kill-switch-path $KILL_SWITCH_PATH \
124
+ --pad-vocab-size-to 250880 \
125
+ $OPTIMIZER_ARGS \
126
+ $EXIT_OPTS \
127
+ "
128
+
129
+ OUTPUT_ARGS=" \
130
+ --log-interval 1 \
131
+ --save-interval $SAVE_INTERVAL \
132
+ --eval-interval 250 \
133
+ --eval-iters 50 \
134
+ --tensorboard-dir $TENSORBOARD_PATH \
135
+ --tensorboard-queue-size 5 \
136
+ --log-timers-to-tensorboard \
137
+ --log-batch-size-to-tensorboard \
138
+ --log-validation-ppl-to-tensorboard \
139
+ "
140
+
141
+ ZERO_STAGE=1
142
+
143
+ config_json="./ds_config.$SLURM_JOBID.json"
144
+
145
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
146
+ cat <<EOT > $config_json
147
+ {
148
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
149
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
150
+ "gradient_clipping": 1.0,
151
+ "zero_optimization": {
152
+ "stage": $ZERO_STAGE
153
+ },
154
+ "fp16": {
155
+ "enabled": true,
156
+ "loss_scale": 0,
157
+ "loss_scale_window": 500,
158
+ "hysteresis": 2,
159
+ "min_loss_scale": 1,
160
+ "initial_scale_power": 12
161
+ },
162
+ "steps_per_print": 2000,
163
+ "wall_clock_breakdown": false
164
+ }
165
+ EOT
166
+
167
+
168
+ DEEPSPEED_ARGS=" \
169
+ --deepspeed \
170
+ --deepspeed_config ${config_json} \
171
+ --zero-stage ${ZERO_STAGE} \
172
+ --deepspeed-activation-checkpointing \
173
+ "
174
+
175
+ export LAUNCHER="python -u -m torch.distributed.run \
176
+ --nproc_per_node $GPUS_PER_NODE \
177
+ --nnodes $NNODES \
178
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
179
+ --rdzv_backend c10d \
180
+ --max_restarts 0 \
181
+ --tee 3 \
182
+ "
183
+
184
+ export CMD=" \
185
+ `pwd`/finetune_t0.py \
186
+ --tensor-model-parallel-size $TP_SIZE \
187
+ --pipeline-model-parallel-size $PP_SIZE \
188
+ $GPT_ARGS \
189
+ $OUTPUT_ARGS \
190
+ --save $CHECKPOINT_PATH \
191
+ --load $CHECKPOINT_PATH \
192
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
193
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
194
+ --dataloader-type single \
195
+ --data-impl mmap \
196
+ --distributed-backend nccl \
197
+ $DEEPSPEED_ARGS \
198
+ "
199
+
200
+ echo $CMD
201
+
202
+ # do not remove or the training will hang and nodes will be lost w/o this workaround
203
+ export CUDA_LAUNCH_BLOCKING=1
204
+
205
+ # hide duplicated errors using this hack - will be properly fixed in pt-1.12
206
+ export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json
207
+
208
+ clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt
209
+
210
+ echo "END TIME: $(date)"
train/tr13-mtf/smaller_models/tr13f-6B3-mtf-xp3mixed.slurm ADDED
@@ -0,0 +1,210 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=xp3mixedtr13f-6B3-ml-t0
3
+ #SBATCH --partition=gpu_p5
4
+ #SBATCH --constraint=a100
5
+ #SBATCH --reservation=hug
6
+ #SBATCH --qos=qos_gpu-gc # up to 100h
7
+ #SBATCH --nodes=8
8
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
9
+ #SBATCH --cpus-per-task=64 # number of cores per task
10
+ #SBATCH --hint=nomultithread # we get physical cores not logical
11
+ #SBATCH --gres=gpu:8 # number of gpus
12
+ #SBATCH --time 100:00:00 # maximum execution time (HH:MM:SS)
13
+ #SBATCH --output=%x-%j.out # output file name
14
+ #SBATCH --account=six@a100
15
+
16
+ set -x -e
17
+
18
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
19
+ echo "START TIME: $(date)"
20
+
21
+ variant=xp3mixed
22
+
23
+ DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13f-6B3-ml-t0
24
+ CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant
25
+ REPO_PATH=$DATA_OUTPUT_PATH/tr13f-6B3-ml-t0-logs
26
+ TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant
27
+ LOGS_PATH=$REPO_PATH/logs/$variant
28
+ mkdir -p $LOGS_PATH
29
+ mkdir -p $TENSORBOARD_PATH
30
+
31
+ MEGATRON_DEEPSPEED_REPO=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed
32
+ cd $MEGATRON_DEEPSPEED_REPO
33
+
34
+ KILL_SWITCH_PATH=$MEGATRON_DEEPSPEED_REPO/kill-switch-tr13f-6B3-mtf
35
+
36
+ TRAIN_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3mixed_train.txt
37
+ VALID_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3mixed_validation.txt
38
+ TOKENIZER_NAME_OR_PATH=bigscience/tokenizer
39
+
40
+ # defining the right environment variables
41
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
42
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
43
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
44
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
45
+ export HF_DATASETS_OFFLINE=1
46
+ export TRANSFORMERS_OFFLINE=1
47
+
48
+ # testing for potential faulty nodes
49
+ # srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"'
50
+
51
+ # so processes know who to talk to
52
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
53
+ MASTER_PORT=6001
54
+
55
+ GPUS_PER_NODE=8
56
+ NNODES=$SLURM_NNODES
57
+
58
+ PP_SIZE=1
59
+ TP_SIZE=1
60
+
61
+ # T0 paper:
62
+ # ...truncate input and target sequences to 1024 and 256 tokens...
63
+ # ...use a batch size of 1024 sequences ... 2^20 total input tokens per batch...
64
+ # We use 2048 total tokens and 512 batch size = 2**20
65
+ MICRO_BATCH_SIZE=4
66
+ GLOBAL_BATCH_SIZE=2048
67
+
68
+ NLAYERS=30
69
+ NHIDDEN=4096
70
+ NHEADS=32
71
+ SEQ_LEN=2048
72
+
73
+ SAVE_INTERVAL=250
74
+
75
+ TRAIN_SAMPLES=6_348_800
76
+
77
+ # T0 paper:
78
+ # "...we use a learning rate of 1e-3..."
79
+ # However, they use Adafactor, which adapts the LR
80
+ # For Adam we likely want a lower one
81
+ # FLAN:
82
+ # "...decay of 1e-4..""
83
+
84
+ # Uncomment for the first step
85
+ # --no-load-optim \
86
+ OPTIMIZER_ARGS=" \
87
+ --optimizer adam \
88
+ --adam-beta1 0.9 \
89
+ --adam-beta2 0.95 \
90
+ --adam-eps 1e-8 \
91
+ --lr 2e-5 \
92
+ --lr-decay-style constant \
93
+ --lr-warmup-samples 0 \
94
+ --clip-grad 1.0 \
95
+ --weight-decay 1e-4 \
96
+ --no-load-optim \
97
+ "
98
+ # for 20h 1190, for 100h 5990
99
+ # --exit-duration-in-mins 1190 \
100
+ EXIT_OPTS=" \
101
+ --exit-duration-in-mins 5990 \
102
+ "
103
+
104
+ GPT_ARGS=" \
105
+ --pp-partition-method 'type:transformer|embedding' \
106
+ --num-layers $NLAYERS \
107
+ --hidden-size $NHIDDEN \
108
+ --num-attention-heads $NHEADS \
109
+ --seq-length $SEQ_LEN \
110
+ --max-position-embeddings $SEQ_LEN \
111
+ --micro-batch-size $MICRO_BATCH_SIZE \
112
+ --global-batch-size $GLOBAL_BATCH_SIZE \
113
+ --train-samples $TRAIN_SAMPLES \
114
+ --tokenizer-type PretrainedFromHF \
115
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
116
+ --init-method-std 0.0048 \
117
+ --embed-layernorm \
118
+ --fp16 \
119
+ --seed 42 \
120
+ --position-embedding-type alibi \
121
+ --checkpoint-activations \
122
+ --abort-on-unmet-fused-kernel-constraints \
123
+ --kill-switch-path $KILL_SWITCH_PATH \
124
+ --pad-vocab-size-to 250880 \
125
+ $OPTIMIZER_ARGS \
126
+ $EXIT_OPTS \
127
+ "
128
+
129
+ OUTPUT_ARGS=" \
130
+ --log-interval 1 \
131
+ --save-interval $SAVE_INTERVAL \
132
+ --eval-interval 250 \
133
+ --eval-iters 50 \
134
+ --tensorboard-dir $TENSORBOARD_PATH \
135
+ --tensorboard-queue-size 5 \
136
+ --log-timers-to-tensorboard \
137
+ --log-batch-size-to-tensorboard \
138
+ --log-validation-ppl-to-tensorboard \
139
+ "
140
+
141
+ ZERO_STAGE=1
142
+
143
+ config_json="./ds_config.$SLURM_JOBID.json"
144
+
145
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
146
+ cat <<EOT > $config_json
147
+ {
148
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
149
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
150
+ "gradient_clipping": 1.0,
151
+ "zero_optimization": {
152
+ "stage": $ZERO_STAGE
153
+ },
154
+ "fp16": {
155
+ "enabled": true,
156
+ "loss_scale": 0,
157
+ "loss_scale_window": 500,
158
+ "hysteresis": 2,
159
+ "min_loss_scale": 1,
160
+ "initial_scale_power": 12
161
+ },
162
+ "steps_per_print": 2000,
163
+ "wall_clock_breakdown": false
164
+ }
165
+ EOT
166
+
167
+
168
+ DEEPSPEED_ARGS=" \
169
+ --deepspeed \
170
+ --deepspeed_config ${config_json} \
171
+ --zero-stage ${ZERO_STAGE} \
172
+ --deepspeed-activation-checkpointing \
173
+ "
174
+
175
+ export LAUNCHER="python -u -m torch.distributed.run \
176
+ --nproc_per_node $GPUS_PER_NODE \
177
+ --nnodes $NNODES \
178
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
179
+ --rdzv_backend c10d \
180
+ --max_restarts 0 \
181
+ --tee 3 \
182
+ "
183
+
184
+ export CMD=" \
185
+ `pwd`/finetune_t0.py \
186
+ --tensor-model-parallel-size $TP_SIZE \
187
+ --pipeline-model-parallel-size $PP_SIZE \
188
+ $GPT_ARGS \
189
+ $OUTPUT_ARGS \
190
+ --save $CHECKPOINT_PATH \
191
+ --load $CHECKPOINT_PATH \
192
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
193
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
194
+ --dataloader-type single \
195
+ --data-impl mmap \
196
+ --distributed-backend nccl \
197
+ $DEEPSPEED_ARGS \
198
+ "
199
+
200
+ echo $CMD
201
+
202
+ # do not remove or the training will hang and nodes will be lost w/o this workaround
203
+ export CUDA_LAUNCH_BLOCKING=1
204
+
205
+ # hide duplicated errors using this hack - will be properly fixed in pt-1.12
206
+ export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json
207
+
208
+ clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt
209
+
210
+ echo "END TIME: $(date)"
train/tr13-mtf/smaller_models/tr13f-6B3-mtf.slurm ADDED
@@ -0,0 +1,209 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=tr13f-6B3-ml-t0
3
+ #SBATCH --partition=gpu_p5
4
+ #SBATCH --constraint=a100
5
+ #SBATCH --reservation=hug
6
+ #SBATCH --qos=qos_gpu-gc # up to 100h
7
+ #SBATCH --nodes=8
8
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
9
+ #SBATCH --cpus-per-task=64 # number of cores per task
10
+ #SBATCH --hint=nomultithread # we get physical cores not logical
11
+ #SBATCH --gres=gpu:8 # number of gpus
12
+ #SBATCH --time 100:00:00 # maximum execution time (HH:MM:SS)
13
+ #SBATCH --output=logs/%x-%j.out # output file name
14
+ #SBATCH --account=six@a100
15
+
16
+ set -x -e
17
+
18
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
19
+ echo "START TIME: $(date)"
20
+
21
+ variant=main
22
+
23
+ DATA_OUTPUT_PATH=$SCRATCH/checkpoints/tr13f-6B3-ml-t0
24
+ CHECKPOINT_PATH=/gpfsscratch/rech/six/uty16tp/checkpoints/tr13f-6B3-ml-t0/checkpoints/$variant
25
+ REPO_PATH=$DATA_OUTPUT_PATH/tr13f-6B3-ml-t0-logs
26
+ TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant
27
+ LOGS_PATH=$REPO_PATH/logs/$variant
28
+ mkdir -p $LOGS_PATH
29
+ mkdir -p $TENSORBOARD_PATH
30
+
31
+ MEGATRON_DEEPSPEED_REPO=$WORK/code/big_science/Megatron-DeepSpeed
32
+ cd $MEGATRON_DEEPSPEED_REPO
33
+
34
+ KILL_SWITCH_PATH=$MEGATRON_DEEPSPEED_REPO/kill-switch-tr13f-6B3-mtf
35
+
36
+ TRAIN_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/p31_train.txt
37
+ VALID_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/p31_validation.txt
38
+ TOKENIZER_NAME_OR_PATH=bigscience/tokenizer
39
+
40
+ # defining the right environment variables
41
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
42
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
43
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
44
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
45
+ export HF_DATASETS_OFFLINE=1
46
+ export TRANSFORMERS_OFFLINE=1
47
+
48
+ # testing for potential faulty nodes
49
+ # srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"'
50
+
51
+ # so processes know who to talk to
52
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
53
+ MASTER_PORT=6001
54
+
55
+ GPUS_PER_NODE=8
56
+ NNODES=$SLURM_NNODES
57
+
58
+ PP_SIZE=1
59
+ TP_SIZE=1
60
+
61
+ # T0 paper:
62
+ # ...truncate input and target sequences to 1024 and 256 tokens...
63
+ # ...use a batch size of 1024 sequences ... 2^20 total input tokens per batch...
64
+ # We use 2048 total tokens and 512 batch size = 2**20
65
+ MICRO_BATCH_SIZE=4
66
+ GLOBAL_BATCH_SIZE=2048
67
+
68
+ NLAYERS=30
69
+ NHIDDEN=4096
70
+ NHEADS=32
71
+ SEQ_LEN=2048
72
+
73
+ SAVE_INTERVAL=1000
74
+
75
+ TRAIN_SAMPLES=6_348_800
76
+
77
+ # T0 paper:
78
+ # "...we use a learning rate of 1e-3..."
79
+ # However, they use Adafactor, which adapts the LR
80
+ # For Adam we likely want a lower one
81
+ # FLAN:
82
+ # "...decay of 1e-4..""
83
+
84
+ # Uncomment for the first step
85
+ # --no-load-optim \
86
+ OPTIMIZER_ARGS=" \
87
+ --optimizer adam \
88
+ --adam-beta1 0.9 \
89
+ --adam-beta2 0.95 \
90
+ --adam-eps 1e-8 \
91
+ --lr 2e-5 \
92
+ --lr-decay-style constant \
93
+ --lr-warmup-samples 0 \
94
+ --clip-grad 1.0 \
95
+ --weight-decay 1e-4 \
96
+ "
97
+ # for 20h 1190, for 100h 5990
98
+ # --exit-duration-in-mins 1190 \
99
+ EXIT_OPTS=" \
100
+ --exit-duration-in-mins 5990 \
101
+ "
102
+
103
+ GPT_ARGS=" \
104
+ --pp-partition-method 'type:transformer|embedding' \
105
+ --num-layers $NLAYERS \
106
+ --hidden-size $NHIDDEN \
107
+ --num-attention-heads $NHEADS \
108
+ --seq-length $SEQ_LEN \
109
+ --max-position-embeddings $SEQ_LEN \
110
+ --micro-batch-size $MICRO_BATCH_SIZE \
111
+ --global-batch-size $GLOBAL_BATCH_SIZE \
112
+ --train-samples $TRAIN_SAMPLES \
113
+ --tokenizer-type PretrainedFromHF \
114
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
115
+ --init-method-std 0.0048 \
116
+ --embed-layernorm \
117
+ --fp16 \
118
+ --seed 42 \
119
+ --position-embedding-type alibi \
120
+ --checkpoint-activations \
121
+ --abort-on-unmet-fused-kernel-constraints \
122
+ --kill-switch-path $KILL_SWITCH_PATH \
123
+ --pad-vocab-size-to 250880 \
124
+ $OPTIMIZER_ARGS \
125
+ $EXIT_OPTS \
126
+ "
127
+
128
+ OUTPUT_ARGS=" \
129
+ --log-interval 1 \
130
+ --save-interval $SAVE_INTERVAL \
131
+ --eval-interval 1000 \
132
+ --eval-iters 10 \
133
+ --tensorboard-dir $TENSORBOARD_PATH \
134
+ --tensorboard-queue-size 5 \
135
+ --log-timers-to-tensorboard \
136
+ --log-batch-size-to-tensorboard \
137
+ --log-validation-ppl-to-tensorboard \
138
+ "
139
+
140
+ ZERO_STAGE=1
141
+
142
+ config_json="./ds_config.$SLURM_JOBID.json"
143
+
144
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
145
+ cat <<EOT > $config_json
146
+ {
147
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
148
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
149
+ "gradient_clipping": 1.0,
150
+ "zero_optimization": {
151
+ "stage": $ZERO_STAGE
152
+ },
153
+ "fp16": {
154
+ "enabled": true,
155
+ "loss_scale": 0,
156
+ "loss_scale_window": 500,
157
+ "hysteresis": 2,
158
+ "min_loss_scale": 1,
159
+ "initial_scale_power": 12
160
+ },
161
+ "steps_per_print": 2000,
162
+ "wall_clock_breakdown": false
163
+ }
164
+ EOT
165
+
166
+
167
+ DEEPSPEED_ARGS=" \
168
+ --deepspeed \
169
+ --deepspeed_config ${config_json} \
170
+ --zero-stage ${ZERO_STAGE} \
171
+ --deepspeed-activation-checkpointing \
172
+ "
173
+
174
+ export LAUNCHER="python -u -m torch.distributed.run \
175
+ --nproc_per_node $GPUS_PER_NODE \
176
+ --nnodes $NNODES \
177
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
178
+ --rdzv_backend c10d \
179
+ --max_restarts 0 \
180
+ --tee 3 \
181
+ "
182
+
183
+ export CMD=" \
184
+ `pwd`/finetune_t0_non_causal_decoder.py \
185
+ --tensor-model-parallel-size $TP_SIZE \
186
+ --pipeline-model-parallel-size $PP_SIZE \
187
+ $GPT_ARGS \
188
+ $OUTPUT_ARGS \
189
+ --save $CHECKPOINT_PATH \
190
+ --load $CHECKPOINT_PATH \
191
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
192
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
193
+ --dataloader-type single \
194
+ --data-impl mmap \
195
+ --distributed-backend nccl \
196
+ $DEEPSPEED_ARGS \
197
+ "
198
+
199
+ echo $CMD
200
+
201
+ # do not remove or the training will hang and nodes will be lost w/o this workaround
202
+ export CUDA_LAUNCH_BLOCKING=1
203
+
204
+ # hide duplicated errors using this hack - will be properly fixed in pt-1.12
205
+ export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json
206
+
207
+ clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt
208
+
209
+ echo "END TIME: $(date)"
train/tr13-mtf/smaller_models/tr13f-6b3-mtf-p31lossseq.slurm ADDED
@@ -0,0 +1,210 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=p31tr13f-6B3-ml-t0
3
+ #SBATCH --partition=gpu_p5
4
+ #SBATCH --constraint=a100
5
+ #SBATCH --reservation=hug
6
+ #SBATCH --qos=qos_gpu-gc # up to 100h
7
+ #SBATCH --nodes=8
8
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
9
+ #SBATCH --cpus-per-task=64 # number of cores per tasks
10
+ #SBATCH --hint=nomultithread # we get physical cores not logical
11
+ #SBATCH --gres=gpu:8 # number of gpus
12
+ #SBATCH --time 100:00:00 # maximum execution time (HH:MM:SS)
13
+ #SBATCH --output=%x-%j.out # output file name
14
+ #SBATCH --account=six@a100
15
+
16
+ set -x -e
17
+
18
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
19
+ echo "START TIME: $(date)"
20
+
21
+ variant=p31
22
+
23
+ DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13f-6B3-ml-t0
24
+ CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant
25
+ REPO_PATH=$DATA_OUTPUT_PATH/tr13f-6B3-ml-t0-logs
26
+ TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant
27
+ LOGS_PATH=$REPO_PATH/logs/$variant
28
+ mkdir -p $LOGS_PATH
29
+ mkdir -p $TENSORBOARD_PATH
30
+
31
+ MEGATRON_DEEPSPEED_REPO=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0/megdslossseq/Megatron-DeepSpeed
32
+ cd $MEGATRON_DEEPSPEED_REPO
33
+
34
+ KILL_SWITCH_PATH=$MEGATRON_DEEPSPEED_REPO/kill-switch-tr13f-6B3-mtf
35
+
36
+ TRAIN_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/p31_train.txt
37
+ VALID_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/p31_validation.txt
38
+ TOKENIZER_NAME_OR_PATH=bigscience/tokenizer
39
+
40
+ # defining the right environment variables
41
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
42
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
43
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
44
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
45
+ export HF_DATASETS_OFFLINE=1
46
+ export TRANSFORMERS_OFFLINE=1
47
+
48
+ # testing for potential faulty nodes
49
+ # srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"'
50
+
51
+ # so processes know who to talk to
52
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
53
+ MASTER_PORT=6001
54
+
55
+ GPUS_PER_NODE=8
56
+ NNODES=$SLURM_NNODES
57
+
58
+ PP_SIZE=1
59
+ TP_SIZE=1
60
+
61
+ # T0 paper:
62
+ # ...truncate input and target sequences to 1024 and 256 tokens...
63
+ # ...use a batch size of 1024 sequences ... 2^20 total input tokens per batch...
64
+ # We use 2048 total tokens and 512 batch size = 2**20
65
+ MICRO_BATCH_SIZE=4
66
+ GLOBAL_BATCH_SIZE=2048
67
+
68
+ NLAYERS=30
69
+ NHIDDEN=4096
70
+ NHEADS=32
71
+ SEQ_LEN=2048
72
+
73
+ SAVE_INTERVAL=250
74
+
75
+ TRAIN_SAMPLES=6_348_800
76
+
77
+ # T0 paper:
78
+ # "...we use a learning rate of 1e-3..."
79
+ # However, they use Adafactor, which adapts the LR
80
+ # For Adam we likely want a lower one
81
+ # FLAN:
82
+ # "...decay of 1e-4..""
83
+
84
+ # Uncomment for the first step
85
+ # --no-load-optim \
86
+ OPTIMIZER_ARGS=" \
87
+ --optimizer adam \
88
+ --adam-beta1 0.9 \
89
+ --adam-beta2 0.95 \
90
+ --adam-eps 1e-8 \
91
+ --lr 2e-5 \
92
+ --lr-decay-style constant \
93
+ --lr-warmup-samples 0 \
94
+ --clip-grad 1.0 \
95
+ --weight-decay 1e-4 \
96
+ --no-load-optim \
97
+ "
98
+ # for 20h 1190, for 100h 5990
99
+ # --exit-duration-in-mins 1190 \
100
+ EXIT_OPTS=" \
101
+ --exit-duration-in-mins 5990 \
102
+ "
103
+
104
+ GPT_ARGS=" \
105
+ --pp-partition-method 'type:transformer|embedding' \
106
+ --num-layers $NLAYERS \
107
+ --hidden-size $NHIDDEN \
108
+ --num-attention-heads $NHEADS \
109
+ --seq-length $SEQ_LEN \
110
+ --max-position-embeddings $SEQ_LEN \
111
+ --micro-batch-size $MICRO_BATCH_SIZE \
112
+ --global-batch-size $GLOBAL_BATCH_SIZE \
113
+ --train-samples $TRAIN_SAMPLES \
114
+ --tokenizer-type PretrainedFromHF \
115
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
116
+ --init-method-std 0.0048 \
117
+ --embed-layernorm \
118
+ --fp16 \
119
+ --seed 42 \
120
+ --position-embedding-type alibi \
121
+ --checkpoint-activations \
122
+ --abort-on-unmet-fused-kernel-constraints \
123
+ --kill-switch-path $KILL_SWITCH_PATH \
124
+ --pad-vocab-size-to 250880 \
125
+ $OPTIMIZER_ARGS \
126
+ $EXIT_OPTS \
127
+ "
128
+
129
+ OUTPUT_ARGS=" \
130
+ --log-interval 1 \
131
+ --save-interval $SAVE_INTERVAL \
132
+ --eval-interval 250 \
133
+ --eval-iters 50 \
134
+ --tensorboard-dir $TENSORBOARD_PATH \
135
+ --tensorboard-queue-size 5 \
136
+ --log-timers-to-tensorboard \
137
+ --log-batch-size-to-tensorboard \
138
+ --log-validation-ppl-to-tensorboard \
139
+ "
140
+
141
+ ZERO_STAGE=1
142
+
143
+ config_json="./ds_config.$SLURM_JOBID.json"
144
+
145
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
146
+ cat <<EOT > $config_json
147
+ {
148
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
149
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
150
+ "gradient_clipping": 1.0,
151
+ "zero_optimization": {
152
+ "stage": $ZERO_STAGE
153
+ },
154
+ "fp16": {
155
+ "enabled": true,
156
+ "loss_scale": 0,
157
+ "loss_scale_window": 500,
158
+ "hysteresis": 2,
159
+ "min_loss_scale": 1,
160
+ "initial_scale_power": 12
161
+ },
162
+ "steps_per_print": 2000,
163
+ "wall_clock_breakdown": false
164
+ }
165
+ EOT
166
+
167
+
168
+ DEEPSPEED_ARGS=" \
169
+ --deepspeed \
170
+ --deepspeed_config ${config_json} \
171
+ --zero-stage ${ZERO_STAGE} \
172
+ --deepspeed-activation-checkpointing \
173
+ "
174
+
175
+ export LAUNCHER="python -u -m torch.distributed.run \
176
+ --nproc_per_node $GPUS_PER_NODE \
177
+ --nnodes $NNODES \
178
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
179
+ --rdzv_backend c10d \
180
+ --max_restarts 0 \
181
+ --tee 3 \
182
+ "
183
+
184
+ export CMD=" \
185
+ `pwd`/finetune_t0.py \
186
+ --tensor-model-parallel-size $TP_SIZE \
187
+ --pipeline-model-parallel-size $PP_SIZE \
188
+ $GPT_ARGS \
189
+ $OUTPUT_ARGS \
190
+ --save $CHECKPOINT_PATH \
191
+ --load $CHECKPOINT_PATH \
192
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
193
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
194
+ --dataloader-type single \
195
+ --data-impl mmap \
196
+ --distributed-backend nccl \
197
+ $DEEPSPEED_ARGS \
198
+ "
199
+
200
+ echo $CMD
201
+
202
+ # do not remove or the training will hang and nodes will be lost w/o this workaround
203
+ export CUDA_LAUNCH_BLOCKING=1
204
+
205
+ # hide duplicated errors using this hack - will be properly fixed in pt-1.12
206
+ export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json
207
+
208
+ clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt
209
+
210
+ echo "END TIME: $(date)"
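
The `ds_config` heredoc above pins only the micro and global batch sizes; as its comment notes, DeepSpeed derives the gradient-accumulation steps (GAS) from them via `set_train_batch_size()`. A quick sanity check of the numbers this 8-node p31 job implies, using the values defined in the script (an illustrative sketch, not part of the committed file):

```bash
# Sanity-check of the batch-size arithmetic implied by the script above
# (illustration only, not part of the committed .slurm file).
GPUS_PER_NODE=8; NNODES=8          # from --gres=gpu:8 and #SBATCH --nodes=8
TP_SIZE=1; PP_SIZE=1
MICRO_BATCH_SIZE=4; GLOBAL_BATCH_SIZE=2048

# data-parallel replicas = total GPUs / (tensor-parallel * pipeline-parallel)
DP_SIZE=$(( GPUS_PER_NODE * NNODES / (TP_SIZE * PP_SIZE) ))   # 64
# gradient accumulation steps DeepSpeed derives from the two batch sizes
GAS=$(( GLOBAL_BATCH_SIZE / (MICRO_BATCH_SIZE * DP_SIZE) ))   # 2048 / (4 * 64) = 8
echo "DP=$DP_SIZE  GAS=$GAS"
```

With TP=PP=1 every GPU is its own data-parallel replica, so each of the 64 replicas accumulates 8 micro-batches of 4 samples per optimizer step.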
train/tr13-mtf/smaller_models/tr13f-6b3-mtf-xp3cap.slurm ADDED
@@ -0,0 +1,210 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=xp3captr13f-6B3-ml-t0
3
+ #SBATCH --partition=gpu_p5
4
+ #SBATCH --constraint=a100
5
+ #SBATCH --reservation=hug
6
+ #SBATCH --qos=qos_gpu-gc # up to 100h
7
+ #SBATCH --nodes=8
8
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
9
+ #SBATCH --cpus-per-task=64 # number of cores per tasks
10
+ #SBATCH --hint=nomultithread # we get physical cores not logical
11
+ #SBATCH --gres=gpu:8 # number of gpus
12
+ #SBATCH --time 100:00:00 # maximum execution time (HH:MM:SS)
13
+ #SBATCH --output=%x-%j.out # output file name
14
+ #SBATCH --account=six@a100
15
+
16
+ set -x -e
17
+
18
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
19
+ echo "START TIME: $(date)"
20
+
21
+ variant=xp3cap
22
+
23
+ DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13f-6B3-ml-t0
24
+ CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant
25
+ REPO_PATH=$DATA_OUTPUT_PATH/tr13f-6B3-ml-t0-logs
26
+ TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant
27
+ LOGS_PATH=$REPO_PATH/logs/$variant
28
+ mkdir -p $LOGS_PATH
29
+ mkdir -p $TENSORBOARD_PATH
30
+
31
+ MEGATRON_DEEPSPEED_REPO=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed
32
+ cd $MEGATRON_DEEPSPEED_REPO
33
+
34
+ KILL_SWITCH_PATH=$MEGATRON_DEEPSPEED_REPO/kill-switch-tr13f-6B3-mtf
35
+
36
+ TRAIN_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3cap_train.txt
37
+ VALID_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3cap_validation.txt
38
+ TOKENIZER_NAME_OR_PATH=bigscience/tokenizer
39
+
40
+ # defining the right environment variables
41
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
42
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
43
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
44
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
45
+ export HF_DATASETS_OFFLINE=1
46
+ export TRANSFORMERS_OFFLINE=1
47
+
48
+ # testing for potential faulty nodes
49
+ # srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"'
50
+
51
+ # so processes know who to talk to
52
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
53
+ MASTER_PORT=6001
54
+
55
+ GPUS_PER_NODE=8
56
+ NNODES=$SLURM_NNODES
57
+
58
+ PP_SIZE=1
59
+ TP_SIZE=1
60
+
61
+ # T0 paper:
62
+ # ...truncate input and target sequences to 1024 and 256 tokens...
63
+ # ...use a batch size of 1024 sequences ... 2^20 total input tokens per batch...
64
+ # We use 2048 total tokens and 512 batch size = 2**20
65
+ MICRO_BATCH_SIZE=4
66
+ GLOBAL_BATCH_SIZE=2048
67
+
68
+ NLAYERS=30
69
+ NHIDDEN=4096
70
+ NHEADS=32
71
+ SEQ_LEN=2048
72
+
73
+ SAVE_INTERVAL=250
74
+
75
+ TRAIN_SAMPLES=6_348_800
76
+
77
+ # T0 paper:
78
+ # "...we use a learning rate of 1e-3..."
79
+ # However, they use Adafactor, which adapts the LR
80
+ # For Adam we likely want a lower one
81
+ # FLAN:
82
+ # "...decay of 1e-4..""
83
+
84
+ # Uncomment for the first step
85
+ # --no-load-optim \
86
+ OPTIMIZER_ARGS=" \
87
+ --optimizer adam \
88
+ --adam-beta1 0.9 \
89
+ --adam-beta2 0.95 \
90
+ --adam-eps 1e-8 \
91
+ --lr 2e-5 \
92
+ --lr-decay-style constant \
93
+ --lr-warmup-samples 0 \
94
+ --clip-grad 1.0 \
95
+ --weight-decay 1e-4 \
96
+ --no-load-optim \
97
+ "
98
+ # for 20h 1190, for 100h 5990
99
+ # --exit-duration-in-mins 1190 \
100
+ EXIT_OPTS=" \
101
+ --exit-duration-in-mins 5990 \
102
+ "
103
+
104
+ GPT_ARGS=" \
105
+ --pp-partition-method 'type:transformer|embedding' \
106
+ --num-layers $NLAYERS \
107
+ --hidden-size $NHIDDEN \
108
+ --num-attention-heads $NHEADS \
109
+ --seq-length $SEQ_LEN \
110
+ --max-position-embeddings $SEQ_LEN \
111
+ --micro-batch-size $MICRO_BATCH_SIZE \
112
+ --global-batch-size $GLOBAL_BATCH_SIZE \
113
+ --train-samples $TRAIN_SAMPLES \
114
+ --tokenizer-type PretrainedFromHF \
115
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
116
+ --init-method-std 0.0048 \
117
+ --embed-layernorm \
118
+ --fp16 \
119
+ --seed 42 \
120
+ --position-embedding-type alibi \
121
+ --checkpoint-activations \
122
+ --abort-on-unmet-fused-kernel-constraints \
123
+ --kill-switch-path $KILL_SWITCH_PATH \
124
+ --pad-vocab-size-to 250880 \
125
+ $OPTIMIZER_ARGS \
126
+ $EXIT_OPTS \
127
+ "
128
+
129
+ OUTPUT_ARGS=" \
130
+ --log-interval 1 \
131
+ --save-interval $SAVE_INTERVAL \
132
+ --eval-interval 250 \
133
+ --eval-iters 50 \
134
+ --tensorboard-dir $TENSORBOARD_PATH \
135
+ --tensorboard-queue-size 5 \
136
+ --log-timers-to-tensorboard \
137
+ --log-batch-size-to-tensorboard \
138
+ --log-validation-ppl-to-tensorboard \
139
+ "
140
+
141
+ ZERO_STAGE=1
142
+
143
+ config_json="./ds_config.$SLURM_JOBID.json"
144
+
145
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
146
+ cat <<EOT > $config_json
147
+ {
148
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
149
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
150
+ "gradient_clipping": 1.0,
151
+ "zero_optimization": {
152
+ "stage": $ZERO_STAGE
153
+ },
154
+ "fp16": {
155
+ "enabled": true,
156
+ "loss_scale": 0,
157
+ "loss_scale_window": 500,
158
+ "hysteresis": 2,
159
+ "min_loss_scale": 1,
160
+ "initial_scale_power": 12
161
+ },
162
+ "steps_per_print": 2000,
163
+ "wall_clock_breakdown": false
164
+ }
165
+ EOT
166
+
167
+
168
+ DEEPSPEED_ARGS=" \
169
+ --deepspeed \
170
+ --deepspeed_config ${config_json} \
171
+ --zero-stage ${ZERO_STAGE} \
172
+ --deepspeed-activation-checkpointing \
173
+ "
174
+
175
+ export LAUNCHER="python -u -m torch.distributed.run \
176
+ --nproc_per_node $GPUS_PER_NODE \
177
+ --nnodes $NNODES \
178
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
179
+ --rdzv_backend c10d \
180
+ --max_restarts 0 \
181
+ --tee 3 \
182
+ "
183
+
184
+ export CMD=" \
185
+ `pwd`/finetune_t0.py \
186
+ --tensor-model-parallel-size $TP_SIZE \
187
+ --pipeline-model-parallel-size $PP_SIZE \
188
+ $GPT_ARGS \
189
+ $OUTPUT_ARGS \
190
+ --save $CHECKPOINT_PATH \
191
+ --load $CHECKPOINT_PATH \
192
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
193
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
194
+ --dataloader-type single \
195
+ --data-impl mmap \
196
+ --distributed-backend nccl \
197
+ $DEEPSPEED_ARGS \
198
+ "
199
+
200
+ echo $CMD
201
+
202
+ # do not remove or the training will hang and nodes will be lost w/o this workaround
203
+ export CUDA_LAUNCH_BLOCKING=1
204
+
205
+ # hide duplicated errors using this hack - will be properly fixed in pt-1.12
206
+ export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json
207
+
208
+ clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt
209
+
210
+ echo "END TIME: $(date)"
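
Because the global batch size is fixed, `TRAIN_SAMPLES` fixes the number of optimizer steps, which is the unit the save and eval intervals are counted in. A rough check with the values from the xp3cap script above (illustrative only, not part of the committed file):

```bash
# How long the xp3cap run above is scheduled to be, in optimizer steps
# (illustration only, not part of the committed .slurm file).
TRAIN_SAMPLES=6348800    # 6_348_800 in the script; underscores stripped for shell arithmetic
GLOBAL_BATCH_SIZE=2048
SAVE_INTERVAL=250

TOTAL_STEPS=$(( TRAIN_SAMPLES / GLOBAL_BATCH_SIZE ))   # 3100 optimizer steps
NUM_CHECKPOINTS=$(( TOTAL_STEPS / SAVE_INTERVAL ))     # 12 full save intervals
echo "steps=$TOTAL_STEPS  checkpoints~=$NUM_CHECKPOINTS"
```

In practice the job can stop earlier via `--exit-duration-in-mins` or the kill-switch file, so 3100 steps is an upper bound rather than a guarantee.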
train/tr13-mtf/smaller_models/tr13f-6b3-mtf-xp3capmixfixlossseq.slurm ADDED
@@ -0,0 +1,211 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=xp3capmixfixlossseq
3
+ #SBATCH --partition=gpu_p5
4
+ #SBATCH --constraint=a100
5
+ #SBATCH --reservation=hug
6
+ #SBATCH --qos=qos_gpu-gc # up to 100h
7
+ #SBATCH --nodes=8
8
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
9
+ #SBATCH --cpus-per-task=64 # number of cores per tasks
10
+ #SBATCH --hint=nomultithread # we get physical cores not logical
11
+ #SBATCH --gres=gpu:8 # number of gpus
12
+ #SBATCH --time 100:00:00 # maximum execution time (HH:MM:SS)
13
+ #SBATCH --output=%x-%j.out # output file name
14
+ #SBATCH --account=six@a100
15
+
16
+ set -x -e
17
+
18
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
19
+ echo "START TIME: $(date)"
20
+
21
+ variant=xp3capmixfixlossseq
22
+
23
+ DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13f-6B3-ml-t0
24
+ CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant
25
+ REPO_PATH=$DATA_OUTPUT_PATH/tr13f-6B3-ml-t0-logs
26
+ TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant
27
+ LOGS_PATH=$REPO_PATH/logs/$variant
28
+ mkdir -p $LOGS_PATH
29
+ mkdir -p $TENSORBOARD_PATH
30
+
31
+ MEGATRON_DEEPSPEED_REPO=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0/megdslossseqnew/Megatron-DeepSpeed
32
+ cd $MEGATRON_DEEPSPEED_REPO
33
+
34
+ KILL_SWITCH_PATH=$MEGATRON_DEEPSPEED_REPO/kill-switch-tr13f-6B3-mtf
35
+
36
+ TRAIN_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixfix_train.txt
37
+ VALID_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixfix_validation.txt
38
+ TOKENIZER_NAME_OR_PATH=bigscience/tokenizer
39
+
40
+ # defining the right environment variables
41
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
42
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
43
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
44
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
45
+ export HF_DATASETS_OFFLINE=1
46
+ export TRANSFORMERS_OFFLINE=1
47
+
48
+ # testing for potential faulty nodes
49
+ # srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"'
50
+
51
+ # so processes know who to talk to
52
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
53
+ MASTER_PORT=6001
54
+
55
+ GPUS_PER_NODE=8
56
+ NNODES=$SLURM_NNODES
57
+
58
+ PP_SIZE=1
59
+ TP_SIZE=1
60
+
61
+ # T0 paper:
62
+ # ...truncate input and target sequences to 1024 and 256 tokens...
63
+ # ...use a batch size of 1024 sequences ... 2^20 total input tokens per batch...
64
+ # We use 2048 total tokens and 512 batch size = 2**20
65
+ MICRO_BATCH_SIZE=4
66
+ GLOBAL_BATCH_SIZE=2048
67
+
68
+ NLAYERS=30
69
+ NHIDDEN=4096
70
+ NHEADS=32
71
+ SEQ_LEN=2048
72
+
73
+ SAVE_INTERVAL=2
74
+
75
+ TRAIN_SAMPLES=6_348_800
76
+
77
+ # T0 paper:
78
+ # "...we use a learning rate of 1e-3..."
79
+ # However, they use Adafactor, which adapts the LR
80
+ # For Adam we likely want a lower one
81
+ # FLAN:
82
+ # "...decay of 1e-4..""
83
+
84
+ # Uncomment for the first step
85
+ # --no-load-optim \
86
+ OPTIMIZER_ARGS=" \
87
+ --optimizer adam \
88
+ --adam-beta1 0.9 \
89
+ --adam-beta2 0.95 \
90
+ --adam-eps 1e-8 \
91
+ --lr 2e-5 \
92
+ --lr-decay-style constant \
93
+ --lr-warmup-samples 0 \
94
+ --clip-grad 1.0 \
95
+ --weight-decay 1e-4 \
96
+ --no-load-optim \
97
+ --norm-target-loss \
98
+ "
99
+ # for 20h 1190, for 100h 5990
100
+ # --exit-duration-in-mins 1190 \
101
+ EXIT_OPTS=" \
102
+ --exit-duration-in-mins 5990 \
103
+ "
104
+
105
+ GPT_ARGS=" \
106
+ --pp-partition-method 'type:transformer|embedding' \
107
+ --num-layers $NLAYERS \
108
+ --hidden-size $NHIDDEN \
109
+ --num-attention-heads $NHEADS \
110
+ --seq-length $SEQ_LEN \
111
+ --max-position-embeddings $SEQ_LEN \
112
+ --micro-batch-size $MICRO_BATCH_SIZE \
113
+ --global-batch-size $GLOBAL_BATCH_SIZE \
114
+ --train-samples $TRAIN_SAMPLES \
115
+ --tokenizer-type PretrainedFromHF \
116
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
117
+ --init-method-std 0.0048 \
118
+ --embed-layernorm \
119
+ --fp16 \
120
+ --seed 42 \
121
+ --position-embedding-type alibi \
122
+ --checkpoint-activations \
123
+ --abort-on-unmet-fused-kernel-constraints \
124
+ --kill-switch-path $KILL_SWITCH_PATH \
125
+ --pad-vocab-size-to 250880 \
126
+ $OPTIMIZER_ARGS \
127
+ $EXIT_OPTS \
128
+ "
129
+
130
+ OUTPUT_ARGS=" \
131
+ --log-interval 1 \
132
+ --save-interval $SAVE_INTERVAL \
133
+ --eval-interval 250 \
134
+ --eval-iters 50 \
135
+ --tensorboard-dir $TENSORBOARD_PATH \
136
+ --tensorboard-queue-size 5 \
137
+ --log-timers-to-tensorboard \
138
+ --log-batch-size-to-tensorboard \
139
+ --log-validation-ppl-to-tensorboard \
140
+ "
141
+
142
+ ZERO_STAGE=1
143
+
144
+ config_json="./ds_config.$SLURM_JOBID.json"
145
+
146
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
147
+ cat <<EOT > $config_json
148
+ {
149
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
150
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
151
+ "gradient_clipping": 1.0,
152
+ "zero_optimization": {
153
+ "stage": $ZERO_STAGE
154
+ },
155
+ "fp16": {
156
+ "enabled": true,
157
+ "loss_scale": 0,
158
+ "loss_scale_window": 500,
159
+ "hysteresis": 2,
160
+ "min_loss_scale": 1,
161
+ "initial_scale_power": 12
162
+ },
163
+ "steps_per_print": 2000,
164
+ "wall_clock_breakdown": false
165
+ }
166
+ EOT
167
+
168
+
169
+ DEEPSPEED_ARGS=" \
170
+ --deepspeed \
171
+ --deepspeed_config ${config_json} \
172
+ --zero-stage ${ZERO_STAGE} \
173
+ --deepspeed-activation-checkpointing \
174
+ "
175
+
176
+ export LAUNCHER="python -u -m torch.distributed.run \
177
+ --nproc_per_node $GPUS_PER_NODE \
178
+ --nnodes $NNODES \
179
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
180
+ --rdzv_backend c10d \
181
+ --max_restarts 0 \
182
+ --tee 3 \
183
+ "
184
+
185
+ export CMD=" \
186
+ `pwd`/finetune_t0.py \
187
+ --tensor-model-parallel-size $TP_SIZE \
188
+ --pipeline-model-parallel-size $PP_SIZE \
189
+ $GPT_ARGS \
190
+ $OUTPUT_ARGS \
191
+ --save $CHECKPOINT_PATH \
192
+ --load $CHECKPOINT_PATH \
193
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
194
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
195
+ --dataloader-type single \
196
+ --data-impl mmap \
197
+ --distributed-backend nccl \
198
+ $DEEPSPEED_ARGS \
199
+ "
200
+
201
+ echo $CMD
202
+
203
+ # do not remove or the training will hang and nodes will be lost w/o this workaround
204
+ export CUDA_LAUNCH_BLOCKING=1
205
+
206
+ # hide duplicated errors using this hack - will be properly fixed in pt-1.12
207
+ export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json
208
+
209
+ clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt
210
+
211
+ echo "END TIME: $(date)"
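
All of these scripts share the same launch pattern: `srun` starts exactly one task per node (`--ntasks-per-node=1`), each task passes its `SLURM_PROCID` through as `--node_rank` to `torch.distributed.run`, and the launcher then spawns the 8 per-GPU workers locally while rendezvousing over c10d at `$MASTER_ADDR:$MASTER_PORT`. A minimal sketch of what the 8 tasks end up executing (the loop merely stands in for srun fanning out across nodes; the hostname is hypothetical):

```bash
# Illustrative expansion of the srun launch line used by these scripts
# (the loop stands in for srun's one-task-per-node fan-out; hostname is hypothetical).
MASTER_ADDR=jean-zay-node0.example
MASTER_PORT=6001
for SLURM_PROCID in $(seq 0 7); do
  echo python -u -m torch.distributed.run \
    --nproc_per_node 8 --nnodes 8 \
    --rdzv_endpoint "$MASTER_ADDR:$MASTER_PORT" --rdzv_backend c10d \
    --max_restarts 0 --tee 3 \
    --node_rank "$SLURM_PROCID" \
    finetune_t0.py ...
done
```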
train/tr13-mtf/smaller_models/tr13f-6b3-mtf-xp3capmixlossseq.slurm ADDED
@@ -0,0 +1,210 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=xp3mixedtr13f-6B3-ml-t0
3
+ #SBATCH --partition=gpu_p5
4
+ #SBATCH --constraint=a100
5
+ #SBATCH --reservation=hug
6
+ #SBATCH --qos=qos_gpu-gc # up to 100h
7
+ #SBATCH --nodes=8
8
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
9
+ #SBATCH --cpus-per-task=64 # number of cores per tasks
10
+ #SBATCH --hint=nomultithread # we get physical cores not logical
11
+ #SBATCH --gres=gpu:8 # number of gpus
12
+ #SBATCH --time 100:00:00 # maximum execution time (HH:MM:SS)
13
+ #SBATCH --output=%x-%j.out # output file name
14
+ #SBATCH --account=six@a100
15
+
16
+ set -x -e
17
+
18
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
19
+ echo "START TIME: $(date)"
20
+
21
+ variant=xp3capmixlossseq
22
+
23
+ DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13f-6B3-ml-t0
24
+ CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant
25
+ REPO_PATH=$DATA_OUTPUT_PATH/tr13f-6B3-ml-t0-logs
26
+ TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant
27
+ LOGS_PATH=$REPO_PATH/logs/$variant
28
+ mkdir -p $LOGS_PATH
29
+ mkdir -p $TENSORBOARD_PATH
30
+
31
+ MEGATRON_DEEPSPEED_REPO=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0/megdslossseq/Megatron-DeepSpeed
32
+ cd $MEGATRON_DEEPSPEED_REPO
33
+
34
+ KILL_SWITCH_PATH=$MEGATRON_DEEPSPEED_REPO/kill-switch-tr13f-6B3-mtf
35
+
36
+ TRAIN_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmix_train.txt
37
+ VALID_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmix_validation.txt
38
+ TOKENIZER_NAME_OR_PATH=bigscience/tokenizer
39
+
40
+ # defining the right environment variables
41
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
42
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
43
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
44
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
45
+ export HF_DATASETS_OFFLINE=1
46
+ export TRANSFORMERS_OFFLINE=1
47
+
48
+ # testing for potential faulty nodes
49
+ # srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"'
50
+
51
+ # so processes know who to talk to
52
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
53
+ MASTER_PORT=6001
54
+
55
+ GPUS_PER_NODE=8
56
+ NNODES=$SLURM_NNODES
57
+
58
+ PP_SIZE=1
59
+ TP_SIZE=1
60
+
61
+ # T0 paper:
62
+ # ...truncate input and target sequences to 1024 and 256 tokens...
63
+ # ...use a batch size of 1024 sequences ... 2^20 total input tokens per batch...
64
+ # We use 2048 total tokens and 512 batch size = 2**20
65
+ MICRO_BATCH_SIZE=4
66
+ GLOBAL_BATCH_SIZE=2048
67
+
68
+ NLAYERS=30
69
+ NHIDDEN=4096
70
+ NHEADS=32
71
+ SEQ_LEN=2048
72
+
73
+ SAVE_INTERVAL=250
74
+
75
+ TRAIN_SAMPLES=6_348_800
76
+
77
+ # T0 paper:
78
+ # "...we use a learning rate of 1e-3..."
79
+ # However, they use Adafactor, which adapts the LR
80
+ # For Adam we likely want a lower one
81
+ # FLAN:
82
+ # "...decay of 1e-4..""
83
+
84
+ # Uncomment for the first step
85
+ # --no-load-optim \
86
+ OPTIMIZER_ARGS=" \
87
+ --optimizer adam \
88
+ --adam-beta1 0.9 \
89
+ --adam-beta2 0.95 \
90
+ --adam-eps 1e-8 \
91
+ --lr 2e-5 \
92
+ --lr-decay-style constant \
93
+ --lr-warmup-samples 0 \
94
+ --clip-grad 1.0 \
95
+ --weight-decay 1e-4 \
96
+ --no-load-optim \
97
+ "
98
+ # for 20h 1190, for 100h 5990
99
+ # --exit-duration-in-mins 1190 \
100
+ EXIT_OPTS=" \
101
+ --exit-duration-in-mins 5990 \
102
+ "
103
+
104
+ GPT_ARGS=" \
105
+ --pp-partition-method 'type:transformer|embedding' \
106
+ --num-layers $NLAYERS \
107
+ --hidden-size $NHIDDEN \
108
+ --num-attention-heads $NHEADS \
109
+ --seq-length $SEQ_LEN \
110
+ --max-position-embeddings $SEQ_LEN \
111
+ --micro-batch-size $MICRO_BATCH_SIZE \
112
+ --global-batch-size $GLOBAL_BATCH_SIZE \
113
+ --train-samples $TRAIN_SAMPLES \
114
+ --tokenizer-type PretrainedFromHF \
115
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
116
+ --init-method-std 0.0048 \
117
+ --embed-layernorm \
118
+ --fp16 \
119
+ --seed 42 \
120
+ --position-embedding-type alibi \
121
+ --checkpoint-activations \
122
+ --abort-on-unmet-fused-kernel-constraints \
123
+ --kill-switch-path $KILL_SWITCH_PATH \
124
+ --pad-vocab-size-to 250880 \
125
+ $OPTIMIZER_ARGS \
126
+ $EXIT_OPTS \
127
+ "
128
+
129
+ OUTPUT_ARGS=" \
130
+ --log-interval 1 \
131
+ --save-interval $SAVE_INTERVAL \
132
+ --eval-interval 250 \
133
+ --eval-iters 50 \
134
+ --tensorboard-dir $TENSORBOARD_PATH \
135
+ --tensorboard-queue-size 5 \
136
+ --log-timers-to-tensorboard \
137
+ --log-batch-size-to-tensorboard \
138
+ --log-validation-ppl-to-tensorboard \
139
+ "
140
+
141
+ ZERO_STAGE=1
142
+
143
+ config_json="./ds_config.$SLURM_JOBID.json"
144
+
145
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
146
+ cat <<EOT > $config_json
147
+ {
148
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
149
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
150
+ "gradient_clipping": 1.0,
151
+ "zero_optimization": {
152
+ "stage": $ZERO_STAGE
153
+ },
154
+ "fp16": {
155
+ "enabled": true,
156
+ "loss_scale": 0,
157
+ "loss_scale_window": 500,
158
+ "hysteresis": 2,
159
+ "min_loss_scale": 1,
160
+ "initial_scale_power": 12
161
+ },
162
+ "steps_per_print": 2000,
163
+ "wall_clock_breakdown": false
164
+ }
165
+ EOT
166
+
167
+
168
+ DEEPSPEED_ARGS=" \
169
+ --deepspeed \
170
+ --deepspeed_config ${config_json} \
171
+ --zero-stage ${ZERO_STAGE} \
172
+ --deepspeed-activation-checkpointing \
173
+ "
174
+
175
+ export LAUNCHER="python -u -m torch.distributed.run \
176
+ --nproc_per_node $GPUS_PER_NODE \
177
+ --nnodes $NNODES \
178
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
179
+ --rdzv_backend c10d \
180
+ --max_restarts 0 \
181
+ --tee 3 \
182
+ "
183
+
184
+ export CMD=" \
185
+ `pwd`/finetune_t0.py \
186
+ --tensor-model-parallel-size $TP_SIZE \
187
+ --pipeline-model-parallel-size $PP_SIZE \
188
+ $GPT_ARGS \
189
+ $OUTPUT_ARGS \
190
+ --save $CHECKPOINT_PATH \
191
+ --load $CHECKPOINT_PATH \
192
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
193
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
194
+ --dataloader-type single \
195
+ --data-impl mmap \
196
+ --distributed-backend nccl \
197
+ $DEEPSPEED_ARGS \
198
+ "
199
+
200
+ echo $CMD
201
+
202
+ # do not remove or the training will hang and nodes will be lost w/o this workaround
203
+ export CUDA_LAUNCH_BLOCKING=1
204
+
205
+ # hide duplicated errors using this hack - will be properly fixed in pt-1.12
206
+ export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json
207
+
208
+ clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt
209
+
210
+ echo "END TIME: $(date)"
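
The `fp16` section of the generated ds_config enables DeepSpeed's dynamic loss scaling rather than a fixed scale. Roughly, with these settings (a summary sketch for readers, not taken from the script itself):

```bash
# What the fp16 section of the generated ds_config asks DeepSpeed to do
# (summary for readers; not part of the committed .slurm file).
#   "loss_scale": 0            -> dynamic loss scaling instead of a fixed scale
#   "initial_scale_power": 12  -> start the scale at 2^12 = 4096
#   "loss_scale_window": 500   -> grow the scale after 500 overflow-free steps
#   "hysteresis": 2            -> tolerate a couple of overflow hits before shrinking it
#   "min_loss_scale": 1        -> never let the scale drop below 1
echo "initial loss scale: $(( 1 << 12 ))"   # 4096
```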
train/tr13-mtf/smaller_models/tr13f-6b3-mtf-xp3capmixlossseqbos.slurm ADDED
@@ -0,0 +1,211 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=xp3capmixlossseqbos
3
+ #SBATCH --partition=gpu_p5
4
+ #SBATCH --constraint=a100
5
+ #SBATCH --reservation=hug
6
+ #SBATCH --qos=qos_gpu-gc # up to 100h
7
+ #SBATCH --nodes=8
8
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
9
+ #SBATCH --cpus-per-task=64 # number of cores per tasks
10
+ #SBATCH --hint=nomultithread # we get physical cores not logical
11
+ #SBATCH --gres=gpu:8 # number of gpus
12
+ #SBATCH --time 100:00:00 # maximum execution time (HH:MM:SS)
13
+ #SBATCH --output=%x-%j.out # output file name
14
+ #SBATCH --account=six@a100
15
+
16
+ set -x -e
17
+
18
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
19
+ echo "START TIME: $(date)"
20
+
21
+ variant=xp3capmixlossseqbos
22
+
23
+ DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13f-6B3-ml-t0
24
+ CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant
25
+ REPO_PATH=$DATA_OUTPUT_PATH/tr13f-6B3-ml-t0-logs
26
+ TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant
27
+ LOGS_PATH=$REPO_PATH/logs/$variant
28
+ mkdir -p $LOGS_PATH
29
+ mkdir -p $TENSORBOARD_PATH
30
+
31
+ MEGATRON_DEEPSPEED_REPO=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0/megdslossseqnew/Megatron-DeepSpeed
32
+ cd $MEGATRON_DEEPSPEED_REPO
33
+
34
+ KILL_SWITCH_PATH=$MEGATRON_DEEPSPEED_REPO/kill-switch-tr13f-6B3-mtf
35
+
36
+ TRAIN_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixv3bos_train.txt
37
+ VALID_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixv3bos_validation.txt
38
+ TOKENIZER_NAME_OR_PATH=bigscience/tokenizer
39
+
40
+ # defining the right environment variables
41
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
42
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
43
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
44
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
45
+ export HF_DATASETS_OFFLINE=1
46
+ export TRANSFORMERS_OFFLINE=1
47
+
48
+ # testing for potential faulty nodes
49
+ # srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"'
50
+
51
+ # so processes know who to talk to
52
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
53
+ MASTER_PORT=6001
54
+
55
+ GPUS_PER_NODE=8
56
+ NNODES=$SLURM_NNODES
57
+
58
+ PP_SIZE=1
59
+ TP_SIZE=1
60
+
61
+ # T0 paper:
62
+ # ...truncate input and target sequences to 1024 and 256 tokens...
63
+ # ...use a batch size of 1024 sequences ... 2^20 total input tokens per batch...
64
+ # We use 2048 total tokens and 512 batch size = 2**20
65
+ MICRO_BATCH_SIZE=4
66
+ GLOBAL_BATCH_SIZE=2048
67
+
68
+ NLAYERS=30
69
+ NHIDDEN=4096
70
+ NHEADS=32
71
+ SEQ_LEN=2048
72
+
73
+ SAVE_INTERVAL=2
74
+
75
+ TRAIN_SAMPLES=6_348_800
76
+
77
+ # T0 paper:
78
+ # "...we use a learning rate of 1e-3..."
79
+ # However, they use Adafactor, which adapts the LR
80
+ # For Adam we likely want a lower one
81
+ # FLAN:
82
+ # "...decay of 1e-4..""
83
+
84
+ # Uncomment for the first step
85
+ # --no-load-optim \
86
+ OPTIMIZER_ARGS=" \
87
+ --optimizer adam \
88
+ --adam-beta1 0.9 \
89
+ --adam-beta2 0.95 \
90
+ --adam-eps 1e-8 \
91
+ --lr 2e-5 \
92
+ --lr-decay-style constant \
93
+ --lr-warmup-samples 0 \
94
+ --clip-grad 1.0 \
95
+ --weight-decay 1e-4 \
96
+ --no-load-optim \
97
+ --norm-target-loss \
98
+ "
99
+ # for 20h 1190, for 100h 5990
100
+ # --exit-duration-in-mins 1190 \
101
+ EXIT_OPTS=" \
102
+ --exit-duration-in-mins 5990 \
103
+ "
104
+
105
+ GPT_ARGS=" \
106
+ --pp-partition-method 'type:transformer|embedding' \
107
+ --num-layers $NLAYERS \
108
+ --hidden-size $NHIDDEN \
109
+ --num-attention-heads $NHEADS \
110
+ --seq-length $SEQ_LEN \
111
+ --max-position-embeddings $SEQ_LEN \
112
+ --micro-batch-size $MICRO_BATCH_SIZE \
113
+ --global-batch-size $GLOBAL_BATCH_SIZE \
114
+ --train-samples $TRAIN_SAMPLES \
115
+ --tokenizer-type PretrainedFromHF \
116
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
117
+ --init-method-std 0.0048 \
118
+ --embed-layernorm \
119
+ --fp16 \
120
+ --seed 42 \
121
+ --position-embedding-type alibi \
122
+ --checkpoint-activations \
123
+ --abort-on-unmet-fused-kernel-constraints \
124
+ --kill-switch-path $KILL_SWITCH_PATH \
125
+ --pad-vocab-size-to 250880 \
126
+ $OPTIMIZER_ARGS \
127
+ $EXIT_OPTS \
128
+ "
129
+
130
+ OUTPUT_ARGS=" \
131
+ --log-interval 1 \
132
+ --save-interval $SAVE_INTERVAL \
133
+ --eval-interval 250 \
134
+ --eval-iters 50 \
135
+ --tensorboard-dir $TENSORBOARD_PATH \
136
+ --tensorboard-queue-size 5 \
137
+ --log-timers-to-tensorboard \
138
+ --log-batch-size-to-tensorboard \
139
+ --log-validation-ppl-to-tensorboard \
140
+ "
141
+
142
+ ZERO_STAGE=1
143
+
144
+ config_json="./ds_config.$SLURM_JOBID.json"
145
+
146
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
147
+ cat <<EOT > $config_json
148
+ {
149
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
150
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
151
+ "gradient_clipping": 1.0,
152
+ "zero_optimization": {
153
+ "stage": $ZERO_STAGE
154
+ },
155
+ "fp16": {
156
+ "enabled": true,
157
+ "loss_scale": 0,
158
+ "loss_scale_window": 500,
159
+ "hysteresis": 2,
160
+ "min_loss_scale": 1,
161
+ "initial_scale_power": 12
162
+ },
163
+ "steps_per_print": 2000,
164
+ "wall_clock_breakdown": false
165
+ }
166
+ EOT
167
+
168
+
169
+ DEEPSPEED_ARGS=" \
170
+ --deepspeed \
171
+ --deepspeed_config ${config_json} \
172
+ --zero-stage ${ZERO_STAGE} \
173
+ --deepspeed-activation-checkpointing \
174
+ "
175
+
176
+ export LAUNCHER="python -u -m torch.distributed.run \
177
+ --nproc_per_node $GPUS_PER_NODE \
178
+ --nnodes $NNODES \
179
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
180
+ --rdzv_backend c10d \
181
+ --max_restarts 0 \
182
+ --tee 3 \
183
+ "
184
+
185
+ export CMD=" \
186
+ `pwd`/finetune_t0.py \
187
+ --tensor-model-parallel-size $TP_SIZE \
188
+ --pipeline-model-parallel-size $PP_SIZE \
189
+ $GPT_ARGS \
190
+ $OUTPUT_ARGS \
191
+ --save $CHECKPOINT_PATH \
192
+ --load $CHECKPOINT_PATH \
193
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
194
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
195
+ --dataloader-type single \
196
+ --data-impl mmap \
197
+ --distributed-backend nccl \
198
+ $DEEPSPEED_ARGS \
199
+ "
200
+
201
+ echo $CMD
202
+
203
+ # do not remove or the training will hang and nodes will be lost w/o this workaround
204
+ export CUDA_LAUNCH_BLOCKING=1
205
+
206
+ # hide duplicated errors using this hack - will be properly fixed in pt-1.12
207
+ export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json
208
+
209
+ clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt
210
+
211
+ echo "END TIME: $(date)"
train/tr13-mtf/smaller_models/tr13f-6b3-mtf-xp3capmixnewcodelonglossseq.slurm ADDED
@@ -0,0 +1,211 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=xp3capmixnewcodelong
3
+ #SBATCH --partition=gpu_p5
4
+ #SBATCH --constraint=a100
5
+ #SBATCH --reservation=hug
6
+ #SBATCH --qos=qos_gpu-gc # up to 100h
7
+ #SBATCH --nodes=8
8
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
9
+ #SBATCH --cpus-per-task=64 # number of cores per tasks
10
+ #SBATCH --hint=nomultithread # we get physical cores not logical
11
+ #SBATCH --gres=gpu:8 # number of gpus
12
+ #SBATCH --time 100:00:00 # maximum execution time (HH:MM:SS)
13
+ #SBATCH --output=%x-%j.out # output file name
14
+ #SBATCH --account=six@a100
15
+
16
+ set -x -e
17
+
18
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
19
+ echo "START TIME: $(date)"
20
+
21
+ variant=xp3capmixnewcodelonglossseq
22
+
23
+ DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13f-6B3-ml-t0
24
+ CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant
25
+ REPO_PATH=$DATA_OUTPUT_PATH/tr13f-6B3-ml-t0-logs
26
+ TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant
27
+ LOGS_PATH=$REPO_PATH/logs/$variant
28
+ mkdir -p $LOGS_PATH
29
+ mkdir -p $TENSORBOARD_PATH
30
+
31
+ MEGATRON_DEEPSPEED_REPO=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0/megdslossseqnew/Megatron-DeepSpeed
32
+ cd $MEGATRON_DEEPSPEED_REPO
33
+
34
+ KILL_SWITCH_PATH=$MEGATRON_DEEPSPEED_REPO/kill-switch-tr13f-6B3-mtf
35
+
36
+ TRAIN_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixnewcodelong_train.txt
37
+ VALID_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixnewcodelong_validation.txt
38
+ TOKENIZER_NAME_OR_PATH=bigscience/tokenizer
39
+
40
+ # defining the right environment variables
41
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
42
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
43
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
44
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
45
+ export HF_DATASETS_OFFLINE=1
46
+ export TRANSFORMERS_OFFLINE=1
47
+
48
+ # testing for potential faulty nodes
49
+ # srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"'
50
+
51
+ # so processes know who to talk to
52
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
53
+ MASTER_PORT=6001
54
+
55
+ GPUS_PER_NODE=8
56
+ NNODES=$SLURM_NNODES
57
+
58
+ PP_SIZE=1
59
+ TP_SIZE=1
60
+
61
+ # T0 paper:
62
+ # ...truncate input and target sequences to 1024 and 256 tokens...
63
+ # ...use a batch size of 1024 sequences ... 2^20 total input tokens per batch...
64
+ # We use 2048 total tokens and 512 batch size = 2**20
65
+ MICRO_BATCH_SIZE=4
66
+ GLOBAL_BATCH_SIZE=2048
67
+
68
+ NLAYERS=30
69
+ NHIDDEN=4096
70
+ NHEADS=32
71
+ SEQ_LEN=2048
72
+ # 250
73
+ SAVE_INTERVAL=2
74
+
75
+ TRAIN_SAMPLES=6_348_800
76
+
77
+ # T0 paper:
78
+ # "...we use a learning rate of 1e-3..."
79
+ # However, they use Adafactor, which adapts the LR
80
+ # For Adam we likely want a lower one
81
+ # FLAN:
82
+ # "...decay of 1e-4..""
83
+
84
+ # Uncomment for the first step
85
+ # --no-load-optim \
86
+ OPTIMIZER_ARGS=" \
87
+ --optimizer adam \
88
+ --adam-beta1 0.9 \
89
+ --adam-beta2 0.95 \
90
+ --adam-eps 1e-8 \
91
+ --lr 2e-5 \
92
+ --lr-decay-style constant \
93
+ --lr-warmup-samples 0 \
94
+ --clip-grad 1.0 \
95
+ --weight-decay 1e-4 \
96
+ --no-load-optim \
97
+ --norm-target-loss \
98
+ "
99
+ # for 20h 1190, for 100h 5990
100
+ # --exit-duration-in-mins 1190 \
101
+ EXIT_OPTS=" \
102
+ --exit-duration-in-mins 5990 \
103
+ "
104
+
105
+ GPT_ARGS=" \
106
+ --pp-partition-method 'type:transformer|embedding' \
107
+ --num-layers $NLAYERS \
108
+ --hidden-size $NHIDDEN \
109
+ --num-attention-heads $NHEADS \
110
+ --seq-length $SEQ_LEN \
111
+ --max-position-embeddings $SEQ_LEN \
112
+ --micro-batch-size $MICRO_BATCH_SIZE \
113
+ --global-batch-size $GLOBAL_BATCH_SIZE \
114
+ --train-samples $TRAIN_SAMPLES \
115
+ --tokenizer-type PretrainedFromHF \
116
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
117
+ --init-method-std 0.0048 \
118
+ --embed-layernorm \
119
+ --fp16 \
120
+ --seed 42 \
121
+ --position-embedding-type alibi \
122
+ --checkpoint-activations \
123
+ --abort-on-unmet-fused-kernel-constraints \
124
+ --kill-switch-path $KILL_SWITCH_PATH \
125
+ --pad-vocab-size-to 250880 \
126
+ $OPTIMIZER_ARGS \
127
+ $EXIT_OPTS \
128
+ "
129
+
130
+ OUTPUT_ARGS=" \
131
+ --log-interval 1 \
132
+ --save-interval $SAVE_INTERVAL \
133
+ --eval-interval 125 \
134
+ --eval-iters 10 \
135
+ --tensorboard-dir $TENSORBOARD_PATH \
136
+ --tensorboard-queue-size 5 \
137
+ --log-timers-to-tensorboard \
138
+ --log-batch-size-to-tensorboard \
139
+ --log-validation-ppl-to-tensorboard \
140
+ "
141
+
142
+ ZERO_STAGE=1
143
+
144
+ config_json="./ds_config.$SLURM_JOBID.json"
145
+
146
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
147
+ cat <<EOT > $config_json
148
+ {
149
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
150
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
151
+ "gradient_clipping": 1.0,
152
+ "zero_optimization": {
153
+ "stage": $ZERO_STAGE
154
+ },
155
+ "fp16": {
156
+ "enabled": true,
157
+ "loss_scale": 0,
158
+ "loss_scale_window": 500,
159
+ "hysteresis": 2,
160
+ "min_loss_scale": 1,
161
+ "initial_scale_power": 12
162
+ },
163
+ "steps_per_print": 2000,
164
+ "wall_clock_breakdown": false
165
+ }
166
+ EOT
167
+
168
+
169
+ DEEPSPEED_ARGS=" \
170
+ --deepspeed \
171
+ --deepspeed_config ${config_json} \
172
+ --zero-stage ${ZERO_STAGE} \
173
+ --deepspeed-activation-checkpointing \
174
+ "
175
+
176
+ export LAUNCHER="python -u -m torch.distributed.run \
177
+ --nproc_per_node $GPUS_PER_NODE \
178
+ --nnodes $NNODES \
179
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
180
+ --rdzv_backend c10d \
181
+ --max_restarts 0 \
182
+ --tee 3 \
183
+ "
184
+
185
+ export CMD=" \
186
+ `pwd`/finetune_t0.py \
187
+ --tensor-model-parallel-size $TP_SIZE \
188
+ --pipeline-model-parallel-size $PP_SIZE \
189
+ $GPT_ARGS \
190
+ $OUTPUT_ARGS \
191
+ --save $CHECKPOINT_PATH \
192
+ --load $CHECKPOINT_PATH \
193
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
194
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
195
+ --dataloader-type single \
196
+ --data-impl mmap \
197
+ --distributed-backend nccl \
198
+ $DEEPSPEED_ARGS \
199
+ "
200
+
201
+ echo $CMD
202
+
203
+ # do not remove or the training will hang and nodes will be lost w/o this workaround
204
+ export CUDA_LAUNCH_BLOCKING=1
205
+
206
+ # hide duplicated errors using this hack - will be properly fixed in pt-1.12
207
+ export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json
208
+
209
+ clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt
210
+
211
+ echo "END TIME: $(date)"
train/tr13-mtf/smaller_models/tr13f-6b3-mtf-xp3capmixnostriplossseq.slurm ADDED
@@ -0,0 +1,211 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=xp3capmixnostriplossseq
3
+ #SBATCH --partition=gpu_p5
4
+ #SBATCH --constraint=a100
5
+ #SBATCH --reservation=hug
6
+ #SBATCH --qos=qos_gpu-gc # up to 100h
7
+ #SBATCH --nodes=8
8
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
9
+ #SBATCH --cpus-per-task=64 # number of cores per tasks
10
+ #SBATCH --hint=nomultithread # we get physical cores not logical
11
+ #SBATCH --gres=gpu:8 # number of gpus
12
+ #SBATCH --time 100:00:00 # maximum execution time (HH:MM:SS)
13
+ #SBATCH --output=%x-%j.out # output file name
14
+ #SBATCH --account=six@a100
15
+
16
+ set -x -e
17
+
18
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
19
+ echo "START TIME: $(date)"
20
+
21
+ variant=xp3capmixnostriplossseq
22
+
23
+ DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13f-6B3-ml-t0
24
+ CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant
25
+ REPO_PATH=$DATA_OUTPUT_PATH/tr13f-6B3-ml-t0-logs
26
+ TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant
27
+ LOGS_PATH=$REPO_PATH/logs/$variant
28
+ mkdir -p $LOGS_PATH
29
+ mkdir -p $TENSORBOARD_PATH
30
+
31
+ MEGATRON_DEEPSPEED_REPO=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0/megdslossseqnew/Megatron-DeepSpeed
32
+ cd $MEGATRON_DEEPSPEED_REPO
33
+
34
+ KILL_SWITCH_PATH=$MEGATRON_DEEPSPEED_REPO/kill-switch-tr13f-6B3-mtf
35
+
36
+ TRAIN_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixnostrip_train.txt
37
+ VALID_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixnostrip_validation.txt
38
+ TOKENIZER_NAME_OR_PATH=bigscience/tokenizer
39
+
40
+ # defining the right environment variables
41
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
42
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
43
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
44
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
45
+ export HF_DATASETS_OFFLINE=1
46
+ export TRANSFORMERS_OFFLINE=1
47
+
48
+ # testing for potential faulty nodes
49
+ # srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"'
50
+
51
+ # so processes know who to talk to
52
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
53
+ MASTER_PORT=6001
54
+
55
+ GPUS_PER_NODE=8
56
+ NNODES=$SLURM_NNODES
57
+
58
+ PP_SIZE=1
59
+ TP_SIZE=1
60
+
61
+ # T0 paper:
62
+ # ...truncate input and target sequences to 1024 and 256 tokens...
63
+ # ...use a batch size of 1024 sequences ... 2^20 total input tokens per batch...
64
+ # We use 2048 total tokens and 512 batch size = 2**20
65
+ MICRO_BATCH_SIZE=4
66
+ GLOBAL_BATCH_SIZE=2048
67
+
68
+ NLAYERS=30
69
+ NHIDDEN=4096
70
+ NHEADS=32
71
+ SEQ_LEN=2048
72
+
73
+ SAVE_INTERVAL=2
74
+
75
+ TRAIN_SAMPLES=6_348_800
76
+
77
+ # T0 paper:
78
+ # "...we use a learning rate of 1e-3..."
79
+ # However, they use Adafactor, which adapts the LR
80
+ # For Adam we likely want a lower one
81
+ # FLAN:
82
+ # "...decay of 1e-4..""
83
+
84
+ # Uncomment for the first step
85
+ # --no-load-optim \
86
+ OPTIMIZER_ARGS=" \
87
+ --optimizer adam \
88
+ --adam-beta1 0.9 \
89
+ --adam-beta2 0.95 \
90
+ --adam-eps 1e-8 \
91
+ --lr 2e-5 \
92
+ --lr-decay-style constant \
93
+ --lr-warmup-samples 0 \
94
+ --clip-grad 1.0 \
95
+ --weight-decay 1e-4 \
96
+ --no-load-optim \
97
+ --norm-target-loss \
98
+ "
99
+ # for 20h 1190, for 100h 5990
100
+ # --exit-duration-in-mins 1190 \
101
+ EXIT_OPTS=" \
102
+ --exit-duration-in-mins 5990 \
103
+ "
104
+
105
+ GPT_ARGS=" \
106
+ --pp-partition-method 'type:transformer|embedding' \
107
+ --num-layers $NLAYERS \
108
+ --hidden-size $NHIDDEN \
109
+ --num-attention-heads $NHEADS \
110
+ --seq-length $SEQ_LEN \
111
+ --max-position-embeddings $SEQ_LEN \
112
+ --micro-batch-size $MICRO_BATCH_SIZE \
113
+ --global-batch-size $GLOBAL_BATCH_SIZE \
114
+ --train-samples $TRAIN_SAMPLES \
115
+ --tokenizer-type PretrainedFromHF \
116
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
117
+ --init-method-std 0.0048 \
118
+ --embed-layernorm \
119
+ --fp16 \
120
+ --seed 42 \
121
+ --position-embedding-type alibi \
122
+ --checkpoint-activations \
123
+ --abort-on-unmet-fused-kernel-constraints \
124
+ --kill-switch-path $KILL_SWITCH_PATH \
125
+ --pad-vocab-size-to 250880 \
126
+ $OPTIMIZER_ARGS \
127
+ $EXIT_OPTS \
128
+ "
129
+
130
+ OUTPUT_ARGS=" \
131
+ --log-interval 1 \
132
+ --save-interval $SAVE_INTERVAL \
133
+ --eval-interval 250 \
134
+ --eval-iters 50 \
135
+ --tensorboard-dir $TENSORBOARD_PATH \
136
+ --tensorboard-queue-size 5 \
137
+ --log-timers-to-tensorboard \
138
+ --log-batch-size-to-tensorboard \
139
+ --log-validation-ppl-to-tensorboard \
140
+ "
141
+
142
+ ZERO_STAGE=1
143
+
144
+ config_json="./ds_config.$SLURM_JOBID.json"
145
+
146
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
147
+ cat <<EOT > $config_json
148
+ {
149
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
150
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
151
+ "gradient_clipping": 1.0,
152
+ "zero_optimization": {
153
+ "stage": $ZERO_STAGE
154
+ },
155
+ "fp16": {
156
+ "enabled": true,
157
+ "loss_scale": 0,
158
+ "loss_scale_window": 500,
159
+ "hysteresis": 2,
160
+ "min_loss_scale": 1,
161
+ "initial_scale_power": 12
162
+ },
163
+ "steps_per_print": 2000,
164
+ "wall_clock_breakdown": false
165
+ }
166
+ EOT
167
+
168
+
169
+ DEEPSPEED_ARGS=" \
170
+ --deepspeed \
171
+ --deepspeed_config ${config_json} \
172
+ --zero-stage ${ZERO_STAGE} \
173
+ --deepspeed-activation-checkpointing \
174
+ "
175
+
176
+ export LAUNCHER="python -u -m torch.distributed.run \
177
+ --nproc_per_node $GPUS_PER_NODE \
178
+ --nnodes $NNODES \
179
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
180
+ --rdzv_backend c10d \
181
+ --max_restarts 0 \
182
+ --tee 3 \
183
+ "
184
+
185
+ export CMD=" \
186
+ `pwd`/finetune_t0.py \
187
+ --tensor-model-parallel-size $TP_SIZE \
188
+ --pipeline-model-parallel-size $PP_SIZE \
189
+ $GPT_ARGS \
190
+ $OUTPUT_ARGS \
191
+ --save $CHECKPOINT_PATH \
192
+ --load $CHECKPOINT_PATH \
193
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
194
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
195
+ --dataloader-type single \
196
+ --data-impl mmap \
197
+ --distributed-backend nccl \
198
+ $DEEPSPEED_ARGS \
199
+ "
200
+
201
+ echo $CMD
202
+
203
+ # do not remove or the training will hang and nodes will be lost w/o this workaround
204
+ export CUDA_LAUNCH_BLOCKING=1
205
+
206
+ # hide duplicated errors using this hack - will be properly fixed in pt-1.12
207
+ export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json
208
+
209
+ clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt
210
+
211
+ echo "END TIME: $(date)"
train/tr13-mtf/smaller_models/tr13f-6b3-mtf-xp3capmixv2lossseqbitfit.slurm ADDED
@@ -0,0 +1,211 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=xp3capmixv2lossseqbitfit
3
+ #SBATCH --partition=gpu_p5
4
+ #SBATCH --constraint=a100
5
+ #SBATCH --reservation=hug
6
+ #SBATCH --qos=qos_gpu-gc # up to 100h
7
+ #SBATCH --nodes=8
8
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
9
+ #SBATCH --cpus-per-task=64 # number of cores per tasks
10
+ #SBATCH --hint=nomultithread # we get physical cores not logical
11
+ #SBATCH --gres=gpu:8 # number of gpus
12
+ #SBATCH --time 100:00:00 # maximum execution time (HH:MM:SS)
13
+ #SBATCH --output=%x-%j.out # output file name
14
+ #SBATCH --account=six@a100
15
+
16
+ set -x -e
17
+
18
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
19
+ echo "START TIME: $(date)"
20
+
21
+ variant=xp3capmixv2lossseqbitfit
22
+
23
+ DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13f-6B3-ml-t0
24
+ CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant
25
+ REPO_PATH=$DATA_OUTPUT_PATH/tr13f-6B3-ml-t0-logs
26
+ TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant
27
+ LOGS_PATH=$REPO_PATH/logs/$variant
28
+ mkdir -p $LOGS_PATH
29
+ mkdir -p $TENSORBOARD_PATH
30
+
31
+ MEGATRON_DEEPSPEED_REPO=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0/megdslossseqbitfit/Megatron-DeepSpeed
32
+ cd $MEGATRON_DEEPSPEED_REPO
33
+
34
+ KILL_SWITCH_PATH=$MEGATRON_DEEPSPEED_REPO/kill-switch-tr13f-6B3-mtf
35
+
36
+ TRAIN_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixv2_train.txt
37
+ VALID_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixv2_validation.txt
38
+ TOKENIZER_NAME_OR_PATH=bigscience/tokenizer
39
+
40
+ # defining the right environment variables
41
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
42
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
43
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
44
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
45
+ export HF_DATASETS_OFFLINE=1
46
+ export TRANSFORMERS_OFFLINE=1
47
+
48
+ # testing for potential faulty nodes
49
+ # srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"'
50
+
51
+ # so processes know who to talk to
52
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
53
+ MASTER_PORT=6001
54
+
55
+ GPUS_PER_NODE=8
56
+ NNODES=$SLURM_NNODES
57
+
58
+ PP_SIZE=1
59
+ TP_SIZE=1
60
+
61
+ # T0 paper:
62
+ # ...truncate input and target sequences to 1024 and 256 tokens...
63
+ # ...use a batch size of 1024 sequences ... 2^20 total input tokens per batch...
64
+ # We use 2048 total tokens and 512 batch size = 2**20
65
+ MICRO_BATCH_SIZE=4
66
+ GLOBAL_BATCH_SIZE=2048
67
+
68
+ NLAYERS=30
69
+ NHIDDEN=4096
70
+ NHEADS=32
71
+ SEQ_LEN=2048
72
+
73
+ SAVE_INTERVAL=2
74
+
75
+ TRAIN_SAMPLES=6_348_800
76
+
77
+ # T0 paper:
78
+ # "...we use a learning rate of 1e-3..."
79
+ # However, they use Adafactor, which adapts the LR
80
+ # For Adam we likely want a lower one
81
+ # FLAN:
82
+ # "...decay of 1e-4..""
83
+
84
+ # Uncomment for the first step
85
+ # --no-load-optim \
86
+ OPTIMIZER_ARGS=" \
87
+ --optimizer adam \
88
+ --adam-beta1 0.9 \
89
+ --adam-beta2 0.95 \
90
+ --adam-eps 1e-8 \
91
+ --lr 4e-5 \
92
+ --lr-decay-style constant \
93
+ --lr-warmup-samples 0 \
94
+ --clip-grad 1.0 \
95
+ --weight-decay 1e-4 \
96
+ --no-load-optim \
97
+ "
98
+ # for 20h 1190, for 100h 5990
99
+ # --exit-duration-in-mins 1190 \
100
+ EXIT_OPTS=" \
101
+ --exit-duration-in-mins 5990 \
102
+ "
103
+
104
+ GPT_ARGS=" \
105
+ --pp-partition-method 'type:transformer|embedding' \
106
+ --num-layers $NLAYERS \
107
+ --hidden-size $NHIDDEN \
108
+ --num-attention-heads $NHEADS \
109
+ --seq-length $SEQ_LEN \
110
+ --max-position-embeddings $SEQ_LEN \
111
+ --micro-batch-size $MICRO_BATCH_SIZE \
112
+ --global-batch-size $GLOBAL_BATCH_SIZE \
113
+ --train-samples $TRAIN_SAMPLES \
114
+ --tokenizer-type PretrainedFromHF \
115
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
116
+ --init-method-std 0.0048 \
117
+ --embed-layernorm \
118
+ --fp16 \
119
+ --bitfit \
120
+ --seed 42 \
121
+ --position-embedding-type alibi \
122
+ --checkpoint-activations \
123
+ --abort-on-unmet-fused-kernel-constraints \
124
+ --kill-switch-path $KILL_SWITCH_PATH \
125
+ --pad-vocab-size-to 250880 \
126
+ $OPTIMIZER_ARGS \
127
+ $EXIT_OPTS \
128
+ "
129
+
130
+ OUTPUT_ARGS=" \
131
+ --log-interval 1 \
132
+ --save-interval $SAVE_INTERVAL \
133
+ --eval-interval 250 \
134
+ --eval-iters 50 \
135
+ --tensorboard-dir $TENSORBOARD_PATH \
136
+ --tensorboard-queue-size 5 \
137
+ --log-timers-to-tensorboard \
138
+ --log-batch-size-to-tensorboard \
139
+ --log-validation-ppl-to-tensorboard \
140
+ "
141
+
142
+ ZERO_STAGE=1
143
+
144
+ config_json="./ds_config.$SLURM_JOBID.json"
145
+
146
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
147
+ cat <<EOT > $config_json
148
+ {
149
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
150
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
151
+ "gradient_clipping": 1.0,
152
+ "zero_optimization": {
153
+ "stage": $ZERO_STAGE
154
+ },
155
+ "fp16": {
156
+ "enabled": true,
157
+ "loss_scale": 0,
158
+ "loss_scale_window": 500,
159
+ "hysteresis": 2,
160
+ "min_loss_scale": 1,
161
+ "initial_scale_power": 12
162
+ },
163
+ "steps_per_print": 2000,
164
+ "wall_clock_breakdown": false
165
+ }
166
+ EOT
167
+
168
+
169
+ DEEPSPEED_ARGS=" \
170
+ --deepspeed \
171
+ --deepspeed_config ${config_json} \
172
+ --zero-stage ${ZERO_STAGE} \
173
+ --deepspeed-activation-checkpointing \
174
+ "
175
+
176
+ export LAUNCHER="python -u -m torch.distributed.run \
177
+ --nproc_per_node $GPUS_PER_NODE \
178
+ --nnodes $NNODES \
179
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
180
+ --rdzv_backend c10d \
181
+ --max_restarts 0 \
182
+ --tee 3 \
183
+ "
184
+
185
+ export CMD=" \
186
+ `pwd`/finetune_t0.py \
187
+ --tensor-model-parallel-size $TP_SIZE \
188
+ --pipeline-model-parallel-size $PP_SIZE \
189
+ $GPT_ARGS \
190
+ $OUTPUT_ARGS \
191
+ --save $CHECKPOINT_PATH \
192
+ --load $CHECKPOINT_PATH \
193
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
194
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
195
+ --dataloader-type single \
196
+ --data-impl mmap \
197
+ --distributed-backend nccl \
198
+ $DEEPSPEED_ARGS \
199
+ "
200
+
201
+ echo $CMD
202
+
203
+ # do not remove or the training will hang and nodes will be lost w/o this workaround
204
+ export CUDA_LAUNCH_BLOCKING=1
205
+
206
+ # hide duplicated errors using this hack - will be properly fixed in pt-1.12
207
+ export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json
208
+
209
+ clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt
210
+
211
+ echo "END TIME: $(date)"
train/tr13-mtf/smaller_models/tr13f-6b3-mtf-xp3zzlossseq.slurm ADDED
@@ -0,0 +1,211 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=xp3zzlossseq
3
+ #SBATCH --partition=gpu_p5
4
+ #SBATCH --constraint=a100
5
+ #SBATCH --reservation=hug
6
+ #SBATCH --qos=qos_gpu-gc # up to 100h
7
+ #SBATCH --nodes=8
8
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
9
+ #SBATCH --cpus-per-task=64 # number of cores per tasks
10
+ #SBATCH --hint=nomultithread # we get physical cores not logical
11
+ #SBATCH --gres=gpu:8 # number of gpus
12
+ #SBATCH --time 100:00:00 # maximum execution time (HH:MM:SS)
13
+ #SBATCH --output=%x-%j.out # output file name
14
+ #SBATCH --account=six@a100
15
+
16
+ set -x -e
17
+
18
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
19
+ echo "START TIME: $(date)"
20
+
21
+ variant=xp3zzlossseq
22
+
23
+ DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13f-6B3-ml-t0
24
+ CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant
25
+ REPO_PATH=$DATA_OUTPUT_PATH/tr13f-6B3-ml-t0-logs
26
+ TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant
27
+ LOGS_PATH=$REPO_PATH/logs/$variant
28
+ mkdir -p $LOGS_PATH
29
+ mkdir -p $TENSORBOARD_PATH
30
+
31
+ MEGATRON_DEEPSPEED_REPO=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0/megdslossseqnew/Megatron-DeepSpeed
32
+ cd $MEGATRON_DEEPSPEED_REPO
33
+
34
+ KILL_SWITCH_PATH=$MEGATRON_DEEPSPEED_REPO/kill-switch-tr13f-6B3-mtf
35
+
36
+ TRAIN_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3zzlossseq_train.txt
37
+ VALID_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixnewcodelong_validation_pretr.txt
38
+ TOKENIZER_NAME_OR_PATH=bigscience/tokenizer
39
+
40
+ # defining the right environment variables
41
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
42
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
43
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
44
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
45
+ export HF_DATASETS_OFFLINE=1
46
+ export TRANSFORMERS_OFFLINE=1
47
+
48
+ # testing for potential faulty nodes
49
+ # srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"'
50
+
51
+ # so processes know who to talk to
52
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
53
+ MASTER_PORT=6001
54
+
55
+ GPUS_PER_NODE=8
56
+ NNODES=$SLURM_NNODES
57
+
58
+ PP_SIZE=1
59
+ TP_SIZE=1
60
+
61
+ # T0 paper:
62
+ # ...truncate input and target sequences to 1024 and 256 tokens...
63
+ # ...use a batch size of 1024 sequences ... 2^20 total input tokens per batch...
64
+ # We use 2048 total tokens and 512 batch size = 2**20
65
+ MICRO_BATCH_SIZE=4
66
+ GLOBAL_BATCH_SIZE=2048
67
+
68
+ NLAYERS=30
69
+ NHIDDEN=4096
70
+ NHEADS=32
71
+ SEQ_LEN=2048
72
+ # 250
73
+ SAVE_INTERVAL=2
74
+
75
+ TRAIN_SAMPLES=6_348_800
76
+
77
+ # T0 paper:
78
+ # "...we use a learning rate of 1e-3..."
79
+ # However, they use Adafactor, which adapts the LR
80
+ # For Adam we likely want a lower one
81
+ # FLAN:
82
+ # "...decay of 1e-4..""
83
+
84
+ # Uncomment for the first step
85
+ # --no-load-optim \
86
+ OPTIMIZER_ARGS=" \
87
+ --optimizer adam \
88
+ --adam-beta1 0.9 \
89
+ --adam-beta2 0.95 \
90
+ --adam-eps 1e-8 \
91
+ --lr 2e-5 \
92
+ --lr-decay-style constant \
93
+ --lr-warmup-samples 0 \
94
+ --clip-grad 1.0 \
95
+ --weight-decay 1e-4 \
96
+ --no-load-optim \
97
+ --norm-target-loss \
98
+ "
99
+ # for 20h 1190, for 100h 5990
100
+ # --exit-duration-in-mins 1190 \
101
+ EXIT_OPTS=" \
102
+ --exit-duration-in-mins 5990 \
103
+ "
104
+
105
+ GPT_ARGS=" \
106
+ --pp-partition-method 'type:transformer|embedding' \
107
+ --num-layers $NLAYERS \
108
+ --hidden-size $NHIDDEN \
109
+ --num-attention-heads $NHEADS \
110
+ --seq-length $SEQ_LEN \
111
+ --max-position-embeddings $SEQ_LEN \
112
+ --micro-batch-size $MICRO_BATCH_SIZE \
113
+ --global-batch-size $GLOBAL_BATCH_SIZE \
114
+ --train-samples $TRAIN_SAMPLES \
115
+ --tokenizer-type PretrainedFromHF \
116
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
117
+ --init-method-std 0.0048 \
118
+ --embed-layernorm \
119
+ --fp16 \
120
+ --seed 42 \
121
+ --position-embedding-type alibi \
122
+ --checkpoint-activations \
123
+ --abort-on-unmet-fused-kernel-constraints \
124
+ --kill-switch-path $KILL_SWITCH_PATH \
125
+ --pad-vocab-size-to 250880 \
126
+ $OPTIMIZER_ARGS \
127
+ $EXIT_OPTS \
128
+ "
129
+
130
+ OUTPUT_ARGS=" \
131
+ --log-interval 1 \
132
+ --save-interval $SAVE_INTERVAL \
133
+ --eval-interval 250 \
134
+ --eval-iters 10 \
135
+ --tensorboard-dir $TENSORBOARD_PATH \
136
+ --tensorboard-queue-size 5 \
137
+ --log-timers-to-tensorboard \
138
+ --log-batch-size-to-tensorboard \
139
+ --log-validation-ppl-to-tensorboard \
140
+ "
141
+
142
+ ZERO_STAGE=1
143
+
144
+ config_json="./ds_config.$SLURM_JOBID.json"
145
+
146
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
147
+ cat <<EOT > $config_json
148
+ {
149
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
150
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
151
+ "gradient_clipping": 1.0,
152
+ "zero_optimization": {
153
+ "stage": $ZERO_STAGE
154
+ },
155
+ "fp16": {
156
+ "enabled": true,
157
+ "loss_scale": 0,
158
+ "loss_scale_window": 500,
159
+ "hysteresis": 2,
160
+ "min_loss_scale": 1,
161
+ "initial_scale_power": 12
162
+ },
163
+ "steps_per_print": 2000,
164
+ "wall_clock_breakdown": false
165
+ }
166
+ EOT
167
+
168
+
169
+ DEEPSPEED_ARGS=" \
170
+ --deepspeed \
171
+ --deepspeed_config ${config_json} \
172
+ --zero-stage ${ZERO_STAGE} \
173
+ --deepspeed-activation-checkpointing \
174
+ "
175
+
176
+ export LAUNCHER="python -u -m torch.distributed.run \
177
+ --nproc_per_node $GPUS_PER_NODE \
178
+ --nnodes $NNODES \
179
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
180
+ --rdzv_backend c10d \
181
+ --max_restarts 0 \
182
+ --tee 3 \
183
+ "
184
+
185
+ export CMD=" \
186
+ `pwd`/finetune_t0.py \
187
+ --tensor-model-parallel-size $TP_SIZE \
188
+ --pipeline-model-parallel-size $PP_SIZE \
189
+ $GPT_ARGS \
190
+ $OUTPUT_ARGS \
191
+ --save $CHECKPOINT_PATH \
192
+ --load $CHECKPOINT_PATH \
193
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
194
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
195
+ --dataloader-type single \
196
+ --data-impl mmap \
197
+ --distributed-backend nccl \
198
+ $DEEPSPEED_ARGS \
199
+ "
200
+
201
+ echo $CMD
202
+
203
+ # do not remove or the training will hang and nodes will be lost w/o this workaround
204
+ export CUDA_LAUNCH_BLOCKING=1
205
+
206
+ # hide duplicated errors using this hack - will be properly fixed in pt-1.12
207
+ export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json
208
+
209
+ clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt
210
+
211
+ echo "END TIME: $(date)"
train/tr13-mtf/tr13-176B-mtf-p31lossseq-val.slurm ADDED
@@ -0,0 +1,225 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=val-tr13-176B-ml-p31lossseq
3
+ #SBATCH --partition=gpu_p5
4
+ #SBATCH --constraint=a100
5
+ #SBATCH --reservation=hug
6
+ #SBATCH --qos=qos_gpu-gc # up to 100h
7
+ #SBATCH --nodes=36
8
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
9
+ #SBATCH --cpus-per-task=64 # number of cores per tasks
10
+ #SBATCH --hint=nomultithread # we get physical cores not logical
11
+ #SBATCH --gres=gpu:8 # number of gpus
12
+ #SBATCH --time 100:00:00 # maximum execution time (HH:MM:SS)
13
+ #SBATCH --output=%x-%j.out # output file name
14
+ #SBATCH --account=six@a100
15
+
16
+ set -x -e
17
+
18
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
19
+ echo "START TIME: $(date)"
20
+
21
+ variant=p31lossseq
22
+
23
+ DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13-176B-ml-t0
24
+ CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant
25
+ REPO_PATH=$DATA_OUTPUT_PATH/tr13-176B-ml-t0-logs
26
+ TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant
27
+ LOGS_PATH=$REPO_PATH/logs/$variant
28
+ mkdir -p $LOGS_PATH
29
+ mkdir -p $TENSORBOARD_PATH
30
+
31
+ MEGATRON_DEEPSPEED_REPO=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0/megdslossseqnew/Megatron-DeepSpeed
32
+ cd $MEGATRON_DEEPSPEED_REPO
33
+
34
+ KILL_SWITCH_PATH=$MEGATRON_DEEPSPEED_REPO/kill-switch-tr13-176B-mtf
35
+
36
+ TRAIN_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/p31_train.txt
37
+ VALID_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/p31_validation_pretr.txt
38
+ TOKENIZER_NAME_OR_PATH=bigscience/tokenizer
39
+
40
+ # defining the right environment variables
41
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
42
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
43
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
44
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
45
+ export HF_DATASETS_OFFLINE=1
46
+ export TRANSFORMERS_OFFLINE=1
47
+
48
+ # testing for potential faulty nodes
49
+ # srun --jobid $SLURM_JOB_ID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"'
50
+
51
+ # so processes know who to talk to
52
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
53
+ MASTER_PORT=6001
54
+
55
+ GPUS_PER_NODE=8
56
+ NNODES=$SLURM_NNODES
57
+
58
+ # TP=1/PP=72/MBS=1/Nodes=36
59
+ PP_SIZE=72
60
+ TP_SIZE=1
61
+
62
+ # T0 paper:
63
+ # ...truncate input and target sequences to 1024 and 256 tokens...
64
+ # ...use a batch size of 1024 sequences ... 2^20 total input tokens per batch...
65
+ # We use 2048 total tokens and 512 batch size = 2**20
66
+ MICRO_BATCH_SIZE=1
67
+ GLOBAL_BATCH_SIZE=2048
68
+
69
+ NHIDDEN=14336
70
+ NLAYERS=70
71
+ NHEADS=112
72
+ SEQ_LEN=2048
73
+
74
+ # After CKPT is saved: stop training, change to the desired SAVE_INTERVAL, and remove --no-load-optim & --universal-checkpoint
75
+ SAVE_INTERVAL=5
76
+
77
+ TRAIN_SAMPLES=6_348_800
78
+
79
+ # T0 paper:
80
+ # "...we use a learning rate of 1e-3..."
81
+ # However, they use Adafactor, which adapts the LR
82
+ # For Adam we likely want a lower one
83
+ # FLAN:
84
+ # "...decay of 1e-4..""
85
+
86
+ # Uncomment for the first step
87
+ # --no-load-optim \
88
+ # --reset-progress \
89
+ OPTIMIZER_ARGS=" \
90
+ --optimizer adam \
91
+ --adam-beta1 0.9 \
92
+ --adam-beta2 0.95 \
93
+ --adam-eps 1e-8 \
94
+ --lr 2e-5 \
95
+ --lr-decay-style constant \
96
+ --lr-warmup-samples 0 \
97
+ --clip-grad 1.0 \
98
+ --weight-decay 1e-4 \
99
+ --no-load-optim \
100
+ --norm-target-loss \
101
+ "
102
+ # for 20h 1190, for 100h 5990
103
+ # --exit-duration-in-mins 1190 \
104
+ EXIT_OPTS=" \
105
+ --exit-duration-in-mins 5990 \
106
+ "
107
+
108
+ GPT_ARGS=" \
109
+ --pp-partition-method 'type:transformer|embedding' \
110
+ --num-layers $NLAYERS \
111
+ --hidden-size $NHIDDEN \
112
+ --num-attention-heads $NHEADS \
113
+ --seq-length $SEQ_LEN \
114
+ --max-position-embeddings $SEQ_LEN \
115
+ --micro-batch-size $MICRO_BATCH_SIZE \
116
+ --global-batch-size $GLOBAL_BATCH_SIZE \
117
+ --train-samples $TRAIN_SAMPLES \
118
+ --tokenizer-type PretrainedFromHF \
119
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
120
+ --init-method-std 0.0048 \
121
+ --embed-layernorm \
122
+ --sync-tp-duplicated-parameters \
123
+ --bf16 \
124
+ --seed 42 \
125
+ --position-embedding-type alibi \
126
+ --checkpoint-activations \
127
+ --abort-on-unmet-fused-kernel-constraints \
128
+ --kill-switch-path $KILL_SWITCH_PATH \
129
+ --pad-vocab-size-to 250880 \
130
+ $OPTIMIZER_ARGS \
131
+ $EXIT_OPTS \
132
+ "
133
+
134
+ OUTPUT_ARGS=" \
135
+ --log-interval 1 \
136
+ --save-interval $SAVE_INTERVAL \
137
+ --eval-interval 250 \
138
+ --eval-iters 10 \
139
+ --eval-only True \
140
+ --tensorboard-dir $TENSORBOARD_PATH \
141
+ --tensorboard-queue-size 5 \
142
+ --log-timers-to-tensorboard \
143
+ --log-batch-size-to-tensorboard \
144
+ --log-validation-ppl-to-tensorboard \
145
+ "
146
+
147
+ ZERO_STAGE=0 # important: bf16 must use z0! it implements its own zero stage 1 equivalent
148
+
149
+ config_json="./ds_config.$SLURM_JOBID.json"
150
+
151
+
152
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
153
+ cat <<EOT > $config_json
154
+ {
155
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
156
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
157
+ "gradient_clipping": 1.0,
158
+ "zero_optimization": {
159
+ "stage": $ZERO_STAGE
160
+ },
161
+ "bf16": {
162
+ "enabled": true
163
+ },
164
+ "steps_per_print": 2000,
165
+ "wall_clock_breakdown": false
166
+ }
167
+ EOT
168
+
169
+
170
+ DEEPSPEED_ARGS=" \
171
+ --deepspeed \
172
+ --deepspeed_config ${config_json} \
173
+ --zero-stage ${ZERO_STAGE} \
174
+ --deepspeed-activation-checkpointing \
175
+ "
176
+
177
+ export LAUNCHER="python -u -m torch.distributed.run \
178
+ --nproc_per_node $GPUS_PER_NODE \
179
+ --nnodes $NNODES \
180
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
181
+ --rdzv_backend c10d \
182
+ --max_restarts 0 \
183
+ --tee 3 \
184
+ "
185
+
186
+ # --universal-checkpoint \
187
+ export CMD=" \
188
+ `pwd`/finetune_t0.py \
189
+ --universal-checkpoint \
190
+ --tensor-model-parallel-size $TP_SIZE \
191
+ --pipeline-model-parallel-size $PP_SIZE \
192
+ $GPT_ARGS \
193
+ $OUTPUT_ARGS \
194
+ --save $CHECKPOINT_PATH \
195
+ --load $CHECKPOINT_PATH \
196
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
197
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
198
+ --dataloader-type single \
199
+ --data-impl mmap \
200
+ --distributed-backend nccl \
201
+ $DEEPSPEED_ARGS \
202
+ "
203
+
204
+ echo $CMD
205
+
206
+ # do not remove or the training will hang and nodes will be lost w/o this workaround
207
+ export CUDA_LAUNCH_BLOCKING=1
208
+
209
+ # hide duplicated errors using this hack - will be properly fixed in pt-1.12
210
+ export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json
211
+
212
+ # force crashing on nccl issues like hanging broadcast
213
+ export NCCL_ASYNC_ERROR_HANDLING=1
214
+
215
+ # srun error handling:
216
+ # --wait=60: wait 60 sec after the first task terminates before terminating all remaining tasks
217
+ # --kill-on-bad-exit=1: terminate a step if any task exits with a non-zero exit code
218
+ SRUN_ARGS=" \
219
+ --wait=60 \
220
+ --kill-on-bad-exit=1 \
221
+ "
222
+
223
+ clear; srun $SRUN_ARGS --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt
224
+
225
+ echo "END TIME: $(date)"
train/tr14-mup/grid_search.sh ADDED
@@ -0,0 +1,16 @@
1
+ for inpm in 10 1 0.1 0.01 0.001
2
+ do
3
+ for outm in 10 1 0.1 0.01 0.001
4
+ do
5
+ for atnm in 10 1 0.1 0.01 0.001
6
+ do
7
+ for lr in 0.1 0.03 0.01 0.003 0.001
8
+ do
9
+ for init in 1 0.3 0.1 0.03 0.01
10
+ do
11
+ sbatch --job-name=tr14-39M-lr$lr-init$init-inpm$inpm-outm$outm-atnm$atnm-mup tr14-39M-grid-search-mup.slurm $lr $init $inpm $outm $atnm
12
+ done
13
+ done
14
+ done
15
+ done
16
+ done
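The nested loop above submits one job per point of a 5x5x5x5x5 grid, i.e. 3125 sbatch calls. A hypothetical dry-run variant that only prints the job names (handy before launching that many submissions):

n=0
for inpm in 10 1 0.1 0.01 0.001; do for outm in 10 1 0.1 0.01 0.001; do
for atnm in 10 1 0.1 0.01 0.001; do for lr in 0.1 0.03 0.01 0.003 0.001; do
for init in 1 0.3 0.1 0.03 0.01; do
  n=$((n+1)); echo "tr14-39M-lr$lr-init$init-inpm$inpm-outm$outm-atnm$atnm-mup"
done; done; done; done; done
echo "$n jobs"    # 3125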
train/tr14-mup/lr_sweep.sh ADDED
@@ -0,0 +1,7 @@
1
+ for lr in 0.052 0.017 0.0052 0.0017; do
2
+ sbatch --job-name=tr14-39M-lr$lr-init0.1-inpm10-outm10-atnm10-mup tr14-39M-grid-search-mup.slurm $lr 0.1 10 10 10
3
+ done
4
+
5
+ for lr in 0.01 0.052 0.03 0.017 0.01 0.0052 0.003 0.0017 0.001; do
6
+ sbatch --job-name=tr14-2B7-lr$lr-init0.1-inpm10-outm10-atnm10-mup tr14-2B7-grid-search-mup.slurm $lr 0.1 10 10 10
7
+ done
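For reference, the positional arguments forwarded to sbatch here map to flags inside the tr14-*-grid-search-mup.slurm scripts: $1 becomes --lr (with --min-lr computed as $1/10 via bc), $2 becomes --init-method-std, and $3/$4/$5 become --mup-input-mult/--mup-output-mult/--mup-attn-mult. One thing worth double-checking (an observation, not a fix): bc truncates at scale=3, so the smallest learning rates in the sweep end up with a zero --min-lr:

for lr in 0.052 0.0017; do
  echo "lr=$lr min-lr=$(bc <<< "scale=3; $lr/10")"    # 0.052 -> .005, 0.0017 -> .000
done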
train/tr14-mup/tr14-2B7-grid-search-mup.slurm ADDED
@@ -0,0 +1,206 @@
1
+ #!/bin/bash
2
+ #SBATCH --qos=qos_gpu-t3
3
+ #SBATCH --nodes=2
4
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
5
+ #SBATCH --cpus-per-task=40 # number of cores per tasks
6
+ #SBATCH --hint=nomultithread # we get physical cores not logical
7
+ #SBATCH --gres=gpu:8 # number of gpus
8
+ #SBATCH --time 04:00:00 # maximum execution time (HH:MM:SS)
9
+ #SBATCH --output=%x.out # output file name
10
+ #SBATCH --partition=gpu_p5
11
+ #SBATCH --account=ajs@a100
12
+ #SBATCH -C a100
13
+
14
+ set -x -e
15
+
16
+ #source $ajs_ALL_CCFRWORK/start-py38-pt110
17
+ #source $ajs_ALL_CCFRWORK/start-py38-pt111
18
+ source $six_ALL_CCFRWORK/code/tr11-176B-ml/bigscience/train/tr11-176B-ml/start-tr11-176B-ml
19
+
20
+ echo "START TIME: $(date)"
21
+
22
+ variant=main
23
+
24
+ DATA_PATH=$ajs_ALL_CCFRSCRATCH/datasets/c4/gpt2tok_c4_text_document
25
+ DATA_OUTPUT_PATH=$ajs_ALL_CCFRSCRATCH/checkpoints/tr14-2B7-mup-lr$1-init$2-inpm$3-outm$4-atnm$5-mup
26
+ CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant
27
+ REPO_PATH=$DATA_OUTPUT_PATH/tr14-2B7-mup-lr$1-init$2-inpm$3-outm$4-atnm$5-mup-logs
28
+ TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant
29
+ LOGS_PATH=$REPO_PATH/logs/$variant
30
+ mkdir -p $LOGS_PATH
31
+
32
+ MEGATRON_DEEPSPEED_REPO=$ajs_ALL_CCFRWORK/code/Megatron-DeepSpeed
33
+ cd $MEGATRON_DEEPSPEED_REPO
34
+
35
+ TOKENIZER_NAME_OR_PATH=t5-small
36
+
37
+ # defining the right environment variables
38
+ export TRANSFORMERS_CACHE=$ajs_ALL_CCFRWORK/models
39
+ export HF_DATASETS_CACHE=$ajs_ALL_CCFRWORK/datasets
40
+ export HF_MODULES_CACHE=$ajs_ALL_CCFRWORK/modules
41
+ export HF_METRICS_CACHE=$ajs_ALL_CCFRWORK/metrics
42
+ export HF_DATASETS_OFFLINE=1
43
+ export TRANSFORMERS_OFFLINE=1
44
+
45
+ # testing for potential faulty nodes
46
+ # srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"'
47
+
48
+ # so processes know who to talk to
49
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
50
+ MASTER_PORT=6000
51
+
52
+ GPUS_PER_NODE=8
53
+ NNODES=$SLURM_NNODES
54
+
55
+ PP_SIZE=1
56
+ TP_SIZE=2
57
+
58
+ MICRO_BATCH_SIZE=1
59
+ GLOBAL_BATCH_SIZE=512
60
+
61
+ NLAYERS=32
62
+ NHIDDEN=2560
63
+ NHEADS=32
64
+ SEQ_LEN=2048
65
+
66
+ SAVE_INTERVAL=250
67
+
68
+ TRAIN_SAMPLES=25_000_000 # 51.2B tokens
69
+ LR_DECAY_SAMPLES=25_000_000 # Decay in the same amount
70
+ LR_WARMUP_SAMPLES=183_105 # 375M tokens
71
+
72
+
73
+ MUP_ARGS=" \
74
+ --lr $1 \
75
+ --min-lr `bc <<< "scale=3; $1/10"` \
76
+ --init-method-std $2 \
77
+ --mup \
78
+ --mup-input-mult $3 \
79
+ --mup-output-mult $4 \
80
+ --mup-attn-mult $5 \
81
+ "
82
+
83
+
84
+ OPTIMIZER_ARGS=" \
85
+ --optimizer adam \
86
+ --adam-beta1 0.9 \
87
+ --adam-beta2 0.95 \
88
+ --adam-eps 1e-8 \
89
+ --lr-decay-style cosine \
90
+ --lr-decay-samples $LR_DECAY_SAMPLES \
91
+ --lr-warmup-samples $LR_WARMUP_SAMPLES \
92
+ --clip-grad 1.0 \
93
+ --weight-decay 1e-1 \
94
+ "
95
+ # for 20h 1190, for 100h 5990
96
+ EXIT_OPTS=" \
97
+ --exit-duration-in-mins 1190 \
98
+ "
99
+
100
+ GPT_ARGS=" \
101
+ --pp-partition-method 'type:transformer' \
102
+ --num-layers $NLAYERS \
103
+ --hidden-size $NHIDDEN \
104
+ --num-attention-heads $NHEADS \
105
+ --seq-length $SEQ_LEN \
106
+ --max-position-embeddings $SEQ_LEN \
107
+ --micro-batch-size $MICRO_BATCH_SIZE \
108
+ --rampup-batch-size 192 32 9_765_625 \
109
+ --global-batch-size $GLOBAL_BATCH_SIZE \
110
+ --train-samples $TRAIN_SAMPLES \
111
+ --tokenizer-type PretrainedFromHF \
112
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
113
+ --embed-layernorm \
114
+ --fp16 \
115
+ --seed 42 \
116
+ --position-embedding-type alibi \
117
+ --checkpoint-activations \
118
+ --abort-on-unmet-fused-kernel-constraints \
119
+ --pad-vocab-size-to 51200 \
120
+ $OPTIMIZER_ARGS \
121
+ $EXIT_OPTS \
122
+ "
123
+
124
+ # TODO: decide on efficient eval-interval + eval-iters
125
+
126
+ OUTPUT_ARGS=" \
127
+ --log-interval 1 \
128
+ --save-interval $SAVE_INTERVAL \
129
+ --eval-interval 1000 \
130
+ --eval-iters 1 \
131
+ --tensorboard-dir $TENSORBOARD_PATH \
132
+ --tensorboard-queue-size 5 \
133
+ --log-timers-to-tensorboard \
134
+ --log-batch-size-to-tensorboard \
135
+ --log-validation-ppl-to-tensorboard \
136
+ "
137
+
138
+ ZERO_STAGE=1
139
+
140
+ config_json="./ds_config.$SLURM_JOBID.json"
141
+
142
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
143
+ cat <<EOT > $config_json
144
+ {
145
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
146
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
147
+ "gradient_clipping": 1.0,
148
+ "zero_optimization": {
149
+ "stage": $ZERO_STAGE
150
+ },
151
+ "fp16": {
152
+ "enabled": true,
153
+ "loss_scale": 0,
154
+ "loss_scale_window": 500,
155
+ "hysteresis": 2,
156
+ "min_loss_scale": 1,
157
+ "initial_scale_power": 12
158
+ },
159
+ "steps_per_print": 2000,
160
+ "wall_clock_breakdown": false
161
+ }
162
+ EOT
163
+
164
+
165
+ DEEPSPEED_ARGS=" \
166
+ --deepspeed \
167
+ --deepspeed_config ${config_json} \
168
+ --zero-stage ${ZERO_STAGE} \
169
+ --deepspeed-activation-checkpointing \
170
+ "
171
+
172
+ export LAUNCHER="python -u -m torch.distributed.run \
173
+ --nproc_per_node $GPUS_PER_NODE \
174
+ --nnodes $NNODES \
175
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
176
+ --rdzv_backend c10d \
177
+ --max_restarts 0 \
178
+ --tee 3 \
179
+ "
180
+
181
+ export CMD=" \
182
+ `pwd`/pretrain_gpt.py \
183
+ --tensor-model-parallel-size $TP_SIZE \
184
+ --pipeline-model-parallel-size $PP_SIZE \
185
+ $GPT_ARGS \
186
+ $OUTPUT_ARGS \
187
+ $MUP_ARGS \
188
+ --save $CHECKPOINT_PATH \
189
+ --load $CHECKPOINT_PATH \
190
+ --data-path $DATA_PATH \
191
+ --data-impl mmap \
192
+ --distributed-backend nccl \
193
+ $DEEPSPEED_ARGS \
194
+ "
195
+
196
+ echo $CMD
197
+
198
+ # do not remove or the training will hang and nodes will be lost w/o this workaround
199
+ export CUDA_LAUNCH_BLOCKING=1
200
+
201
+ # hide duplicated errors using this hack - will be properly fixed in pt-1.12
202
+ export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json
203
+
204
+ clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt
205
+
206
+ echo "END TIME: $(date)"
train/tr14-mup/tr14-2B7-mup-cluster.slurm ADDED
@@ -0,0 +1,237 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=tr14-2B7-mup
3
+ #SBATCH --partition=production-cluster
4
+ #SBATCH --nodes=8
5
+ #SBATCH --cpus-per-task=12
6
+ #SBATCH --ntasks-per-node=1
7
+ #SBATCH --gres=gpu:a100:8
8
+ #SBATCH --hint=nomultithread
9
+ #SBATCH --time 100:00:00
10
+ #SBATCH --output=/fsx/teven/mup/tr14-2B7-%j.out
11
+ #SBATCH --exclude=ip-26-0-159-215,ip-26-0-153-238
12
+
13
+ echo "START TIME: $(date)"
14
+
15
+ mkdir -p $LOGS_PATH
16
+
17
+ # >>> conda initialize >>>
18
+ # !! Contents within this block are managed by 'conda init' !!
19
+ __conda_setup="$('/admin/home/teven/miniconda3/bin/conda' 'shell.bash' 'hook' 2> /dev/null)"
20
+ if [ $? -eq 0 ]; then
21
+ eval "$__conda_setup"
22
+ else
23
+ if [ -f "/admin/home/teven/miniconda3/etc/profile.d/conda.sh" ]; then
24
+ . "/admin/home/teven/miniconda3/etc/profile.d/conda.sh"
25
+ else
26
+ export PATH="/admin/home/teven/miniconda3/bin:$PATH"
27
+ fi
28
+ fi
29
+ unset __conda_setup
30
+ # <<< conda initialize <<<
31
+
32
+ # Proper env variables
33
+ conda activate tvn_dev
34
+ export PATH=/usr/local/cuda-11.4/bin:$PATH
35
+ export NCCL_PROTO=simple
36
+ export PATH=/opt/amazon/efa/bin:$PATH
37
+
38
+ export FI_EFA_FORK_SAFE=1
39
+ export FI_LOG_LEVEL=1
40
+ export FI_EFA_USE_DEVICE_RDMA=1 # use for p4dn
41
+
42
+ #export NCCL_ALGO=ring
43
+ #export NCCL_DEBUG=info
44
+ #export NCCL_DEBUG_SUBSYS=INIT,ENV,GRAPH,COLL
45
+
46
+ export PYTHONFAULTHANDLER=1
47
+
48
+ export CUDA_LAUNCH_BLOCKING=0
49
+ export OMPI_MCA_mtl_base_verbose=1
50
+ export FI_EFA_ENABLE_SHM_TRANSFER=0
51
+ export FI_PROVIDER=efa
52
+ export FI_EFA_TX_MIN_CREDITS=64
53
+ export NCCL_TREE_THRESHOLD=0
54
+ #export TORCH_CPP_LOG_LEVEL=INFO
55
+ #export TORCH_DISTRIBUTED_DEBUG=INFO
56
+
57
+ export NCCL_ASYNC_ERROR_HANDLING=1
58
+ #export NCCL_P2P_DISABLE=1
59
+ #export NCCL_IBEXT_DISABLE=1
60
+ #export NCCL_SOCKET_IFNAME="eth0,en,eth,em,bond"
61
+
62
+ # testing for potential faulty nodes
63
+ srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"'
64
+
65
+ # so processes know who to talk to
66
+ export MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
67
+ export MASTER_PORT=12802
68
+
69
+
70
+ MEGATRON_DEEPSPEED_REPO=/fsx/teven/Megatron-DeepSpeed
71
+ cd $MEGATRON_DEEPSPEED_REPO
72
+
73
+ TOKENIZER_NAME_OR_PATH=t5-small
74
+
75
+ variant=main
76
+
77
+ DATA_PATH=/fsx/data/gpt2tok_c4_text_document
78
+ DATA_OUTPUT_PATH=/fsx/mup_exps/checkpoints/tr14-2B7-lr$1-init0.1-inpm10-outm10-atnm10-mup
79
+ CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant
80
+ REPO_PATH=$DATA_OUTPUT_PATH/tr14-2B7-test-lr$1-init0.1-inpm10-outm10-atnm10-mup
81
+ TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant
82
+ LOGS_PATH=$REPO_PATH/logs/$variant
83
+
84
+ GPUS_PER_NODE=8
85
+ NNODES=$SLURM_NNODES
86
+
87
+ PP_SIZE=1
88
+ TP_SIZE=2
89
+
90
+ MICRO_BATCH_SIZE=16
91
+ GLOBAL_BATCH_SIZE=512
92
+
93
+ NLAYERS=32
94
+ NHIDDEN=2560
95
+ NHEADS=32
96
+ SEQ_LEN=2048
97
+
98
+ SAVE_INTERVAL=250
99
+
100
+ TRAIN_SAMPLES=1_953_125 # 4B tokens (1,953,125 samples x 2048 seq len)
101
+ LR_DECAY_SAMPLES=1_953_125 # Decay in the same amount
102
+ LR_WARMUP_SAMPLES=183_105 # 375M tokens
103
+
104
+
105
+ MUP_ARGS=" \
106
+ --lr $1 \
107
+ --min-lr `bc <<< "scale=3; $1/10"` \
108
+ --init-method-std 0.1 \
109
+ --mup \
110
+ --mup-input-mult 10 \
111
+ --mup-output-mult 10 \
112
+ --mup-attn-mult 10 \
113
+ "
114
+
115
+
116
+ OPTIMIZER_ARGS=" \
117
+ --optimizer adam \
118
+ --adam-beta1 0.9 \
119
+ --adam-beta2 0.95 \
120
+ --adam-eps 1e-8 \
121
+ --lr-decay-style cosine \
122
+ --lr-decay-samples $LR_DECAY_SAMPLES \
123
+ --lr-warmup-samples $LR_WARMUP_SAMPLES \
124
+ --clip-grad 1.0 \
125
+ --weight-decay 1e-1 \
126
+ "
127
+ # for 20h 1190, for 100h 5990
128
+ EXIT_OPTS=" \
129
+ --exit-duration-in-mins 1190 \
130
+ "
131
+
132
+ GPT_ARGS=" \
133
+ --pp-partition-method 'type:transformer' \
134
+ --num-layers $NLAYERS \
135
+ --hidden-size $NHIDDEN \
136
+ --num-attention-heads $NHEADS \
137
+ --seq-length $SEQ_LEN \
138
+ --max-position-embeddings $SEQ_LEN \
139
+ --micro-batch-size $MICRO_BATCH_SIZE \
140
+ --global-batch-size $GLOBAL_BATCH_SIZE \
141
+ --train-samples $TRAIN_SAMPLES \
142
+ --tokenizer-type PretrainedFromHF \
143
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
144
+ --embed-layernorm \
145
+ --fp16 \
146
+ --seed 42 \
147
+ --position-embedding-type alibi \
148
+ --checkpoint-activations \
149
+ --abort-on-unmet-fused-kernel-constraints \
150
+ --pad-vocab-size-to 51200 \
151
+ $OPTIMIZER_ARGS \
152
+ $EXIT_OPTS \
153
+ "
154
+
155
+ # TODO: decide on efficient eval-interval + eval-iters
156
+
157
+ OUTPUT_ARGS=" \
158
+ --log-interval 1 \
159
+ --save-interval $SAVE_INTERVAL \
160
+ --eval-interval 1000 \
161
+ --eval-iters 1 \
162
+ --tensorboard-dir $TENSORBOARD_PATH \
163
+ --tensorboard-queue-size 5 \
164
+ --log-timers-to-tensorboard \
165
+ --log-batch-size-to-tensorboard \
166
+ --log-validation-ppl-to-tensorboard \
167
+ "
168
+
169
+ ZERO_STAGE=1
170
+
171
+ config_json="./ds_config.$SLURM_JOBID.json"
172
+
173
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
174
+ cat <<EOT > $config_json
175
+ {
176
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
177
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
178
+ "gradient_clipping": 1.0,
179
+ "zero_optimization": {
180
+ "stage": $ZERO_STAGE
181
+ },
182
+ "fp16": {
183
+ "enabled": true,
184
+ "loss_scale": 0,
185
+ "loss_scale_window": 500,
186
+ "hysteresis": 2,
187
+ "min_loss_scale": 1,
188
+ "initial_scale_power": 12
189
+ },
190
+ "steps_per_print": 2000,
191
+ "wall_clock_breakdown": false
192
+ }
193
+ EOT
194
+
195
+
196
+ DEEPSPEED_ARGS=" \
197
+ --deepspeed \
198
+ --deepspeed_config ${config_json} \
199
+ --zero-stage ${ZERO_STAGE} \
200
+ --deepspeed-activation-checkpointing \
201
+ "
202
+
203
+ export LAUNCHER="python -u -m torch.distributed.run \
204
+ --nproc_per_node $GPUS_PER_NODE \
205
+ --nnodes $NNODES \
206
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
207
+ --rdzv_backend c10d \
208
+ --max_restarts 0 \
209
+ --tee 3 \
210
+ "
211
+
212
+ export CMD=" \
213
+ `pwd`/pretrain_gpt.py \
214
+ --tensor-model-parallel-size $TP_SIZE \
215
+ --pipeline-model-parallel-size $PP_SIZE \
216
+ $GPT_ARGS \
217
+ $OUTPUT_ARGS \
218
+ $MUP_ARGS \
219
+ --save $CHECKPOINT_PATH \
220
+ --load $CHECKPOINT_PATH \
221
+ --data-path $DATA_PATH \
222
+ --data-impl mmap \
223
+ --distributed-backend nccl \
224
+ $DEEPSPEED_ARGS \
225
+ "
226
+
227
+ echo $CMD
228
+
229
+ # do not remove or the training will hang and nodes will be lost w/o this workaround
230
+ export CUDA_LAUNCH_BLOCKING=1
231
+
232
+ # hide duplicated errors using this hack - will be properly fixed in pt-1.12
233
+ export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json
234
+
235
+ clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt
236
+
237
+ echo "END TIME: $(date)"
train/tr14-mup/tr14-2B7-mup.slurm ADDED
@@ -0,0 +1,205 @@
1
+ #!/bin/bash
2
+ #SBATCH --qos=qos_gpu-t3
3
+ #SBATCH --nodes=8
4
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
5
+ #SBATCH --cpus-per-task=16 # number of cores per tasks
6
+ #SBATCH --hint=nomultithread # we get physical cores not logical
7
+ #SBATCH --gres=gpu:8 # number of gpus
8
+ #SBATCH --time 05:00:00 # maximum execution time (HH:MM:SS)
9
+ #SBATCH --output=%x.out # output file name
10
+ #SBATCH --partition=gpu_p5
11
+ #SBATCH --account=ajs@a100
12
+ #SBATCH -C a100
13
+
14
+ set -x -e
15
+
16
+ #source $ajs_ALL_CCFRWORK/start-py38-pt110
17
+ #source $ajs_ALL_CCFRWORK/start-py38-pt111
18
+ source $six_ALL_CCFRWORK/code/tr11-176B-ml/bigscience/train/tr11-176B-ml/start-tr11-176B-ml
19
+
20
+ echo "START TIME: $(date)"
21
+
22
+ variant=main
23
+
24
+ DATA_PATH=$ajs_ALL_CCFRSCRATCH/datasets/c4/gpt2tok_c4_text_document
25
+ DATA_OUTPUT_PATH=$ajs_ALL_CCFRSCRATCH/checkpoints/tr14-2B7-lr$1-init0.1-inpm10-outm10-atnm10-mup
26
+ CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant
27
+ REPO_PATH=$DATA_OUTPUT_PATH/tr14-2B7-test-lr$1-init0.1-inpm10-outm10-atnm10-mup
28
+ TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant
29
+ LOGS_PATH=$REPO_PATH/logs/$variant
30
+ mkdir -p $LOGS_PATH
31
+
32
+ MEGATRON_DEEPSPEED_REPO=$ajs_ALL_CCFRWORK/code/Megatron-DeepSpeed
33
+ cd $MEGATRON_DEEPSPEED_REPO
34
+
35
+ TOKENIZER_NAME_OR_PATH=t5-small
36
+
37
+ # defining the right environment variables
38
+ export TRANSFORMERS_CACHE=$ajs_ALL_CCFRWORK/models
39
+ export HF_DATASETS_CACHE=$ajs_ALL_CCFRWORK/datasets
40
+ export HF_MODULES_CACHE=$ajs_ALL_CCFRWORK/modules
41
+ export HF_METRICS_CACHE=$ajs_ALL_CCFRWORK/metrics
42
+ export HF_DATASETS_OFFLINE=1
43
+ export TRANSFORMERS_OFFLINE=1
44
+
45
+ # testing for potential faulty nodes
46
+ # srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"'
47
+
48
+ # so processes know who to talk to
49
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
50
+ MASTER_PORT=6000
51
+
52
+ GPUS_PER_NODE=8
53
+ NNODES=$SLURM_NNODES
54
+
55
+ PP_SIZE=1
56
+ TP_SIZE=2
57
+
58
+ MICRO_BATCH_SIZE=16
59
+ GLOBAL_BATCH_SIZE=512
60
+
61
+ NLAYERS=32
62
+ NHIDDEN=2560
63
+ NHEADS=32
64
+ SEQ_LEN=2048
65
+
66
+ SAVE_INTERVAL=250
67
+
68
+ TRAIN_SAMPLES=1_953_125 # 4B tokens (1,953,125 samples x 2048 seq len)
69
+ LR_DECAY_SAMPLES=1_953_125 # Decay in the same amount
70
+ LR_WARMUP_SAMPLES=183_105 # 375M tokens
71
+
72
+
73
+ MUP_ARGS=" \
74
+ --lr $1 \
75
+ --min-lr `bc <<< "scale=3; $1/10"` \
76
+ --init-method-std 0.1 \
77
+ --mup \
78
+ --mup-input-mult 10 \
79
+ --mup-output-mult 10 \
80
+ --mup-attn-mult 10 \
81
+ "
82
+
83
+
84
+ OPTIMIZER_ARGS=" \
85
+ --optimizer adam \
86
+ --adam-beta1 0.9 \
87
+ --adam-beta2 0.95 \
88
+ --adam-eps 1e-8 \
89
+ --lr-decay-style cosine \
90
+ --lr-decay-samples $LR_DECAY_SAMPLES \
91
+ --lr-warmup-samples $LR_WARMUP_SAMPLES \
92
+ --clip-grad 1.0 \
93
+ --weight-decay 1e-1 \
94
+ "
95
+ # for 20h 1190, for 100h 5990
96
+ EXIT_OPTS=" \
97
+ --exit-duration-in-mins 1190 \
98
+ "
99
+
100
+ GPT_ARGS=" \
101
+ --pp-partition-method 'type:transformer' \
102
+ --num-layers $NLAYERS \
103
+ --hidden-size $NHIDDEN \
104
+ --num-attention-heads $NHEADS \
105
+ --seq-length $SEQ_LEN \
106
+ --max-position-embeddings $SEQ_LEN \
107
+ --micro-batch-size $MICRO_BATCH_SIZE \
108
+ --global-batch-size $GLOBAL_BATCH_SIZE \
109
+ --train-samples $TRAIN_SAMPLES \
110
+ --tokenizer-type PretrainedFromHF \
111
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
112
+ --embed-layernorm \
113
+ --fp16 \
114
+ --seed 42 \
115
+ --position-embedding-type alibi \
116
+ --checkpoint-activations \
117
+ --abort-on-unmet-fused-kernel-constraints \
118
+ --pad-vocab-size-to 51200 \
119
+ $OPTIMIZER_ARGS \
120
+ $EXIT_OPTS \
121
+ "
122
+
123
+ # TODO: decide on efficient eval-interval + eval-iters
124
+
125
+ OUTPUT_ARGS=" \
126
+ --log-interval 1 \
127
+ --save-interval $SAVE_INTERVAL \
128
+ --eval-interval 1000 \
129
+ --eval-iters 1 \
130
+ --tensorboard-dir $TENSORBOARD_PATH \
131
+ --tensorboard-queue-size 5 \
132
+ --log-timers-to-tensorboard \
133
+ --log-batch-size-to-tensorboard \
134
+ --log-validation-ppl-to-tensorboard \
135
+ "
136
+
137
+ ZERO_STAGE=1
138
+
139
+ config_json="./ds_config.$SLURM_JOBID.json"
140
+
141
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
142
+ cat <<EOT > $config_json
143
+ {
144
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
145
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
146
+ "gradient_clipping": 1.0,
147
+ "zero_optimization": {
148
+ "stage": $ZERO_STAGE
149
+ },
150
+ "fp16": {
151
+ "enabled": true,
152
+ "loss_scale": 0,
153
+ "loss_scale_window": 500,
154
+ "hysteresis": 2,
155
+ "min_loss_scale": 1,
156
+ "initial_scale_power": 12
157
+ },
158
+ "steps_per_print": 2000,
159
+ "wall_clock_breakdown": false
160
+ }
161
+ EOT
162
+
163
+
164
+ DEEPSPEED_ARGS=" \
165
+ --deepspeed \
166
+ --deepspeed_config ${config_json} \
167
+ --zero-stage ${ZERO_STAGE} \
168
+ --deepspeed-activation-checkpointing \
169
+ "
170
+
171
+ export LAUNCHER="python -u -m torch.distributed.run \
172
+ --nproc_per_node $GPUS_PER_NODE \
173
+ --nnodes $NNODES \
174
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
175
+ --rdzv_backend c10d \
176
+ --max_restarts 0 \
177
+ --tee 3 \
178
+ "
179
+
180
+ export CMD=" \
181
+ `pwd`/pretrain_gpt.py \
182
+ --tensor-model-parallel-size $TP_SIZE \
183
+ --pipeline-model-parallel-size $PP_SIZE \
184
+ $GPT_ARGS \
185
+ $OUTPUT_ARGS \
186
+ $MUP_ARGS \
187
+ --save $CHECKPOINT_PATH \
188
+ --load $CHECKPOINT_PATH \
189
+ --data-path $DATA_PATH \
190
+ --data-impl mmap \
191
+ --distributed-backend nccl \
192
+ $DEEPSPEED_ARGS \
193
+ "
194
+
195
+ echo $CMD
196
+
197
+ # do not remove or the training will hang and nodes will be lost w/o this workaround
198
+ export CUDA_LAUNCH_BLOCKING=1
199
+
200
+ # hide duplicated errors using this hack - will be properly fixed in pt-1.12
201
+ export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json
202
+
203
+ clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt
204
+
205
+ echo "END TIME: $(date)"
train/tr14-mup/tr14-39M-grid-search-mup.slurm ADDED
@@ -0,0 +1,204 @@
1
+ #!/bin/bash
2
+ #SBATCH --qos=qos_gpu-t3
3
+ #SBATCH --nodes=1
4
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
5
+ #SBATCH --cpus-per-task=40 # number of cores per tasks
6
+ #SBATCH --hint=nomultithread # we get physical cores not logical
7
+ #SBATCH --gres=gpu:8 # number of gpus
8
+ #SBATCH --time 04:00:00 # maximum execution time (HH:MM:SS)
9
+ #SBATCH --output=%x.out # output file name
10
+ #SBATCH --partition=gpu_p5
11
+ #SBATCH --account=ajs@a100
12
+ #SBATCH -C a100
13
+
14
+ set -x -e
15
+
16
+ #source $ajs_ALL_CCFRWORK/start-py38-pt110
17
+ #source $ajs_ALL_CCFRWORK/start-py38-pt111
18
+ source $six_ALL_CCFRWORK/code/tr11-176B-ml/bigscience/train/tr11-176B-ml/start-tr11-176B-ml
19
+
20
+ echo "START TIME: $(date)"
21
+
22
+ variant=main
23
+
24
+ DATA_PATH=$ajs_ALL_CCFRSCRATCH/datasets/c4/gpt2tok_c4_text_document
25
+ DATA_OUTPUT_PATH=$ajs_ALL_CCFRSCRATCH/checkpoints/tr14-39M-lr$1-init$2-inpm$3-outm$4-atnm$5-mup
26
+ CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant
27
+ REPO_PATH=$DATA_OUTPUT_PATH/tr14-39M-lr$1-init$2-inpm$3-outm$4-atnm$5-mup-logs
28
+ TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant
29
+ LOGS_PATH=$REPO_PATH/logs/$variant
30
+ mkdir -p $LOGS_PATH
31
+
32
+ MEGATRON_DEEPSPEED_REPO=$ajs_ALL_CCFRWORK/code/Megatron-DeepSpeed
33
+ cd $MEGATRON_DEEPSPEED_REPO
34
+
35
+ TOKENIZER_NAME_OR_PATH=gpt2
36
+
37
+ # defining the right environment variables
38
+ export TRANSFORMERS_CACHE=$ajs_ALL_CCFRWORK/models
39
+ export HF_DATASETS_CACHE=$ajs_ALL_CCFRWORK/datasets
40
+ export HF_MODULES_CACHE=$ajs_ALL_CCFRWORK/modules
41
+ export HF_METRICS_CACHE=$ajs_ALL_CCFRWORK/metrics
42
+ export HF_DATASETS_OFFLINE=1
43
+ export TRANSFORMERS_OFFLINE=1
44
+
45
+ # testing for potential faulty nodes
46
+ # srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"'
47
+
48
+ # so processes know who to talk to
49
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
50
+ MASTER_PORT=6000
51
+
52
+ GPUS_PER_NODE=8
53
+ NNODES=$SLURM_NNODES
54
+
55
+ PP_SIZE=1
56
+ TP_SIZE=1
57
+
58
+ MICRO_BATCH_SIZE=32
59
+ GLOBAL_BATCH_SIZE=512
60
+
61
+ NLAYERS=32
62
+ NHIDDEN=256
63
+ NHEADS=8
64
+ SEQ_LEN=2048
65
+
66
+ SAVE_INTERVAL=1000
67
+
68
+ TRAIN_SAMPLES=1_953_125 # 40B tokens
69
+ LR_DECAY_SAMPLES=1_953_125 # Decay in the same amount
70
+ LR_WARMUP_SAMPLES=183_105 # 375M tokens
71
+
72
+
73
+ MUP_ARGS=" \
74
+ --lr $1 \
75
+ --min-lr `bc <<< "scale=3; $1/10"` \
76
+ --init-method-std $2 \
77
+ --mup \
78
+ --mup-input-mult $3 \
79
+ --mup-output-mult $4 \
80
+ --mup-attn-mult $5 \
81
+ "
82
+
83
+
84
+ OPTIMIZER_ARGS=" \
85
+ --optimizer adam \
86
+ --adam-beta1 0.9 \
87
+ --adam-beta2 0.95 \
88
+ --adam-eps 1e-8 \
89
+ --lr-decay-style cosine \
90
+ --lr-decay-samples $LR_DECAY_SAMPLES \
91
+ --lr-warmup-samples $LR_WARMUP_SAMPLES \
92
+ --clip-grad 1.0 \
93
+ --weight-decay 1e-1 \
94
+ "
95
+ # for 20h 1190, for 100h 5990
96
+ EXIT_OPTS=" \
97
+ --exit-duration-in-mins 1190 \
98
+ "
99
+
100
+ GPT_ARGS=" \
101
+ --pp-partition-method 'type:transformer' \
102
+ --num-layers $NLAYERS \
103
+ --hidden-size $NHIDDEN \
104
+ --num-attention-heads $NHEADS \
105
+ --seq-length $SEQ_LEN \
106
+ --max-position-embeddings $SEQ_LEN \
107
+ --micro-batch-size $MICRO_BATCH_SIZE \
108
+ --global-batch-size $GLOBAL_BATCH_SIZE \
109
+ --train-samples $TRAIN_SAMPLES \
110
+ --tokenizer-type PretrainedFromHF \
111
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
112
+ --embed-layernorm \
113
+ --fp16 \
114
+ --seed 42 \
115
+ --position-embedding-type alibi \
116
+ --checkpoint-activations \
117
+ --abort-on-unmet-fused-kernel-constraints \
118
+ --pad-vocab-size-to 51200 \
119
+ $OPTIMIZER_ARGS \
120
+ $EXIT_OPTS \
121
+ "
122
+
123
+ # TODO: decide on efficient eval-interval + eval-iters
124
+
125
+ OUTPUT_ARGS=" \
126
+ --log-interval 10 \
127
+ --save-interval $SAVE_INTERVAL \
128
+ --eval-interval 1000 \
129
+ --eval-iters 100 \
130
+ --tensorboard-dir $TENSORBOARD_PATH \
131
+ --tensorboard-queue-size 5 \
132
+ --log-timers-to-tensorboard \
133
+ --log-batch-size-to-tensorboard \
134
+ --log-validation-ppl-to-tensorboard \
135
+ "
136
+
137
+ ZERO_STAGE=1
138
+
139
+ config_json="./ds_config.$SLURM_JOBID.json"
140
+
141
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
142
+ cat <<EOT > $config_json
143
+ {
144
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
145
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
146
+ "gradient_clipping": 1.0,
147
+ "zero_optimization": {
148
+ "stage": $ZERO_STAGE
149
+ },
150
+ "fp16": {
151
+ "enabled": true,
152
+ "loss_scale": 0,
153
+ "loss_scale_window": 500,
154
+ "hysteresis": 2,
155
+ "min_loss_scale": 1,
156
+ "initial_scale_power": 12
157
+ },
158
+ "steps_per_print": 2000,
159
+ "wall_clock_breakdown": false
160
+ }
161
+ EOT
162
+
163
+
164
+ DEEPSPEED_ARGS=" \
165
+ --deepspeed \
166
+ --deepspeed_config ${config_json} \
167
+ --zero-stage ${ZERO_STAGE} \
168
+ --deepspeed-activation-checkpointing \
169
+ "
170
+
171
+ export LAUNCHER="python -u -m torch.distributed.run \
172
+ --nproc_per_node $GPUS_PER_NODE \
173
+ --nnodes $NNODES \
174
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
175
+ --rdzv_backend c10d \
176
+ --max_restarts 0 \
177
+ --tee 3 \
178
+ "
179
+
180
+ export CMD=" \
181
+ `pwd`/pretrain_gpt.py \
182
+ --tensor-model-parallel-size $TP_SIZE \
183
+ --pipeline-model-parallel-size $PP_SIZE \
184
+ $GPT_ARGS \
185
+ $OUTPUT_ARGS \
186
+ $MUP_ARGS \
187
+ --save $CHECKPOINT_PATH \
188
+ --data-path $DATA_PATH \
189
+ --data-impl mmap \
190
+ --distributed-backend nccl \
191
+ $DEEPSPEED_ARGS \
192
+ "
193
+
194
+ echo $CMD
195
+
196
+ # do not remove or the training will hang and nodes will be lost w/o this workaround
197
+ export CUDA_LAUNCH_BLOCKING=1
198
+
199
+ # hide duplicated errors using this hack - will be properly fixed in pt-1.12
200
+ export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json
201
+
202
+ clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt
203
+
204
+ echo "END TIME: $(date)"