diff --git a/evaluation/results/tr11/README.md b/evaluation/results/tr11/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..afb0afc9dc0d433048f2bd1f39c27db3f5953e9d
--- /dev/null
+++ b/evaluation/results/tr11/README.md
@@ -0,0 +1,11 @@
+# BigScience BLOOM Evaluation Results
+
+This folder contains evaluation results for the BLOOM model family.
+
+## Evaluation Procedure
+
+- The bslmeval files were created using:
+  - https://github.com/bigscience-workshop/Megatron-DeepSpeed/pull/291
+  - https://github.com/bigscience-workshop/lm-evaluation-harness
+- The humaneval files were created on the HumanEval code dataset using:
+  - https://github.com/loubnabnl/bloom-code-evaluation
diff --git a/evaluation/results/tr11/bloom/bslmeval.json b/evaluation/results/tr11/bloom/bslmeval.json
new file mode 100644
index 0000000000000000000000000000000000000000..2fc5e10587d788492bae7b796339ed5f7ebdd8de
--- /dev/null
+++ b/evaluation/results/tr11/bloom/bslmeval.json
@@ -0,0 +1,3160 @@
+{ + "results": { + "arc_challenge": { + "2022-07-07-20-56-59": { + "acc": 0.4112627986348123, + "acc_norm": 0.44880546075085326, + "acc_norm_stderr": 0.01453459958509767, + "acc_stderr": 0.014379441068522077 + } + }, + "arc_easy": { + "2022-07-07-20-56-53": { + "acc": 0.726010101010101, + "acc_norm": 0.6738215488215489, + "acc_norm_stderr": 0.00961984941703518, + "acc_stderr": 0.009151805901544019 + } + }, + "axb+GPT-3 style": { + "2022-07-07-15-16-12": { + "acc": 0.43931159420289856, + "acc_norm": 0.5144927536231884, + "acc_norm_stderr": 0.015048725939283577, + "acc_stderr": 0.014943742111269621, + "prompt_name": "GPT-3 style", + "task_name": "axb" + } + }, + "axb+MNLI crowdsource": { + "2022-07-07-15-16-12": { + "acc": 0.5760869565217391, + "acc_norm": 0.4166666666666667, + "acc_norm_stderr": 0.014844481058991162, + "acc_stderr": 0.01487971643070736, + "prompt_name": "MNLI crowdsource", + "task_name": "axb" + } + }, + "axb+based on the previous passage": { + "2022-07-07-15-16-12": { + "acc": 0.5760869565217391, + "acc_norm": 0.4166666666666667, + "acc_norm_stderr": 0.014844481058991162, + "acc_stderr": 0.014879716430707356, + "prompt_name": "based on the previous passage", + "task_name": "axb" + } + }, + "axb+can we infer": { + "2022-07-07-15-16-12": { + "acc": 0.5507246376811594, + "acc_norm": 0.4166666666666667, + "acc_norm_stderr": 0.014844481058991162, + "acc_stderr": 0.014977378261696464, + "prompt_name": "can we infer", + "task_name": "axb" + } + }, + "axb+does it follow that": { + "2022-07-07-15-16-12": { + "acc": 0.4936594202898551, + "acc_norm": 0.4166666666666667, + "acc_norm_stderr": 0.014844481058991162, + "acc_stderr": 0.015053841027817174, + "prompt_name": "does it follow that", + "task_name": "axb" + } + }, + "axb+does this imply": { + "2022-07-07-15-16-12": { + "acc": 0.5833333333333334, + "acc_norm": 0.4166666666666667, + "acc_norm_stderr": 0.014844481058991162, + "acc_stderr": 0.014844481058991162, + "prompt_name": "does this imply", + "task_name": "axb" + } + }, + "axb+guaranteed true": { + "2022-07-07-15-16-12": { + "acc": 0.5742753623188406, + "acc_norm": 0.42028985507246375, + "acc_norm_stderr": 0.014862509583215737, + "acc_stderr": 0.014888012621293445, + "prompt_name": "guaranteed true", + "task_name": "axb" + } + }, + "axb+justified in saying": { + "2022-07-07-15-16-12": { + "acc": 0.5398550724637681, + "acc_norm": 0.4166666666666667, + "acc_norm_stderr": 0.014844481058991162, + "acc_stderr": 0.015007147683509258, + "prompt_name": "justified in saying", +
"task_name": "axb" + } + }, + "axb+must be true": { + "2022-07-07-15-16-12": { + "acc": 0.5769927536231884, + "acc_norm": 0.4166666666666667, + "acc_norm_stderr": 0.014844481058991162, + "acc_stderr": 0.014875491592767387, + "prompt_name": "must be true", + "task_name": "axb" + } + }, + "axb+should assume": { + "2022-07-07-15-16-12": { + "acc": 0.5797101449275363, + "acc_norm": 0.4166666666666667, + "acc_norm_stderr": 0.014844481058991162, + "acc_stderr": 0.014862509583215737, + "prompt_name": "should assume", + "task_name": "axb" + } + }, + "axg+GPT-3 style": { + "2022-07-07-15-16-12": { + "acc": 0.5028089887640449, + "acc_norm": 0.5252808988764045, + "acc_norm_stderr": 0.026503301742331602, + "acc_stderr": 0.026536825838510643, + "parity": 0.9943820224719101, + "parity_stderr": 0.005617977528089883, + "prompt_name": "GPT-3 style", + "task_name": "axg" + } + }, + "axg+MNLI crowdsource": { + "2022-07-07-15-16-12": { + "acc": 0.5, + "acc_norm": 0.5, + "acc_norm_stderr": 0.026537244621713762, + "acc_stderr": 0.026537244621713762, + "parity": 1.0, + "parity_stderr": 0.0, + "prompt_name": "MNLI crowdsource", + "task_name": "axg" + } + }, + "axg+based on the previous passage": { + "2022-07-07-15-16-12": { + "acc": 0.5674157303370787, + "acc_norm": 0.5, + "acc_norm_stderr": 0.026537244621713762, + "acc_stderr": 0.026294921016736104, + "parity": 0.9101123595505618, + "parity_stderr": 0.0214986338475263, + "prompt_name": "based on the previous passage", + "task_name": "axg" + } + }, + "axg+can we infer": { + "2022-07-07-15-16-12": { + "acc": 0.5393258426966292, + "acc_norm": 0.5, + "acc_norm_stderr": 0.026537244621713762, + "acc_stderr": 0.02645503642756265, + "parity": 0.9325842696629213, + "parity_stderr": 0.01884681777754791, + "prompt_name": "can we infer", + "task_name": "axg" + } + }, + "axg+does it follow that": { + "2022-07-07-15-16-12": { + "acc": 0.5646067415730337, + "acc_norm": 0.5, + "acc_norm_stderr": 0.026537244621713762, + "acc_stderr": 0.026314777562240214, + "parity": 0.8595505617977528, + "parity_stderr": 0.026116145785378943, + "prompt_name": "does it follow that", + "task_name": "axg" + } + }, + "axg+does this imply": { + "2022-07-07-15-16-12": { + "acc": 0.5, + "acc_norm": 0.5, + "acc_norm_stderr": 0.026537244621713762, + "acc_stderr": 0.026537244621713762, + "parity": 1.0, + "parity_stderr": 0.0, + "prompt_name": "does this imply", + "task_name": "axg" + } + }, + "axg+guaranteed true": { + "2022-07-07-15-16-12": { + "acc": 0.5140449438202247, + "acc_norm": 0.5, + "acc_norm_stderr": 0.026537244621713762, + "acc_stderr": 0.026526773058212962, + "parity": 0.9943820224719101, + "parity_stderr": 0.005617977528089871, + "prompt_name": "guaranteed true", + "task_name": "axg" + } + }, + "axg+justified in saying": { + "2022-07-07-15-16-12": { + "acc": 0.5617977528089888, + "acc_norm": 0.5, + "acc_norm_stderr": 0.026537244621713762, + "acc_stderr": 0.026333775118002358, + "parity": 0.9438202247191011, + "parity_stderr": 0.017308044589604655, + "prompt_name": "justified in saying", + "task_name": "axg" + } + }, + "axg+must be true": { + "2022-07-07-15-16-12": { + "acc": 0.5308988764044944, + "acc_norm": 0.5, + "acc_norm_stderr": 0.026537244621713762, + "acc_stderr": 0.026486523782404646, + "parity": 0.9382022471910112, + "parity_stderr": 0.018098723392996646, + "prompt_name": "must be true", + "task_name": "axg" + } + }, + "axg+should assume": { + "2022-07-07-15-16-12": { + "acc": 0.5196629213483146, + "acc_norm": 0.5, + "acc_norm_stderr": 0.026537244621713762, + "acc_stderr": 
0.026516716466795417, + "parity": 0.9382022471910112, + "parity_stderr": 0.018098723392996636, + "prompt_name": "should assume", + "task_name": "axg" + } + }, + "boolq": { + "2022-07-07-20-56-57": { + "acc": 0.7039755351681957, + "acc_stderr": 0.007984268354724199 + } + }, + "boolq+GPT-3 Style": { + "2022-07-07-15-16-31": { + "acc": 0.6993883792048929, + "acc_norm": 0.7299694189602447, + "acc_norm_stderr": 0.007765176800187587, + "acc_stderr": 0.00801963547470537, + "prompt_name": "GPT-3 Style", + "task_name": "boolq" + } + }, + "boolq+I wonder\u2026": { + "2022-07-07-15-16-31": { + "acc": 0.6345565749235474, + "acc_norm": 0.6214067278287462, + "acc_norm_stderr": 0.008483341718024479, + "acc_stderr": 0.00842243737006271, + "prompt_name": "I wonder\u2026", + "task_name": "boolq" + } + }, + "boolq+after_reading": { + "2022-07-07-15-16-31": { + "acc": 0.6214067278287462, + "acc_norm": 0.6162079510703364, + "acc_norm_stderr": 0.008505584729105, + "acc_stderr": 0.008483341718024479, + "prompt_name": "after_reading", + "task_name": "boolq" + } + }, + "boolq+based on the following passage": { + "2022-07-07-15-16-31": { + "acc": 0.38256880733944953, + "acc_norm": 0.5657492354740061, + "acc_norm_stderr": 0.008669116184243037, + "acc_stderr": 0.008500443818876156, + "prompt_name": "based on the following passage", + "task_name": "boolq" + } + }, + "boolq+based on the previous passage": { + "2022-07-07-15-16-31": { + "acc": 0.6642201834862386, + "acc_norm": 0.6241590214067279, + "acc_norm_stderr": 0.00847114724816011, + "acc_stderr": 0.008259920504139585, + "prompt_name": "based on the previous passage", + "task_name": "boolq" + } + }, + "boolq+could you tell me\u2026": { + "2022-07-07-15-16-31": { + "acc": 0.655045871559633, + "acc_norm": 0.6217125382262997, + "acc_norm_stderr": 0.00848200113393099, + "acc_stderr": 0.00831398181257226, + "prompt_name": "could you tell me\u2026", + "task_name": "boolq" + } + }, + "boolq+exam": { + "2022-07-07-15-16-31": { + "acc": 0.6507645259938838, + "acc_norm": 0.6232415902140673, + "acc_norm_stderr": 0.008475244400491449, + "acc_stderr": 0.008338033790721228, + "prompt_name": "exam", + "task_name": "boolq" + } + }, + "boolq+exercise": { + "2022-07-07-15-16-31": { + "acc": 0.6217125382262997, + "acc_norm": 0.6229357798165137, + "acc_norm_stderr": 0.008476602927953715, + "acc_stderr": 0.00848200113393099, + "prompt_name": "exercise", + "task_name": "boolq" + } + }, + "boolq+valid_binary": { + "2022-07-07-15-16-31": { + "acc": 0.6253822629969419, + "acc_norm": 0.6125382262996942, + "acc_norm_stderr": 0.008520666536136938, + "acc_stderr": 0.00846563398343193, + "prompt_name": "valid_binary", + "task_name": "boolq" + } + }, + "boolq+yes_no_question": { + "2022-07-07-15-16-31": { + "acc": 0.42629969418960245, + "acc_norm": 0.6250764525993884, + "acc_norm_stderr": 0.008467017704332997, + "acc_stderr": 0.008649531625805666, + "prompt_name": "yes_no_question", + "task_name": "boolq" + } + }, + "cb+GPT-3 style": { + "2022-07-07-15-16-21": { + "acc": 0.4642857142857143, + "acc_stderr": 0.06724777654937658, + "f1": 0.3849206349206349, + "prompt_name": "GPT-3 style", + "task_name": "cb" + } + }, + "cb+MNLI crowdsource": { + "2022-07-07-15-16-21": { + "acc": 0.19642857142857142, + "acc_stderr": 0.05357142857142858, + "f1": 0.1815172191045258, + "prompt_name": "MNLI crowdsource", + "task_name": "cb" + } + }, + "cb+always/sometimes/never": { + "2022-07-07-15-16-21": { + "acc": 0.125, + "acc_stderr": 0.04459412925079224, + "f1": 0.11230856494611458, + "prompt_name": 
"always/sometimes/never", + "task_name": "cb" + } + }, + "cb+based on the previous passage": { + "2022-07-07-15-16-21": { + "acc": 0.375, + "acc_stderr": 0.06527912098338669, + "f1": 0.26351351351351354, + "prompt_name": "based on the previous passage", + "task_name": "cb" + } + }, + "cb+can we infer": { + "2022-07-07-15-16-21": { + "acc": 0.375, + "acc_stderr": 0.06527912098338669, + "f1": 0.2587301587301587, + "prompt_name": "can we infer", + "task_name": "cb" + } + }, + "cb+claim true/false/inconclusive": { + "2022-07-07-15-16-21": { + "acc": 0.39285714285714285, + "acc_stderr": 0.0658538889806635, + "f1": 0.3126633404609171, + "prompt_name": "claim true/false/inconclusive", + "task_name": "cb" + } + }, + "cb+consider always/sometimes/never": { + "2022-07-07-15-16-21": { + "acc": 0.10714285714285714, + "acc_stderr": 0.0417053005800816, + "f1": 0.08333333333333333, + "prompt_name": "consider always/sometimes/never", + "task_name": "cb" + } + }, + "cb+does it follow that": { + "2022-07-07-15-16-21": { + "acc": 0.44642857142857145, + "acc_stderr": 0.06703189227942398, + "f1": 0.31950617283950616, + "prompt_name": "does it follow that", + "task_name": "cb" + } + }, + "cb+does this imply": { + "2022-07-07-15-16-21": { + "acc": 0.07142857142857142, + "acc_stderr": 0.0347266024860284, + "f1": 0.04519774011299435, + "prompt_name": "does this imply", + "task_name": "cb" + } + }, + "cb+guaranteed true": { + "2022-07-07-15-16-21": { + "acc": 0.4642857142857143, + "acc_stderr": 0.06724777654937658, + "f1": 0.384992784992785, + "prompt_name": "guaranteed true", + "task_name": "cb" + } + }, + "cb+guaranteed/possible/impossible": { + "2022-07-07-15-16-21": { + "acc": 0.08928571428571429, + "acc_stderr": 0.038450387280282494, + "f1": 0.05649717514124294, + "prompt_name": "guaranteed/possible/impossible", + "task_name": "cb" + } + }, + "cb+justified in saying": { + "2022-07-07-15-16-21": { + "acc": 0.32142857142857145, + "acc_stderr": 0.06297362289056341, + "f1": 0.22660818713450293, + "prompt_name": "justified in saying", + "task_name": "cb" + } + }, + "cb+must be true": { + "2022-07-07-15-16-21": { + "acc": 0.35714285714285715, + "acc_stderr": 0.06460957383809221, + "f1": 0.2982905982905983, + "prompt_name": "must be true", + "task_name": "cb" + } + }, + "cb+should assume": { + "2022-07-07-15-16-21": { + "acc": 0.3392857142857143, + "acc_stderr": 0.06384226561930825, + "f1": 0.2604166666666667, + "prompt_name": "should assume", + "task_name": "cb" + } + }, + "cb+take the following as truth": { + "2022-07-07-15-16-21": { + "acc": 0.32142857142857145, + "acc_stderr": 0.06297362289056341, + "f1": 0.16901408450704228, + "prompt_name": "take the following as truth", + "task_name": "cb" + } + }, + "cola+Following sentence acceptable": { + "2022-07-07-15-16-32": { + "acc": 0.6069031639501438, + "acc_norm": 0.31351869606903165, + "acc_norm_stderr": 0.014371834902632595, + "acc_stderr": 0.015131278175045317, + "prompt_name": "Following sentence acceptable", + "task_name": "cola" + } + }, + "cola+Make sense yes no": { + "2022-07-07-15-16-32": { + "acc": 0.5522531160115053, + "acc_norm": 0.6912751677852349, + "acc_norm_stderr": 0.014311244461311297, + "acc_stderr": 0.01540463860229955, + "prompt_name": "Make sense yes no", + "task_name": "cola" + } + }, + "cola+Previous sentence acceptable": { + "2022-07-07-15-16-32": { + "acc": 0.35282837967401726, + "acc_norm": 0.6912751677852349, + "acc_norm_stderr": 0.014311244461311299, + "acc_stderr": 0.014803268890263814, + "prompt_name": "Previous sentence acceptable", + 
"task_name": "cola" + } + }, + "cola+editing": { + "2022-07-07-15-16-32": { + "acc": 0.3077660594439118, + "acc_norm": 0.6912751677852349, + "acc_norm_stderr": 0.014311244461311299, + "acc_stderr": 0.014298910475462598, + "prompt_name": "editing", + "task_name": "cola" + } + }, + "cola+is_this_correct": { + "2022-07-07-15-16-32": { + "acc": 0.39022051773729627, + "acc_norm": 0.6912751677852349, + "acc_norm_stderr": 0.014311244461311299, + "acc_stderr": 0.015111500662688604, + "prompt_name": "is_this_correct", + "task_name": "cola" + } + }, + "copa": { + "2022-07-07-20-56-53": { + "acc": 0.87, + "acc_stderr": 0.033799766898963086 + } + }, + "copa+C1 or C2? premise, so/because\u2026": { + "2022-07-07-15-16-25": { + "acc": 0.75, + "acc_norm": 0.7, + "acc_norm_stderr": 0.046056618647183814, + "acc_stderr": 0.04351941398892446, + "prompt_name": "C1 or C2? premise, so/because\u2026", + "task_name": "copa" + } + }, + "copa+best_option": { + "2022-07-07-15-16-25": { + "acc": 0.49, + "acc_norm": 0.46, + "acc_norm_stderr": 0.05009082659620332, + "acc_stderr": 0.05024183937956912, + "prompt_name": "best_option", + "task_name": "copa" + } + }, + "copa+cause_effect": { + "2022-07-07-15-16-25": { + "acc": 0.64, + "acc_norm": 0.51, + "acc_norm_stderr": 0.05024183937956911, + "acc_stderr": 0.048241815132442176, + "prompt_name": "cause_effect", + "task_name": "copa" + } + }, + "copa+choose": { + "2022-07-07-15-16-25": { + "acc": 0.56, + "acc_norm": 0.48, + "acc_norm_stderr": 0.050211673156867795, + "acc_stderr": 0.049888765156985884, + "prompt_name": "choose", + "task_name": "copa" + } + }, + "copa+exercise": { + "2022-07-07-15-16-25": { + "acc": 0.45, + "acc_norm": 0.48, + "acc_norm_stderr": 0.050211673156867795, + "acc_stderr": 0.05, + "prompt_name": "exercise", + "task_name": "copa" + } + }, + "copa+i_am_hesitating": { + "2022-07-07-15-16-25": { + "acc": 0.56, + "acc_norm": 0.55, + "acc_norm_stderr": 0.05, + "acc_stderr": 0.04988876515698589, + "prompt_name": "i_am_hesitating", + "task_name": "copa" + } + }, + "copa+more likely": { + "2022-07-07-15-16-25": { + "acc": 0.55, + "acc_norm": 0.51, + "acc_norm_stderr": 0.05024183937956912, + "acc_stderr": 0.05, + "prompt_name": "more likely", + "task_name": "copa" + } + }, + "copa+plausible_alternatives": { + "2022-07-07-15-16-25": { + "acc": 0.55, + "acc_norm": 0.47, + "acc_norm_stderr": 0.050161355804659205, + "acc_stderr": 0.049999999999999996, + "prompt_name": "plausible_alternatives", + "task_name": "copa" + } + }, + "crows_pairs_english+1": { + "2022-07-07-15-16-45": { + "acc": 0.49552772808586765, + "acc_norm": 0.49552772808586765, + "acc_norm_stderr": 0.012212810647205384, + "acc_stderr": 0.012212810647205384, + "prompt_name": "1", + "task_name": "crows_pairs_english" + } + }, + "crows_pairs_english+2": { + "2022-07-07-15-16-45": { + "acc": 0.4883720930232558, + "acc_norm": 0.4883720930232558, + "acc_norm_stderr": 0.012209996095069646, + "acc_stderr": 0.012209996095069646, + "prompt_name": "2", + "task_name": "crows_pairs_english" + } + }, + "crows_pairs_english+3": { + "2022-07-07-15-16-45": { + "acc": 0.516994633273703, + "acc_norm": 0.4806201550387597, + "acc_norm_stderr": 0.012204121667933781, + "acc_stderr": 0.012206242349351725, + "prompt_name": "3", + "task_name": "crows_pairs_english" + } + }, + "crows_pairs_english+4": { + "2022-07-07-15-16-45": { + "acc": 0.5044722719141324, + "acc_norm": 0.5044722719141324, + "acc_norm_stderr": 0.012212810647205388, + "acc_stderr": 0.012212810647205388, + "prompt_name": "4", + "task_name": 
"crows_pairs_english" + } + }, + "crows_pairs_english+A_preference": { + "2022-07-07-15-16-45": { + "acc": 0.5116279069767442, + "acc_norm": 0.5116279069767442, + "acc_norm_stderr": 0.012209996095069644, + "acc_stderr": 0.012209996095069644, + "prompt_name": "A_preference", + "task_name": "crows_pairs_english" + } + }, + "crows_pairs_english+A_stereotype_true": { + "2022-07-07-15-16-45": { + "acc": 0.49254621347644606, + "acc_norm": 0.5062611806797853, + "acc_norm_stderr": 0.012212341600228735, + "acc_stderr": 0.012211942027483493, + "prompt_name": "A_stereotype_true", + "task_name": "crows_pairs_english" + } + }, + "crows_pairs_french+1_fr": { + "2022-07-07-15-16-45": { + "acc": 0.49552772808586765, + "acc_norm": 0.49552772808586765, + "acc_norm_stderr": 0.012212810647205384, + "acc_stderr": 0.012212810647205384, + "prompt_name": "1_fr", + "task_name": "crows_pairs_french" + } + }, + "crows_pairs_french+2_fr": { + "2022-07-07-15-16-45": { + "acc": 0.4883720930232558, + "acc_norm": 0.4883720930232558, + "acc_norm_stderr": 0.012209996095069646, + "acc_stderr": 0.012209996095069646, + "prompt_name": "2_fr", + "task_name": "crows_pairs_french" + } + }, + "crows_pairs_french+3_fr": { + "2022-07-07-15-16-45": { + "acc": 0.5259391771019678, + "acc_norm": 0.5259391771019678, + "acc_norm_stderr": 0.012196852930770321, + "acc_stderr": 0.012196852930770321, + "prompt_name": "3_fr", + "task_name": "crows_pairs_french" + } + }, + "crows_pairs_french+4_fr": { + "2022-07-07-15-16-45": { + "acc": 0.505664877757901, + "acc_norm": 0.505664877757901, + "acc_norm_stderr": 0.012212515323431726, + "acc_stderr": 0.012212515323431726, + "prompt_name": "4_fr", + "task_name": "crows_pairs_french" + } + }, + "crows_pairs_french+A_preference_fr": { + "2022-07-07-15-16-45": { + "acc": 0.5116279069767442, + "acc_norm": 0.5116279069767442, + "acc_norm_stderr": 0.012209996095069644, + "acc_stderr": 0.012209996095069644, + "prompt_name": "A_preference_fr", + "task_name": "crows_pairs_french" + } + }, + "crows_pairs_french+A_reality_check_fr": { + "2022-07-07-15-16-45": { + "acc": 0.5104353011329755, + "acc_norm": 0.5104353011329755, + "acc_norm_stderr": 0.012210638982043403, + "acc_stderr": 0.012210638982043403, + "prompt_name": "A_reality_check_fr", + "task_name": "crows_pairs_french" + } + }, + "crows_pairs_french+A_stereotype_true_fr": { + "2022-07-07-15-16-45": { + "acc": 0.5050685748360167, + "acc_norm": 0.5050685748360167, + "acc_norm_stderr": 0.01221267167220127, + "acc_stderr": 0.01221267167220127, + "prompt_name": "A_stereotype_true_fr", + "task_name": "crows_pairs_french" + } + }, + "diabla+Is the error present? (same lang)": { + "2022-07-07-15-16-43": { + "acc": 0.07602644398051496, + "acc_norm": 0.06924147529575504, + "acc_norm_stderr": 0.003348737218649089, + "acc_stderr": 0.0034961617024621885, + "prompt_name": "Is the error present? 
(same lang)", + "task_name": "diabla" + } + }, + "diabla+Which is automatic?": { + "2022-07-07-15-16-43": { + "acc": 0.5135699373695198, + "acc_norm": 0.5135699373695198, + "acc_norm_stderr": 0.0065930960405032255, + "acc_stderr": 0.0065930960405032255, + "prompt_name": "Which is automatic?", + "task_name": "diabla" + } + }, + "gsarti/flores_101_afr+null": { + "2022-07-07-14-06-05": { + "bits_per_byte": 2.0889270277156933, + "byte_perplexity": 4.25431550058444, + "prompt_name": "null", + "task_name": "gsarti/flores_101_afr", + "word_perplexity": 6513.784244776627 + } + }, + "gsarti/flores_101_amh+null": { + "2022-07-07-14-06-19": { + "bits_per_byte": 1.8940911321767726, + "byte_perplexity": 3.716877477347089, + "prompt_name": "null", + "task_name": "gsarti/flores_101_amh", + "word_perplexity": 23562136.28489486 + } + }, + "gsarti/flores_101_ara+null": { + "2022-07-07-14-06-11": { + "bits_per_byte": 0.769689671439268, + "byte_perplexity": 1.7049030137120964, + "prompt_name": "null", + "task_name": "gsarti/flores_101_ara", + "word_perplexity": 298.75235070287073 + } + }, + "gsarti/flores_101_asm+null": { + "2022-07-07-14-06-22": { + "bits_per_byte": 2.717337841226465, + "byte_perplexity": 6.576581380404954, + "prompt_name": "null", + "task_name": "gsarti/flores_101_asm", + "word_perplexity": 151261115092731.0 + } + }, + "gsarti/flores_101_ast+null": { + "2022-07-07-14-06-13": { + "bits_per_byte": 1.514115429965105, + "byte_perplexity": 2.8562364775797944, + "prompt_name": "null", + "task_name": "gsarti/flores_101_ast", + "word_perplexity": 825.1769257551665 + } + }, + "gsarti/flores_101_azj+null": { + "2022-07-07-14-06-23": { + "bits_per_byte": 2.265201414551651, + "byte_perplexity": 4.80721528624391, + "prompt_name": "null", + "task_name": "gsarti/flores_101_azj", + "word_perplexity": 788701.305509992 + } + }, + "gsarti/flores_101_bel+null": { + "2022-07-07-14-06-04": { + "bits_per_byte": 1.4495443345547894, + "byte_perplexity": 2.7312177406635065, + "prompt_name": "null", + "task_name": "gsarti/flores_101_bel", + "word_perplexity": 385519.17852132686 + } + }, + "gsarti/flores_101_ben+null": { + "2022-07-07-14-06-11": { + "bits_per_byte": 2.583376944428484, + "byte_perplexity": 5.993409478990023, + "prompt_name": "null", + "task_name": "gsarti/flores_101_ben", + "word_perplexity": 46231399749986.41 + } + }, + "gsarti/flores_101_bos+null": { + "2022-07-07-14-06-02": { + "bits_per_byte": 1.845436621383762, + "byte_perplexity": 3.5936169095529493, + "prompt_name": "null", + "task_name": "gsarti/flores_101_bos", + "word_perplexity": 5152.332178760028 + } + }, + "gsarti/flores_101_bul+null": { + "2022-07-07-14-06-08": { + "bits_per_byte": 1.1103868457638553, + "byte_perplexity": 2.159035321398085, + "prompt_name": "null", + "task_name": "gsarti/flores_101_bul", + "word_perplexity": 7592.1196769558 + } + }, + "gsarti/flores_101_cat+null": { + "2022-07-07-14-06-24": { + "bits_per_byte": 1.1162806946358026, + "byte_perplexity": 2.167873680006659, + "prompt_name": "null", + "task_name": "gsarti/flores_101_cat", + "word_perplexity": 107.68349889171907 + } + }, + "gsarti/flores_101_ceb+null": { + "2022-07-07-14-06-18": { + "bits_per_byte": 2.4024425293322333, + "byte_perplexity": 5.286975089885673, + "prompt_name": "null", + "task_name": "gsarti/flores_101_ceb", + "word_perplexity": 22841.140090734538 + } + }, + "gsarti/flores_101_ces+null": { + "2022-07-07-14-06-00": { + "bits_per_byte": 1.7872739901497152, + "byte_perplexity": 3.4516208322236017, + "prompt_name": "null", + "task_name": 
"gsarti/flores_101_ces", + "word_perplexity": 9146.455709982567 + } + }, + "gsarti/flores_101_ckb+null": { + "2022-07-07-14-21-13": { + "bits_per_byte": 1.8895138332583217, + "byte_perplexity": 3.7051034724765612, + "prompt_name": "null", + "task_name": "gsarti/flores_101_ckb", + "word_perplexity": 7152440.931081667 + } + }, + "gsarti/flores_101_cym+null": { + "2022-07-07-14-22-34": { + "bits_per_byte": 2.8255681364680445, + "byte_perplexity": 7.0889312398688125, + "prompt_name": "null", + "task_name": "gsarti/flores_101_cym", + "word_perplexity": 93988.30506967758 + } + }, + "gsarti/flores_101_dan+null": { + "2022-07-07-14-06-21": { + "bits_per_byte": 1.7782400464960308, + "byte_perplexity": 3.4300748208111838, + "prompt_name": "null", + "task_name": "gsarti/flores_101_dan", + "word_perplexity": 2580.5099573175935 + } + }, + "gsarti/flores_101_deu+null": { + "2022-07-07-14-06-18": { + "bits_per_byte": 1.2253110829512435, + "byte_perplexity": 2.3380585896268107, + "prompt_name": "null", + "task_name": "gsarti/flores_101_deu", + "word_perplexity": 424.7487911449544 + } + }, + "gsarti/flores_101_ell+null": { + "2022-07-07-14-06-11": { + "bits_per_byte": 0.970530095565208, + "byte_perplexity": 1.9595604725375586, + "prompt_name": "null", + "task_name": "gsarti/flores_101_ell", + "word_perplexity": 3227.33649640626 + } + }, + "gsarti/flores_101_eng+null": { + "2022-07-07-14-06-23": { + "bits_per_byte": 0.9122388509090102, + "byte_perplexity": 1.8819637649637901, + "prompt_name": "null", + "task_name": "gsarti/flores_101_eng", + "word_perplexity": 43.85102868602895 + } + }, + "gsarti/flores_101_est+null": { + "2022-07-07-14-06-20": { + "bits_per_byte": 2.529533778013975, + "byte_perplexity": 5.773850600380297, + "prompt_name": "null", + "task_name": "gsarti/flores_101_est", + "word_perplexity": 1076524.500370753 + } + }, + "gsarti/flores_101_fas+null": { + "2022-07-07-14-14-49": { + "bits_per_byte": 1.281320843833715, + "byte_perplexity": 2.4306140728294086, + "prompt_name": "null", + "task_name": "gsarti/flores_101_fas", + "word_perplexity": 4908.172101911933 + } + }, + "gsarti/flores_101_fin+null": { + "2022-07-07-14-06-22": { + "bits_per_byte": 2.105780489599803, + "byte_perplexity": 4.304305536244342, + "prompt_name": "null", + "task_name": "gsarti/flores_101_fin", + "word_perplexity": 579177.7197568177 + } + }, + "gsarti/flores_101_fra+null": { + "2022-07-07-14-06-11": { + "bits_per_byte": 0.9541731108108887, + "byte_perplexity": 1.9374688438541796, + "prompt_name": "null", + "task_name": "gsarti/flores_101_fra", + "word_perplexity": 68.98607421544799 + } + }, + "gsarti/flores_101_ful+null": { + "2022-07-07-14-06-15": { + "bits_per_byte": 3.2839740723460062, + "byte_perplexity": 9.740353097219378, + "prompt_name": "null", + "task_name": "gsarti/flores_101_ful", + "word_perplexity": 311458.46164262504 + } + }, + "gsarti/flores_101_gle+null": { + "2022-07-07-14-06-15": { + "bits_per_byte": 2.5934182581448035, + "byte_perplexity": 6.035269765075012, + "prompt_name": "null", + "task_name": "gsarti/flores_101_gle", + "word_perplexity": 78448.0214710969 + } + }, + "gsarti/flores_101_glg+null": { + "2022-07-07-14-06-13": { + "bits_per_byte": 1.242115354935806, + "byte_perplexity": 2.365451129546636, + "prompt_name": "null", + "task_name": "gsarti/flores_101_glg", + "word_perplexity": 221.50059778506318 + } + }, + "gsarti/flores_101_guj+null": { + "2022-07-07-14-06-23": { + "bits_per_byte": 2.5126737684395337, + "byte_perplexity": 5.70676742569154, + "prompt_name": "null", + "task_name": 
"gsarti/flores_101_guj", + "word_perplexity": 716552866398.663 + } + }, + "gsarti/flores_101_hau+null": { + "2022-07-07-14-06-18": { + "bits_per_byte": 3.146525590123433, + "byte_perplexity": 8.855204288260023, + "prompt_name": "null", + "task_name": "gsarti/flores_101_hau", + "word_perplexity": 210586.4823224956 + } + }, + "gsarti/flores_101_heb+null": { + "2022-07-07-14-06-21": { + "bits_per_byte": 1.5464345997121398, + "byte_perplexity": 2.920943798471208, + "prompt_name": "null", + "task_name": "gsarti/flores_101_heb", + "word_perplexity": 73564.63229642312 + } + }, + "gsarti/flores_101_hin+null": { + "2022-07-07-14-06-06": { + "bits_per_byte": 2.4467929718326022, + "byte_perplexity": 5.452028001573195, + "prompt_name": "null", + "task_name": "gsarti/flores_101_hin", + "word_perplexity": 4425439866.173367 + } + }, + "gsarti/flores_101_hrv+null": { + "2022-07-07-14-06-20": { + "bits_per_byte": 1.8897394364016442, + "byte_perplexity": 3.7056829077179225, + "prompt_name": "null", + "task_name": "gsarti/flores_101_hrv", + "word_perplexity": 6900.120730244519 + } + }, + "gsarti/flores_101_hun+null": { + "2022-07-07-14-06-20": { + "bits_per_byte": 2.02097486601819, + "byte_perplexity": 4.058579478967854, + "prompt_name": "null", + "task_name": "gsarti/flores_101_hun", + "word_perplexity": 83269.28323528428 + } + }, + "gsarti/flores_101_hye+null": { + "2022-07-07-14-06-16": { + "bits_per_byte": 1.6448889355974015, + "byte_perplexity": 3.127237816041562, + "prompt_name": "null", + "task_name": "gsarti/flores_101_hye", + "word_perplexity": 7955380.444287513 + } + }, + "gsarti/flores_101_ibo+null": { + "2022-07-07-14-06-18": { + "bits_per_byte": 1.981865727696552, + "byte_perplexity": 3.9500357969906683, + "prompt_name": "null", + "task_name": "gsarti/flores_101_ibo", + "word_perplexity": 6427.777777340096 + } + }, + "gsarti/flores_101_ind+null": { + "2022-07-07-14-06-23": { + "bits_per_byte": 0.9827023762328951, + "byte_perplexity": 1.976163584180101, + "prompt_name": "null", + "task_name": "gsarti/flores_101_ind", + "word_perplexity": 130.55554436513884 + } + }, + "gsarti/flores_101_isl+null": { + "2022-07-07-14-06-19": { + "bits_per_byte": 2.4595738050085134, + "byte_perplexity": 5.500542085165231, + "prompt_name": "null", + "task_name": "gsarti/flores_101_isl", + "word_perplexity": 147157.07087099738 + } + }, + "gsarti/flores_101_ita+null": { + "2022-07-07-14-13-44": { + "bits_per_byte": 1.2106788087517997, + "byte_perplexity": 2.314465100752677, + "prompt_name": "null", + "task_name": "gsarti/flores_101_ita", + "word_perplexity": 223.75576374226293 + } + }, + "gsarti/flores_101_jav+null": { + "2022-07-07-14-14-07": { + "bits_per_byte": 2.305189137915484, + "byte_perplexity": 4.942322446550142, + "prompt_name": "null", + "task_name": "gsarti/flores_101_jav", + "word_perplexity": 56927.36383380598 + } + }, + "gsarti/flores_101_jpn+null": { + "2022-07-07-14-13-48": { + "bits_per_byte": 1.175953593703319, + "byte_perplexity": 2.259421750521777, + "prompt_name": "null", + "task_name": "gsarti/flores_101_jpn", + "word_perplexity": 1.989482300539279e+53 + } + }, + "gsarti/flores_101_kam+null": { + "2022-07-07-14-14-23": { + "bits_per_byte": 3.2843698158997707, + "byte_perplexity": 9.743025325635475, + "prompt_name": "null", + "task_name": "gsarti/flores_101_kam", + "word_perplexity": 1570523.993596921 + } + }, + "gsarti/flores_101_kan+null": { + "2022-07-07-14-14-36": { + "bits_per_byte": 2.6400944426125923, + "byte_perplexity": 6.233724699944989, + "prompt_name": "null", + "task_name": 
"gsarti/flores_101_kan", + "word_perplexity": 2.579382882710284e+18 + } + }, + "gsarti/flores_101_kat+null": { + "2022-07-07-14-06-27": { + "bits_per_byte": 1.036249651422714, + "byte_perplexity": 2.0508893415872107, + "prompt_name": "null", + "task_name": "gsarti/flores_101_kat", + "word_perplexity": 10991371.770506607 + } + }, + "gsarti/flores_101_kaz+null": { + "2022-07-07-14-13-54": { + "bits_per_byte": 1.603603725310636, + "byte_perplexity": 3.0390148516287927, + "prompt_name": "null", + "task_name": "gsarti/flores_101_kaz", + "word_perplexity": 8102444.2336702505 + } + }, + "gsarti/flores_101_kea+null": { + "2022-07-07-14-13-45": { + "bits_per_byte": 2.837364488186208, + "byte_perplexity": 7.147132270533836, + "prompt_name": "null", + "task_name": "gsarti/flores_101_kea", + "word_perplexity": 95562.77879078267 + } + }, + "gsarti/flores_101_khm+null": { + "2022-07-07-14-14-10": { + "bits_per_byte": 1.7512557688127433, + "byte_perplexity": 3.366514710252477, + "prompt_name": "null", + "task_name": "gsarti/flores_101_khm", + "word_perplexity": 4.197005695076373e+39 + } + }, + "gsarti/flores_101_kir+null": { + "2022-07-07-14-14-07": { + "bits_per_byte": 1.6966101823953679, + "byte_perplexity": 3.2413845359487885, + "prompt_name": "null", + "task_name": "gsarti/flores_101_kir", + "word_perplexity": 19039163.450140566 + } + }, + "gsarti/flores_101_kor+null": { + "2022-07-07-14-14-04": { + "bits_per_byte": 1.537206420019471, + "byte_perplexity": 2.9023196482741027, + "prompt_name": "null", + "task_name": "gsarti/flores_101_kor", + "word_perplexity": 53727.19279762784 + } + }, + "gsarti/flores_101_lao+null": { + "2022-07-07-14-14-09": { + "bits_per_byte": 1.2212255445422597, + "byte_perplexity": 2.331446855837494, + "prompt_name": "null", + "task_name": "gsarti/flores_101_lao", + "word_perplexity": 1.755073374034173e+21 + } + }, + "gsarti/flores_101_lav+null": { + "2022-07-07-14-14-08": { + "bits_per_byte": 2.385046916201485, + "byte_perplexity": 5.223609016485348, + "prompt_name": "null", + "task_name": "gsarti/flores_101_lav", + "word_perplexity": 470678.0114749354 + } + }, + "gsarti/flores_101_lin+null": { + "2022-07-07-14-14-26": { + "bits_per_byte": 2.2772323281794145, + "byte_perplexity": 4.847471204107301, + "prompt_name": "null", + "task_name": "gsarti/flores_101_lin", + "word_perplexity": 12145.180963052675 + } + }, + "gsarti/flores_101_lit+null": { + "2022-07-07-14-14-22": { + "bits_per_byte": 2.1837099441165986, + "byte_perplexity": 4.5432035498036765, + "prompt_name": "null", + "task_name": "gsarti/flores_101_lit", + "word_perplexity": 178903.9319643639 + } + }, + "gsarti/flores_101_ltz+null": { + "2022-07-07-14-14-21": { + "bits_per_byte": 2.4831196849499797, + "byte_perplexity": 5.5910516978201015, + "prompt_name": "null", + "task_name": "gsarti/flores_101_ltz", + "word_perplexity": 170503.0141233499 + } + }, + "gsarti/flores_101_lug+null": { + "2022-07-07-14-06-22": { + "bits_per_byte": 2.440980093744309, + "byte_perplexity": 5.4301049946044175, + "prompt_name": "null", + "task_name": "gsarti/flores_101_lug", + "word_perplexity": 499718.8154062185 + } + }, + "gsarti/flores_101_luo+null": { + "2022-07-07-14-14-25": { + "bits_per_byte": 3.588688237531865, + "byte_perplexity": 12.031029857399394, + "prompt_name": "null", + "task_name": "gsarti/flores_101_luo", + "word_perplexity": 1370453.058608639 + } + }, + "gsarti/flores_101_mal+null": { + "2022-07-07-14-14-25": { + "bits_per_byte": 2.26132095423743, + "byte_perplexity": 4.794302548141229, + "prompt_name": "null", + 
"task_name": "gsarti/flores_101_mal", + "word_perplexity": 3.388536066729448e+18 + } + }, + "gsarti/flores_101_mar+null": { + "2022-07-07-14-14-30": { + "bits_per_byte": 2.7775106680155135, + "byte_perplexity": 6.856682255407709, + "prompt_name": "null", + "task_name": "gsarti/flores_101_mar", + "word_perplexity": 3438386422218656.5 + } + }, + "gsarti/flores_101_mkd+null": { + "2022-07-07-14-14-29": { + "bits_per_byte": 1.223678604492143, + "byte_perplexity": 2.3354144607382983, + "prompt_name": "null", + "task_name": "gsarti/flores_101_mkd", + "word_perplexity": 18353.643927570258 + } + }, + "gsarti/flores_101_mlt+null": { + "2022-07-07-14-14-26": { + "bits_per_byte": 3.1765385669297954, + "byte_perplexity": 9.04135227904975, + "prompt_name": "null", + "task_name": "gsarti/flores_101_mlt", + "word_perplexity": 33745913.639318146 + } + }, + "gsarti/flores_101_mon+null": { + "2022-07-07-14-14-30": { + "bits_per_byte": 1.6298963953321588, + "byte_perplexity": 3.094907723618666, + "prompt_name": "null", + "task_name": "gsarti/flores_101_mon", + "word_perplexity": 1907397.8049612553 + } + }, + "gsarti/flores_101_mri+null": { + "2022-07-07-14-14-28": { + "bits_per_byte": 2.3966992569986503, + "byte_perplexity": 5.2659698341456505, + "prompt_name": "null", + "task_name": "gsarti/flores_101_mri", + "word_perplexity": 4495.291540723779 + } + }, + "gsarti/flores_101_msa+null": { + "2022-07-07-14-14-10": { + "bits_per_byte": 1.151909452539792, + "byte_perplexity": 2.2220779892820985, + "prompt_name": "null", + "task_name": "gsarti/flores_101_msa", + "word_perplexity": 323.9979770876354 + } + }, + "gsarti/flores_101_mya+null": { + "2022-07-07-14-06-21": { + "bits_per_byte": 1.3350921644793414, + "byte_perplexity": 2.5229159853414433, + "prompt_name": "null", + "task_name": "gsarti/flores_101_mya", + "word_perplexity": 5.908316156500621e+17 + } + }, + "gsarti/flores_101_nld+null": { + "2022-07-07-14-06-23": { + "bits_per_byte": 1.4849903917716452, + "byte_perplexity": 2.799153089002766, + "prompt_name": "null", + "task_name": "gsarti/flores_101_nld", + "word_perplexity": 663.1010445169663 + } + }, + "gsarti/flores_101_nob+null": { + "2022-07-07-14-14-28": { + "bits_per_byte": 1.85954901873699, + "byte_perplexity": 3.628942049758715, + "prompt_name": "null", + "task_name": "gsarti/flores_101_nob", + "word_perplexity": 3091.8317909769676 + } + }, + "gsarti/flores_101_npi+null": { + "2022-07-07-14-14-31": { + "bits_per_byte": 2.736872507282517, + "byte_perplexity": 6.666236527803879, + "prompt_name": "null", + "task_name": "gsarti/flores_101_npi", + "word_perplexity": 830105383859020.1 + } + }, + "gsarti/flores_101_nso+null": { + "2022-07-07-14-14-31": { + "bits_per_byte": 2.3263414881148097, + "byte_perplexity": 5.015319074943932, + "prompt_name": "null", + "task_name": "gsarti/flores_101_nso", + "word_perplexity": 6090.076710210891 + } + }, + "gsarti/flores_101_nya+null": { + "2022-07-07-14-14-43": { + "bits_per_byte": 2.303939703449059, + "byte_perplexity": 4.938044040751036, + "prompt_name": "null", + "task_name": "gsarti/flores_101_nya", + "word_perplexity": 152181.63800903904 + } + }, + "gsarti/flores_101_oci+null": { + "2022-07-07-14-14-29": { + "bits_per_byte": 1.8509757060445136, + "byte_perplexity": 3.607440766288032, + "prompt_name": "null", + "task_name": "gsarti/flores_101_oci", + "word_perplexity": 3290.4072842732626 + } + }, + "gsarti/flores_101_orm+null": { + "2022-07-07-14-15-06": { + "bits_per_byte": 3.5002731101319413, + "byte_perplexity": 11.31585044916705, + "prompt_name": "null", + 
"task_name": "gsarti/flores_101_orm", + "word_perplexity": 325425986.15171534 + } + }, + "gsarti/flores_101_ory+null": { + "2022-07-07-14-15-11": { + "bits_per_byte": 2.5806016668954035, + "byte_perplexity": 5.981891184515959, + "prompt_name": "null", + "task_name": "gsarti/flores_101_ory", + "word_perplexity": 159585492115824.62 + } + }, + "gsarti/flores_101_pan+null": { + "2022-07-07-14-15-14": { + "bits_per_byte": 2.254475733606418, + "byte_perplexity": 4.7716086841502685, + "prompt_name": "null", + "task_name": "gsarti/flores_101_pan", + "word_perplexity": 1041078005.1029873 + } + }, + "gsarti/flores_101_pol+null": { + "2022-07-07-14-15-02": { + "bits_per_byte": 1.5907226041939495, + "byte_perplexity": 3.01200174157614, + "prompt_name": "null", + "task_name": "gsarti/flores_101_pol", + "word_perplexity": 4097.840857891773 + } + }, + "gsarti/flores_101_por+null": { + "2022-07-07-14-15-09": { + "bits_per_byte": 0.8806049840114304, + "byte_perplexity": 1.8411472115156693, + "prompt_name": "null", + "task_name": "gsarti/flores_101_por", + "word_perplexity": 45.180896865996075 + } + }, + "gsarti/flores_101_pus+null": { + "2022-07-07-14-14-48": { + "bits_per_byte": 2.2091017485050926, + "byte_perplexity": 4.623872921169341, + "prompt_name": "null", + "task_name": "gsarti/flores_101_pus", + "word_perplexity": 191386.96470925998 + } + }, + "gsarti/flores_101_ron+null": { + "2022-07-07-14-15-08": { + "bits_per_byte": 1.608728549763225, + "byte_perplexity": 3.049829411973529, + "prompt_name": "null", + "task_name": "gsarti/flores_101_ron", + "word_perplexity": 1493.2384235032523 + } + }, + "gsarti/flores_101_rus+null": { + "2022-07-07-14-15-39": { + "bits_per_byte": 0.7725988392261112, + "byte_perplexity": 1.7083443875791493, + "prompt_name": "null", + "task_name": "gsarti/flores_101_rus", + "word_perplexity": 1154.4700888886157 + } + }, + "gsarti/flores_101_slk+null": { + "2022-07-07-14-20-59": { + "bits_per_byte": 2.0135407440510016, + "byte_perplexity": 4.037719650548048, + "prompt_name": "null", + "task_name": "gsarti/flores_101_slk", + "word_perplexity": 25449.382756457533 + } + }, + "gsarti/flores_101_slv+null": { + "2022-07-07-14-21-01": { + "bits_per_byte": 2.0499918450523906, + "byte_perplexity": 4.141036287764831, + "prompt_name": "null", + "task_name": "gsarti/flores_101_slv", + "word_perplexity": 12495.414212669684 + } + }, + "gsarti/flores_101_sna+null": { + "2022-07-07-14-16-11": { + "bits_per_byte": 2.23600833315793, + "byte_perplexity": 4.7109183690601295, + "prompt_name": "null", + "task_name": "gsarti/flores_101_sna", + "word_perplexity": 466769.1108859902 + } + }, + "gsarti/flores_101_snd+null": { + "2022-07-07-14-21-11": { + "bits_per_byte": 2.0725074799023684, + "byte_perplexity": 4.206170931541356, + "prompt_name": "null", + "task_name": "gsarti/flores_101_snd", + "word_perplexity": 176077.83905049277 + } + }, + "gsarti/flores_101_som+null": { + "2022-07-07-14-22-05": { + "bits_per_byte": 3.194456204262313, + "byte_perplexity": 9.154342083821405, + "prompt_name": "null", + "task_name": "gsarti/flores_101_som", + "word_perplexity": 1622793.7092813463 + } + }, + "gsarti/flores_101_spa+null": { + "2022-07-07-14-21-38": { + "bits_per_byte": 0.8444512426025511, + "byte_perplexity": 1.7955816311143258, + "prompt_name": "null", + "task_name": "gsarti/flores_101_spa", + "word_perplexity": 36.11085348176834 + } + }, + "gsarti/flores_101_srp+null": { + "2022-07-07-14-16-06": { + "bits_per_byte": 1.1642045407667931, + "byte_perplexity": 2.241096141430147, + "prompt_name": "null", + 
"task_name": "gsarti/flores_101_srp", + "word_perplexity": 10448.866013109007 + } + }, + "gsarti/flores_101_swe+null": { + "2022-07-07-14-22-35": { + "bits_per_byte": 1.741996368450284, + "byte_perplexity": 3.344977179674293, + "prompt_name": "null", + "task_name": "gsarti/flores_101_swe", + "word_perplexity": 3202.366699713645 + } + }, + "gsarti/flores_101_swh+null": { + "2022-07-07-14-22-41": { + "bits_per_byte": 1.424614292152873, + "byte_perplexity": 2.6844272218041634, + "prompt_name": "null", + "task_name": "gsarti/flores_101_swh", + "word_perplexity": 598.3741533197398 + } + }, + "gsarti/flores_101_tam+null": { + "2022-07-07-14-22-04": { + "bits_per_byte": 2.3686552650983432, + "byte_perplexity": 5.1645951632801745, + "prompt_name": "null", + "task_name": "gsarti/flores_101_tam", + "word_perplexity": 4.2156611720329824e+17 + } + }, + "gsarti/flores_101_tel+null": { + "2022-07-07-14-21-57": { + "bits_per_byte": 2.7676335418251155, + "byte_perplexity": 6.8098996634099445, + "prompt_name": "null", + "task_name": "gsarti/flores_101_tel", + "word_perplexity": 1.6783568147971315e+17 + } + }, + "gsarti/flores_101_tgk+null": { + "2022-07-07-14-21-58": { + "bits_per_byte": 1.920467486722578, + "byte_perplexity": 3.785457016715163, + "prompt_name": "null", + "task_name": "gsarti/flores_101_tgk", + "word_perplexity": 8511682.45733277 + } + }, + "gsarti/flores_101_tgl+null": { + "2022-07-07-14-06-19": { + "bits_per_byte": 1.9068503398392065, + "byte_perplexity": 3.7498953645610875, + "prompt_name": "null", + "task_name": "gsarti/flores_101_tgl", + "word_perplexity": 3650.874856717302 + } + }, + "gsarti/flores_101_tha+null": { + "2022-07-07-14-21-55": { + "bits_per_byte": 1.0732386950813277, + "byte_perplexity": 2.104151663233468, + "prompt_name": "null", + "task_name": "gsarti/flores_101_tha", + "word_perplexity": 1.4183796312158814e+27 + } + }, + "gsarti/flores_101_tur+null": { + "2022-07-07-14-22-16": { + "bits_per_byte": 1.7302373624582268, + "byte_perplexity": 3.3178240103796037, + "prompt_name": "null", + "task_name": "gsarti/flores_101_tur", + "word_perplexity": 23327.73979317249 + } + }, + "gsarti/flores_101_ukr+null": { + "2022-07-07-14-22-21": { + "bits_per_byte": 1.0624971487308417, + "byte_perplexity": 2.088543437159643, + "prompt_name": "null", + "task_name": "gsarti/flores_101_ukr", + "word_perplexity": 12491.267298453955 + } + }, + "gsarti/flores_101_umb+null": { + "2022-07-07-14-22-16": { + "bits_per_byte": 3.5565536775145463, + "byte_perplexity": 11.766013385445124, + "prompt_name": "null", + "task_name": "gsarti/flores_101_umb", + "word_perplexity": 153305092.33867013 + } + }, + "gsarti/flores_101_urd+null": { + "2022-07-07-14-22-31": { + "bits_per_byte": 0.8309610697985835, + "byte_perplexity": 1.7788699847612357, + "prompt_name": "null", + "task_name": "gsarti/flores_101_urd", + "word_perplexity": 120.94778603395578 + } + }, + "gsarti/flores_101_uzb+null": { + "2022-07-07-14-22-55": { + "bits_per_byte": 3.0874424504432936, + "byte_perplexity": 8.499879863290486, + "prompt_name": "null", + "task_name": "gsarti/flores_101_uzb", + "word_perplexity": 39248529.48307052 + } + }, + "gsarti/flores_101_vie+null": { + "2022-07-07-14-22-35": { + "bits_per_byte": 0.7303243859036422, + "byte_perplexity": 1.65901207387262, + "prompt_name": "null", + "task_name": "gsarti/flores_101_vie", + "word_perplexity": 20.72737503298138 + } + }, + "gsarti/flores_101_wol+null": { + "2022-07-07-14-22-41": { + "bits_per_byte": 2.6186389341371745, + "byte_perplexity": 6.141703791276928, + "prompt_name": 
"null", + "task_name": "gsarti/flores_101_wol", + "word_perplexity": 14624.927467504334 + } + }, + "gsarti/flores_101_xho+null": { + "2022-07-07-14-22-38": { + "bits_per_byte": 2.229649344545252, + "byte_perplexity": 4.690199677955254, + "prompt_name": "null", + "task_name": "gsarti/flores_101_xho", + "word_perplexity": 936027.0652623974 + } + }, + "gsarti/flores_101_yor+null": { + "2022-07-07-14-22-34": { + "bits_per_byte": 2.1245219249827945, + "byte_perplexity": 4.360585696242932, + "prompt_name": "null", + "task_name": "gsarti/flores_101_yor", + "word_perplexity": 17315.274646038364 + } + }, + "gsarti/flores_101_zho_simpl+null": { + "2022-07-07-14-06-11": { + "bits_per_byte": 1.0829440932863963, + "byte_perplexity": 2.1183545781883515, + "prompt_name": "null", + "task_name": "gsarti/flores_101_zho_simpl", + "word_perplexity": 5.350460585250868e+18 + } + }, + "gsarti/flores_101_zho_trad+null": { + "2022-07-07-14-06-11": { + "bits_per_byte": 1.1850976756964047, + "byte_perplexity": 2.273787884962656, + "prompt_name": "null", + "task_name": "gsarti/flores_101_zho_trad", + "word_perplexity": 2.951927469429808e+21 + } + }, + "gsarti/flores_101_zul+null": { + "2022-07-07-14-22-45": { + "bits_per_byte": 2.589033511393412, + "byte_perplexity": 6.016954767729589, + "prompt_name": "null", + "task_name": "gsarti/flores_101_zul", + "word_perplexity": 18904448.40499978 + } + }, + "headqa": { + "2022-07-07-20-56-58": { + "acc": 0.3464624361779723, + "acc_norm": 0.37892049598832966, + "acc_norm_stderr": 0.009266017786984363, + "acc_stderr": 0.009088847929910096 + } + }, + "hellaswag": { + "2022-07-07-20-56-58": { + "acc": 0.5353515236008763, + "acc_norm": 0.6928898625771759, + "acc_norm_stderr": 0.004603527017557853, + "acc_stderr": 0.004977294024778004 + } + }, + "lambada": { + "2022-07-07-20-57-38": { + "acc": 0.6720357073549389, + "acc_stderr": 0.006540659313970564, + "ppl": 4.026306193510304, + "ppl_stderr": 0.09275418030810198 + } + }, + "lambada_mt_de": { + "2022-07-07-21-08-50": { + "acc": 0.3291286629148069, + "acc_stderr": 0.0065465809755531025, + "ppl": 92.13902599578957, + "ppl_stderr": 5.788915447731226 + } + }, + "lambada_mt_en": { + "2022-07-07-21-08-29": { + "acc": 0.6720357073549389, + "acc_stderr": 0.006540659313970564, + "ppl": 4.026306193510304, + "ppl_stderr": 0.09275418030810198 + } + }, + "lambada_mt_es": { + "2022-07-07-21-09-13": { + "acc": 0.476421502037648, + "acc_stderr": 0.00695822793758654, + "ppl": 24.963069573614494, + "ppl_stderr": 1.2397630721670656 + } + }, + "lambada_mt_it": { + "2022-07-07-21-09-05": { + "acc": 0.4061711624296526, + "acc_stderr": 0.006842223524282644, + "ppl": 75.60794415472333, + "ppl_stderr": 5.172800941942063 + } + }, + "logiqa": { + "2022-07-07-20-57-54": { + "acc": 0.2350230414746544, + "acc_norm": 0.261136712749616, + "acc_norm_stderr": 0.017228970682408612, + "acc_stderr": 0.01663116682389096 + } + }, + "mathqa": { + "2022-07-07-20-58-21": { + "acc": 0.27671691792294806, + "acc_norm": 0.27403685092127306, + "acc_norm_stderr": 0.008165116067449045, + "acc_stderr": 0.008189786871508193 + } + }, + "mc_taco": { + "2022-07-07-20-58-49": { + "em": 0.13063063063063063, + "f1": 0.4900140715704704 + } + }, + "mnli+GPT-3 style": { + "2022-07-07-15-16-45": { + "acc": 0.3384615384615385, + "acc_norm": 0.3171676006113092, + "acc_norm_stderr": 0.00469762604036304, + "acc_stderr": 0.004776493430213433, + "prompt_name": "GPT-3 style", + "task_name": "mnli" + } + }, + "mnli+MNLI crowdsource": { + "2022-07-07-15-16-45": { + "acc": 0.3132628152969894, + 
"acc_norm": 0.3177379983726607, + "acc_norm_stderr": 0.004695818707274009, + "acc_stderr": 0.0046778991762110485, + "prompt_name": "MNLI crowdsource", + "task_name": "mnli" + } + }, + "mnli+always/sometimes/never": { + "2022-07-07-15-16-45": { + "acc": 0.3260781122864117, + "acc_norm": 0.31834825061025224, + "acc_norm_stderr": 0.004698223389253125, + "acc_stderr": 0.004727883394602418, + "prompt_name": "always/sometimes/never", + "task_name": "mnli" + } + }, + "mnli+based on the previous passage": { + "2022-07-07-15-16-45": { + "acc": 0.3695077298616762, + "acc_norm": 0.3135679414157852, + "acc_norm_stderr": 0.004679136972634036, + "acc_stderr": 0.004868024991836125, + "prompt_name": "based on the previous passage", + "task_name": "mnli" + } + }, + "mnli+can we infer": { + "2022-07-07-15-16-45": { + "acc": 0.3791700569568755, + "acc_norm": 0.31916192026037427, + "acc_norm_stderr": 0.004701415184999708, + "acc_stderr": 0.004893329902713743, + "prompt_name": "can we infer", + "task_name": "mnli" + } + }, + "mnli+claim true/false/inconclusive": { + "2022-07-07-15-16-45": { + "acc": 0.3412327095199349, + "acc_norm": 0.318246541903987, + "acc_norm_stderr": 0.004697823254367764, + "acc_stderr": 0.004781811948253186, + "prompt_name": "claim true/false/inconclusive", + "task_name": "mnli" + } + }, + "mnli+consider always/sometimes/never": { + "2022-07-07-15-16-45": { + "acc": 0.31834825061025224, + "acc_norm": 0.318246541903987, + "acc_norm_stderr": 0.004697823254367764, + "acc_stderr": 0.004698223389253125, + "prompt_name": "consider always/sometimes/never", + "task_name": "mnli" + } + }, + "mnli+does it follow that": { + "2022-07-07-15-16-45": { + "acc": 0.3810008136696501, + "acc_norm": 0.33482506102522375, + "acc_norm_stderr": 0.004759683441650661, + "acc_stderr": 0.0048978913011331945, + "prompt_name": "does it follow that", + "task_name": "mnli" + } + }, + "mnli+does this imply": { + "2022-07-07-15-16-45": { + "acc": 0.31814483319772174, + "acc_norm": 0.318246541903987, + "acc_norm_stderr": 0.004697823254367764, + "acc_stderr": 0.004697422861392528, + "prompt_name": "does this imply", + "task_name": "mnli" + } + }, + "mnli+guaranteed true": { + "2022-07-07-15-16-45": { + "acc": 0.3545565500406835, + "acc_norm": 0.3169243287225387, + "acc_norm_stderr": 0.004692597990597633, + "acc_stderr": 0.004824729920335505, + "prompt_name": "guaranteed true", + "task_name": "mnli" + } + }, + "mnli+guaranteed/possible/impossible": { + "2022-07-07-15-16-45": { + "acc": 0.3195687550854353, + "acc_norm": 0.33696094385679415, + "acc_norm_stderr": 0.004767168365987739, + "acc_stderr": 0.004703004900804848, + "prompt_name": "guaranteed/possible/impossible", + "task_name": "mnli" + } + }, + "mnli+justified in saying": { + "2022-07-07-15-16-45": { + "acc": 0.3577095199349064, + "acc_norm": 0.31916192026037427, + "acc_norm_stderr": 0.004701415184999707, + "acc_stderr": 0.004834283814408599, + "prompt_name": "justified in saying", + "task_name": "mnli" + } + }, + "mnli+must be true": { + "2022-07-07-15-16-45": { + "acc": 0.3831366965012205, + "acc_norm": 0.31834825061025224, + "acc_norm_stderr": 0.004698223389253125, + "acc_stderr": 0.004903119688196198, + "prompt_name": "must be true", + "task_name": "mnli" + } + }, + "mnli+should assume": { + "2022-07-07-15-16-45": { + "acc": 0.3682872253864931, + "acc_norm": 0.3184499593165175, + "acc_norm_stderr": 0.004698623266114402, + "acc_stderr": 0.004864680353620058, + "prompt_name": "should assume", + "task_name": "mnli" + } + }, + "mnli+take the following as truth": { + 
"2022-07-07-15-16-45": { + "acc": 0.3605573637103336, + "acc_norm": 0.318246541903987, + "acc_norm_stderr": 0.004697823254367764, + "acc_stderr": 0.0048427174642626716, + "prompt_name": "take the following as truth", + "task_name": "mnli" + } + }, + "mnli_mismatched+GPT-3 style": { + "2022-07-07-15-16-55": { + "acc": 0.3384615384615385, + "acc_norm": 0.3171676006113092, + "acc_norm_stderr": 0.00469762604036304, + "acc_stderr": 0.004776493430213433, + "prompt_name": "GPT-3 style", + "task_name": "mnli_mismatched" + } + }, + "mnli_mismatched+MNLI crowdsource": { + "2022-07-07-15-16-55": { + "acc": 0.3132628152969894, + "acc_norm": 0.3177379983726607, + "acc_norm_stderr": 0.004695818707274009, + "acc_stderr": 0.0046778991762110485, + "prompt_name": "MNLI crowdsource", + "task_name": "mnli_mismatched" + } + }, + "mnli_mismatched+always/sometimes/never": { + "2022-07-07-15-16-55": { + "acc": 0.3260781122864117, + "acc_norm": 0.31834825061025224, + "acc_norm_stderr": 0.004698223389253125, + "acc_stderr": 0.004727883394602418, + "prompt_name": "always/sometimes/never", + "task_name": "mnli_mismatched" + } + }, + "mnli_mismatched+based on the previous passage": { + "2022-07-07-15-16-55": { + "acc": 0.3695077298616762, + "acc_norm": 0.3135679414157852, + "acc_norm_stderr": 0.004679136972634036, + "acc_stderr": 0.004868024991836125, + "prompt_name": "based on the previous passage", + "task_name": "mnli_mismatched" + } + }, + "mnli_mismatched+can we infer": { + "2022-07-07-15-16-55": { + "acc": 0.3791700569568755, + "acc_norm": 0.31916192026037427, + "acc_norm_stderr": 0.004701415184999708, + "acc_stderr": 0.004893329902713743, + "prompt_name": "can we infer", + "task_name": "mnli_mismatched" + } + }, + "mnli_mismatched+claim true/false/inconclusive": { + "2022-07-07-15-16-55": { + "acc": 0.3412327095199349, + "acc_norm": 0.318246541903987, + "acc_norm_stderr": 0.004697823254367764, + "acc_stderr": 0.004781811948253186, + "prompt_name": "claim true/false/inconclusive", + "task_name": "mnli_mismatched" + } + }, + "mnli_mismatched+consider always/sometimes/never": { + "2022-07-07-15-16-55": { + "acc": 0.31834825061025224, + "acc_norm": 0.318246541903987, + "acc_norm_stderr": 0.004697823254367764, + "acc_stderr": 0.004698223389253125, + "prompt_name": "consider always/sometimes/never", + "task_name": "mnli_mismatched" + } + }, + "mnli_mismatched+does it follow that": { + "2022-07-07-15-16-55": { + "acc": 0.3810008136696501, + "acc_norm": 0.33482506102522375, + "acc_norm_stderr": 0.004759683441650661, + "acc_stderr": 0.0048978913011331945, + "prompt_name": "does it follow that", + "task_name": "mnli_mismatched" + } + }, + "mnli_mismatched+does this imply": { + "2022-07-07-15-16-55": { + "acc": 0.31814483319772174, + "acc_norm": 0.318246541903987, + "acc_norm_stderr": 0.004697823254367764, + "acc_stderr": 0.004697422861392528, + "prompt_name": "does this imply", + "task_name": "mnli_mismatched" + } + }, + "mnli_mismatched+guaranteed true": { + "2022-07-07-15-16-55": { + "acc": 0.3545565500406835, + "acc_norm": 0.3169243287225387, + "acc_norm_stderr": 0.004692597990597633, + "acc_stderr": 0.004824729920335505, + "prompt_name": "guaranteed true", + "task_name": "mnli_mismatched" + } + }, + "mnli_mismatched+guaranteed/possible/impossible": { + "2022-07-07-15-16-55": { + "acc": 0.3195687550854353, + "acc_norm": 0.33696094385679415, + "acc_norm_stderr": 0.004767168365987739, + "acc_stderr": 0.004703004900804848, + "prompt_name": "guaranteed/possible/impossible", + "task_name": "mnli_mismatched" + } + }, + 
"mnli_mismatched+justified in saying": { + "2022-07-07-15-16-55": { + "acc": 0.3577095199349064, + "acc_norm": 0.31916192026037427, + "acc_norm_stderr": 0.004701415184999707, + "acc_stderr": 0.004834283814408599, + "prompt_name": "justified in saying", + "task_name": "mnli_mismatched" + } + }, + "mnli_mismatched+must be true": { + "2022-07-07-15-16-55": { + "acc": 0.3831366965012205, + "acc_norm": 0.31834825061025224, + "acc_norm_stderr": 0.004698223389253125, + "acc_stderr": 0.004903119688196198, + "prompt_name": "must be true", + "task_name": "mnli_mismatched" + } + }, + "mnli_mismatched+should assume": { + "2022-07-07-15-16-55": { + "acc": 0.3682872253864931, + "acc_norm": 0.3184499593165175, + "acc_norm_stderr": 0.004698623266114402, + "acc_stderr": 0.004864680353620058, + "prompt_name": "should assume", + "task_name": "mnli_mismatched" + } + }, + "mnli_mismatched+take the following as truth": { + "2022-07-07-15-16-55": { + "acc": 0.3605573637103336, + "acc_norm": 0.318246541903987, + "acc_norm_stderr": 0.004697823254367764, + "acc_stderr": 0.0048427174642626716, + "prompt_name": "take the following as truth", + "task_name": "mnli_mismatched" + } + }, + "mrpc": { + "2022-07-07-21-01-04": { + "acc": 0.3872549019607843, + "acc_stderr": 0.02414577670826772, + "f1": 0.255952380952381, + "f1_stderr": 0.031339938960756396 + } + }, + "multirc": { + "2022-07-07-21-01-16": { + "acc": 0.024134312696747113, + "acc_stderr": 0.004973865274017642 + } + }, + "multirc+I was going to say\u2026": { + "2022-07-07-15-16-45": { + "acc": 0.5759075907590759, + "acc_norm": 0.4319306930693069, + "acc_norm_stderr": 0.007114939075426624, + "acc_stderr": 0.007098558097324984, + "prompt_name": "I was going to say\u2026", + "task_name": "multirc" + } + }, + "multirc+Would it be good to answer\u2026": { + "2022-07-07-15-16-45": { + "acc": 0.5775577557755776, + "acc_norm": 0.42924917491749176, + "acc_norm_stderr": 0.007109539945167024, + "acc_stderr": 0.007094877001150217, + "prompt_name": "Would it be good to answer\u2026", + "task_name": "multirc" + } + }, + "multirc+confirm": { + "2022-07-07-15-16-45": { + "acc": 0.5717821782178217, + "acc_norm": 0.4280115511551155, + "acc_norm_stderr": 0.007106976252751536, + "acc_stderr": 0.007107406686707527, + "prompt_name": "confirm", + "task_name": "multirc" + } + }, + "multirc+correct": { + "2022-07-07-15-16-45": { + "acc": 0.5596122112211221, + "acc_norm": 0.46844059405940597, + "acc_norm_stderr": 0.00716748273289598, + "acc_stderr": 0.007130577682060969, + "prompt_name": "correct", + "task_name": "multirc" + } + }, + "multirc+decide_valid": { + "2022-07-07-15-16-45": { + "acc": 0.45028877887788776, + "acc_norm": 0.4280115511551155, + "acc_norm_stderr": 0.007106976252751536, + "acc_stderr": 0.00714621953052171, + "prompt_name": "decide_valid", + "task_name": "multirc" + } + }, + "multirc+found_this_answer": { + "2022-07-07-15-16-45": { + "acc": 0.570957095709571, + "acc_norm": 0.4284240924092409, + "acc_norm_stderr": 0.007107835859605359, + "acc_stderr": 0.007109115814226984, + "prompt_name": "found_this_answer", + "task_name": "multirc" + } + }, + "multirc+grading": { + "2022-07-07-15-16-45": { + "acc": 0.5284653465346535, + "acc_norm": 0.4280115511551155, + "acc_norm_stderr": 0.007106976252751536, + "acc_stderr": 0.0071701551755684, + "prompt_name": "grading", + "task_name": "multirc" + } + }, + "multirc+is the correct answer\u2026": { + "2022-07-07-15-16-45": { + "acc": 0.5754950495049505, + "acc_norm": 0.42966171617161714, + "acc_norm_stderr": 0.007110384427500554, + 
"acc_stderr": 0.0070994657086650955, + "prompt_name": "is the correct answer\u2026", + "task_name": "multirc" + } + }, + "multirc+is\u2026 a correct answer?": { + "2022-07-07-15-16-45": { + "acc": 0.5251650165016502, + "acc_norm": 0.4280115511551155, + "acc_norm_stderr": 0.007106976252751536, + "acc_stderr": 0.007172701181666727, + "prompt_name": "is\u2026 a correct answer?", + "task_name": "multirc" + } + }, + "multirc+paragraph\u2026 question\u2026 is it\u2026 ?": { + "2022-07-07-15-16-45": { + "acc": 0.6124174917491749, + "acc_norm": 0.44781353135313534, + "acc_norm_stderr": 0.0071425777457272445, + "acc_stderr": 0.0069979263566088456, + "prompt_name": "paragraph\u2026 question\u2026 is it\u2026 ?", + "task_name": "multirc" + } + }, + "openbookqa": { + "2022-07-07-21-01-47": { + "acc": 0.312, + "acc_norm": 0.44, + "acc_norm_stderr": 0.022221331534143064, + "acc_stderr": 0.020740596536488062 + } + }, + "piqa": { + "2022-07-07-21-02-18": { + "acc": 0.7812840043525572, + "acc_norm": 0.7829162132752993, + "acc_norm_stderr": 0.009618708415756785, + "acc_stderr": 0.009644731932667558 + } + }, + "prost": { + "2022-07-07-21-04-04": { + "acc": 0.2977156276686593, + "acc_norm": 0.294566609735269, + "acc_norm_stderr": 0.003330373296063641, + "acc_stderr": 0.003340646096835127 + } + }, + "pubmedqa": { + "2022-07-07-21-03-58": { + "acc": 0.741, + "acc_stderr": 0.013860415257527911 + } + }, + "qnli": { + "2022-07-07-21-04-17": { + "acc": 0.5172981878088962, + "acc_stderr": 0.00676136054845682 + } + }, + "qqp": { + "2022-07-07-21-04-21": { + "acc": 0.6101904526341826, + "acc_stderr": 0.002425562336154508, + "f1": 0.11896243291592129, + "f1_stderr": 0.003329403623169031 + } + }, + "qqp+answer": { + "2022-07-07-15-16-50": { + "acc": 0.5627009646302251, + "acc_norm": 0.36816720257234725, + "acc_norm_stderr": 0.002398706610614492, + "acc_stderr": 0.002467070668165308, + "prompt_name": "answer", + "task_name": "qqp" + } + }, + "qqp+duplicate": { + "2022-07-07-15-16-50": { + "acc": 0.5883007667573584, + "acc_norm": 0.36816720257234725, + "acc_norm_stderr": 0.002398706610614492, + "acc_stderr": 0.0024476157358395445, + "prompt_name": "duplicate", + "task_name": "qqp" + } + }, + "qqp+duplicate or not": { + "2022-07-07-15-16-50": { + "acc": 0.617932228543161, + "acc_norm": 0.6318327974276527, + "acc_norm_stderr": 0.002398706610614492, + "acc_stderr": 0.002416540768061597, + "prompt_name": "duplicate or not", + "task_name": "qqp" + } + }, + "qqp+meaning": { + "2022-07-07-15-16-50": { + "acc": 0.6302992827108582, + "acc_norm": 0.36816720257234725, + "acc_norm_stderr": 0.002398706610614492, + "acc_stderr": 0.0024007782802177528, + "prompt_name": "meaning", + "task_name": "qqp" + } + }, + "qqp+quora": { + "2022-07-07-15-16-50": { + "acc": 0.36834034133069504, + "acc_norm": 0.36816720257234725, + "acc_norm_stderr": 0.002398706610614492, + "acc_stderr": 0.0023989418126443636, + "prompt_name": "quora", + "task_name": "qqp" + } + }, + "qqp+same thing": { + "2022-07-07-15-16-50": { + "acc": 0.5667573583972297, + "acc_norm": 0.36816720257234725, + "acc_norm_stderr": 0.002398706610614492, + "acc_stderr": 0.002464436779635773, + "prompt_name": "same thing", + "task_name": "qqp" + } + }, + "race": { + "2022-07-07-21-04-32": { + "acc": 0.39043062200956935, + "acc_stderr": 0.01509848103949509 + } + }, + "rte": { + "2022-07-07-21-04-40": { + "acc": 0.631768953068592, + "acc_stderr": 0.029032524428023697 + } + }, + "rte+does the claim\u2026 follow the fact\u2026": { + "2022-07-07-15-16-52": { + "acc": 0.516245487364621, + 
"acc_norm": 0.5270758122743683, + "acc_norm_stderr": 0.0300523034631437, + "acc_stderr": 0.030080573208738064, + "prompt_name": "does the claim\u2026 follow the fact\u2026", + "task_name": "rte" + } + }, + "rte+entailment explained": { + "2022-07-07-15-16-52": { + "acc": 0.5270758122743683, + "acc_norm": 0.4729241877256318, + "acc_norm_stderr": 0.0300523034631437, + "acc_stderr": 0.0300523034631437, + "prompt_name": "entailment explained", + "task_name": "rte" + } + }, + "rte+imply": { + "2022-07-07-15-16-52": { + "acc": 0.49458483754512633, + "acc_norm": 0.5270758122743683, + "acc_norm_stderr": 0.0300523034631437, + "acc_stderr": 0.030094698123239966, + "prompt_name": "imply", + "task_name": "rte" + } + }, + "rte+imply separated": { + "2022-07-07-15-16-52": { + "acc": 0.4151624548736462, + "acc_norm": 0.5270758122743683, + "acc_norm_stderr": 0.0300523034631437, + "acc_stderr": 0.02966006629089348, + "prompt_name": "imply separated", + "task_name": "rte" + } + }, + "rte+mean": { + "2022-07-07-15-16-52": { + "acc": 0.5234657039711191, + "acc_norm": 0.5270758122743683, + "acc_norm_stderr": 0.0300523034631437, + "acc_stderr": 0.03006330041190266, + "prompt_name": "mean", + "task_name": "rte" + } + }, + "sciq": { + "2022-07-07-21-04-52": { + "acc": 0.936, + "acc_norm": 0.889, + "acc_norm_stderr": 0.009938701010583726, + "acc_stderr": 0.007743640226919302 + } + }, + "sst": { + "2022-07-07-21-04-53": { + "acc": 0.5860091743119266, + "acc_stderr": 0.016689314109193953 + } + }, + "sst+following positive negative": { + "2022-07-07-15-17-00": { + "acc": 0.5928899082568807, + "acc_norm": 0.5928899082568807, + "acc_norm_stderr": 0.01664691973879633, + "acc_stderr": 0.01664691973879633, + "prompt_name": "following positive negative", + "task_name": "sst" + } + }, + "sst+happy or mad": { + "2022-07-07-15-17-00": { + "acc": 0.6158256880733946, + "acc_norm": 0.5114678899082569, + "acc_norm_stderr": 0.016937396972070192, + "acc_stderr": 0.016481016111204397, + "prompt_name": "happy or mad", + "task_name": "sst" + } + }, + "sst+positive negative after": { + "2022-07-07-15-17-00": { + "acc": 0.658256880733945, + "acc_norm": 0.658256880733945, + "acc_norm_stderr": 0.016070837723775662, + "acc_stderr": 0.016070837723775662, + "prompt_name": "positive negative after", + "task_name": "sst" + } + }, + "sst+review": { + "2022-07-07-15-17-00": { + "acc": 0.6915137614678899, + "acc_norm": 0.6915137614678899, + "acc_norm_stderr": 0.01564981592304773, + "acc_stderr": 0.01564981592304773, + "prompt_name": "review", + "task_name": "sst" + } + }, + "sst+said": { + "2022-07-07-15-17-00": { + "acc": 0.49426605504587157, + "acc_norm": 0.5091743119266054, + "acc_norm_stderr": 0.01693900152535154, + "acc_stderr": 0.016940739619904895, + "prompt_name": "said", + "task_name": "sst" + } + }, + "triviaqa": { + "2022-07-07-21-05-30": { + "acc": 0.18332891363917617, + "acc_stderr": 0.003638055953312879 + } + }, + "tydiqa_primary+en_after_reading_the_text": { + "2022-07-07-15-17-00": { + "acc": 0.3246753246753247, + "acc_norm": 0.6753246753246753, + "acc_norm_stderr": 0.05371235012133188, + "acc_stderr": 0.05371235012133188, + "prompt_name": "en_after_reading_the_text", + "task_name": "tydiqa_primary" + } + }, + "tydiqa_primary+en_based_on_the_text": { + "2022-07-07-15-17-00": { + "acc": 0.3246753246753247, + "acc_norm": 0.6623376623376623, + "acc_norm_stderr": 0.05424681453014243, + "acc_stderr": 0.05371235012133188, + "prompt_name": "en_based_on_the_text", + "task_name": "tydiqa_primary" + } + }, + 
"tydiqa_primary+en_heres_what_I_found": { + "2022-07-07-15-17-00": { + "acc": 0.04364694471387003, + "acc_norm": 0.8865179437439379, + "acc_norm_stderr": 0.009882998992776547, + "acc_stderr": 0.006366011762341235, + "prompt_name": "en_heres_what_I_found", + "task_name": "tydiqa_primary" + } + }, + "tydiqa_primary+en_open_domain_qa": { + "2022-07-07-15-17-00": { + "acc": 0.45454545454545453, + "acc_norm": 0.6753246753246753, + "acc_norm_stderr": 0.05371235012133188, + "acc_stderr": 0.057116442039776665, + "prompt_name": "en_open_domain_qa", + "task_name": "tydiqa_primary" + } + }, + "tydiqa_primary+en_open_domain_qa_without_choices": { + "2022-07-07-15-17-00": { + "acc": 0.6103896103896104, + "acc_norm": 0.6753246753246753, + "acc_norm_stderr": 0.05371235012133188, + "acc_stderr": 0.055938656946933486, + "prompt_name": "en_open_domain_qa_without_choices", + "task_name": "tydiqa_primary" + } + }, + "tydiqa_primary+en_read_and_answer": { + "2022-07-07-15-17-00": { + "acc": 0.04655674102812803, + "acc_norm": 0.9000969932104753, + "acc_norm_stderr": 0.009343623339508961, + "acc_stderr": 0.006564778842833093, + "prompt_name": "en_read_and_answer", + "task_name": "tydiqa_primary" + } + }, + "tydiqa_primary+en_yes_no_none": { + "2022-07-07-15-17-00": { + "acc": 0.06013579049466537, + "acc_norm": 0.9097963142580019, + "acc_norm_stderr": 0.00892617949675601, + "acc_stderr": 0.007407650020843774, + "prompt_name": "en_yes_no_none", + "task_name": "tydiqa_primary" + } + }, + "tydiqa_primary+en_yes_no_question": { + "2022-07-07-15-17-00": { + "acc": 0.9078564500484966, + "acc_norm": 0.07856450048496605, + "acc_norm_stderr": 0.008383532155739852, + "acc_stderr": 0.009012026277429789, + "prompt_name": "en_yes_no_question", + "task_name": "tydiqa_primary" + } + }, + "tydiqa_primary+id_after_reading_the_text": { + "2022-07-07-15-17-00": { + "acc": 0.1864406779661017, + "acc_norm": 0.2033898305084746, + "acc_norm_stderr": 0.052853474644238056, + "acc_stderr": 0.05113884945465193, + "prompt_name": "id_after_reading_the_text", + "task_name": "tydiqa_primary" + } + }, + "tydiqa_primary+id_based_on_the_text": { + "2022-07-07-15-17-00": { + "acc": 0.23728813559322035, + "acc_norm": 0.2033898305084746, + "acc_norm_stderr": 0.052853474644238056, + "acc_stderr": 0.05586042894941199, + "prompt_name": "id_based_on_the_text", + "task_name": "tydiqa_primary" + } + }, + "tydiqa_primary+id_heres_what_I_found": { + "2022-07-07-15-17-00": { + "acc": 0.009418282548476454, + "acc_norm": 0.9673130193905817, + "acc_norm_stderr": 0.0041865150102794995, + "acc_stderr": 0.002274116687551378, + "prompt_name": "id_heres_what_I_found", + "task_name": "tydiqa_primary" + } + }, + "tydiqa_primary+id_open_domain_qa": { + "2022-07-07-15-17-00": { + "acc": 0.6610169491525424, + "acc_norm": 0.2033898305084746, + "acc_norm_stderr": 0.052853474644238056, + "acc_stderr": 0.06215574738115915, + "prompt_name": "id_open_domain_qa", + "task_name": "tydiqa_primary" + } + }, + "tydiqa_primary+id_open_domain_qa_without_choices": { + "2022-07-07-15-17-00": { + "acc": 0.6949152542372882, + "acc_norm": 0.2033898305084746, + "acc_norm_stderr": 0.052853474644238056, + "acc_stderr": 0.06045916884710696, + "prompt_name": "id_open_domain_qa_without_choices", + "task_name": "tydiqa_primary" + } + }, + "tydiqa_primary+id_read_and_answer": { + "2022-07-07-15-17-00": { + "acc": 0.008310249307479225, + "acc_norm": 0.9673130193905817, + "acc_norm_stderr": 0.0041865150102794995, + "acc_stderr": 0.0021373550525829567, + "prompt_name": "id_read_and_answer", + 
"task_name": "tydiqa_primary" + } + }, + "tydiqa_primary+id_yes_no_none": { + "2022-07-07-15-17-00": { + "acc": 0.009972299168975069, + "acc_norm": 0.9667590027700831, + "acc_norm_stderr": 0.004220635699239678, + "acc_stderr": 0.0023393922991691816, + "prompt_name": "id_yes_no_none", + "task_name": "tydiqa_primary" + } + }, + "tydiqa_primary+id_yes_no_question": { + "2022-07-07-15-17-00": { + "acc": 0.9013850415512465, + "acc_norm": 0.9673130193905817, + "acc_norm_stderr": 0.0041865150102794995, + "acc_stderr": 0.0070195343691676106, + "prompt_name": "id_yes_no_question", + "task_name": "tydiqa_primary" + } + }, + "webqs": { + "2022-07-07-21-06-26": { + "acc": 0.061515748031496065, + "acc_stderr": 0.005331527918306684 + } + }, + "wic": { + "2022-07-07-21-06-57": { + "acc": 0.47492163009404387, + "acc_stderr": 0.019785786700500567 + } + }, + "wic+GPT-3-prompt": { + "2022-07-07-15-17-53": { + "acc": 0.4702194357366771, + "acc_norm": 0.5, + "acc_norm_stderr": 0.01981072129375818, + "acc_stderr": 0.019775550529171217, + "prompt_name": "GPT-3-prompt", + "task_name": "wic" + } + }, + "wic+GPT-3-prompt-with-label": { + "2022-07-07-15-17-53": { + "acc": 0.5062695924764891, + "acc_norm": 0.5, + "acc_norm_stderr": 0.01981072129375818, + "acc_stderr": 0.019809163801196517, + "prompt_name": "GPT-3-prompt-with-label", + "task_name": "wic" + } + }, + "wic+affirmation_true_or_false": { + "2022-07-07-15-17-53": { + "acc": 0.5, + "acc_norm": 0.493730407523511, + "acc_norm_stderr": 0.019809163801196513, + "acc_stderr": 0.01981072129375818, + "prompt_name": "affirmation_true_or_false", + "task_name": "wic" + } + }, + "wic+grammar_homework": { + "2022-07-07-15-17-53": { + "acc": 0.5141065830721003, + "acc_norm": 0.49686520376175547, + "acc_norm_stderr": 0.01981033193209754, + "acc_stderr": 0.01980283522800584, + "prompt_name": "grammar_homework", + "task_name": "wic" + } + }, + "wic+polysemous": { + "2022-07-07-15-17-53": { + "acc": 0.5501567398119123, + "acc_norm": 0.49216300940438873, + "acc_norm_stderr": 0.019808287657813832, + "acc_stderr": 0.019710793664739733, + "prompt_name": "polysemous", + "task_name": "wic" + } + }, + "wic+question-context": { + "2022-07-07-15-17-53": { + "acc": 0.5329153605015674, + "acc_norm": 0.493730407523511, + "acc_norm_stderr": 0.019809163801196517, + "acc_stderr": 0.019767747983778065, + "prompt_name": "question-context", + "task_name": "wic" + } + }, + "wic+question-context-meaning": { + "2022-07-07-15-17-53": { + "acc": 0.5078369905956113, + "acc_norm": 0.49216300940438873, + "acc_norm_stderr": 0.019808287657813832, + "acc_stderr": 0.019808287657813832, + "prompt_name": "question-context-meaning", + "task_name": "wic" + } + }, + "wic+question-context-meaning-with-label": { + "2022-07-07-15-17-53": { + "acc": 0.5188087774294671, + "acc_norm": 0.5, + "acc_norm_stderr": 0.01981072129375818, + "acc_stderr": 0.01979669944945386, + "prompt_name": "question-context-meaning-with-label", + "task_name": "wic" + } + }, + "wic+same_sense": { + "2022-07-07-15-17-53": { + "acc": 0.49843260188087773, + "acc_norm": 0.5, + "acc_norm_stderr": 0.01981072129375818, + "acc_stderr": 0.019810623954060382, + "prompt_name": "same_sense", + "task_name": "wic" + } + }, + "wic+similar-sense": { + "2022-07-07-15-17-53": { + "acc": 0.49686520376175547, + "acc_norm": 0.49059561128526646, + "acc_norm_stderr": 0.0198072167632715, + "acc_stderr": 0.019810331932097542, + "prompt_name": "similar-sense", + "task_name": "wic" + } + }, + "winogrande": { + "2022-07-07-21-07-18": { + "acc": 0.7095501183898973, + 
"acc_stderr": 0.012758813448064609 + } + }, + "wnli": { + "2022-07-07-21-07-16": { + "acc": 0.5774647887323944, + "acc_stderr": 0.05903984205682581 + } + }, + "wnli+confident": { + "2022-07-07-15-17-52": { + "acc": 0.4507042253521127, + "acc_norm": 0.43661971830985913, + "acc_norm_stderr": 0.05927935558412971, + "acc_stderr": 0.05947027187737998, + "prompt_name": "confident", + "task_name": "wnli" + } + }, + "wnli+entailment explained": { + "2022-07-07-15-17-52": { + "acc": 0.6056338028169014, + "acc_norm": 0.43661971830985913, + "acc_norm_stderr": 0.0592793555841297, + "acc_stderr": 0.058412510854444266, + "prompt_name": "entailment explained", + "task_name": "wnli" + } + }, + "wnli+imply": { + "2022-07-07-15-17-52": { + "acc": 0.5774647887323944, + "acc_norm": 0.43661971830985913, + "acc_norm_stderr": 0.0592793555841297, + "acc_stderr": 0.05903984205682581, + "prompt_name": "imply", + "task_name": "wnli" + } + }, + "wnli+justified": { + "2022-07-07-15-17-52": { + "acc": 0.4788732394366197, + "acc_norm": 0.43661971830985913, + "acc_norm_stderr": 0.0592793555841297, + "acc_stderr": 0.05970805879899505, + "prompt_name": "justified", + "task_name": "wnli" + } + }, + "wnli+mean": { + "2022-07-07-15-17-52": { + "acc": 0.5633802816901409, + "acc_norm": 0.43661971830985913, + "acc_norm_stderr": 0.0592793555841297, + "acc_stderr": 0.0592793555841297, + "prompt_name": "mean", + "task_name": "wnli" + } + }, + "wsc": { + "2022-07-07-21-08-32": { + "acc": 0.40384615384615385, + "acc_stderr": 0.04834688952654018 + } + }, + "wsc+GPT-3 Style": { + "2022-07-07-15-17-39": { + "acc": 0.5769230769230769, + "acc_norm": 0.38461538461538464, + "acc_norm_stderr": 0.047936688680750406, + "acc_stderr": 0.048679937479186836, + "prompt_name": "GPT-3 Style", + "task_name": "wsc" + } + }, + "wsc+I think they mean": { + "2022-07-07-15-17-39": { + "acc": 0.5192307692307693, + "acc_norm": 0.36538461538461536, + "acc_norm_stderr": 0.0474473339327792, + "acc_stderr": 0.049230010729780505, + "prompt_name": "I think they mean", + "task_name": "wsc" + } + }, + "wsc+Who or what is/are": { + "2022-07-07-15-17-39": { + "acc": 0.6346153846153846, + "acc_norm": 0.38461538461538464, + "acc_norm_stderr": 0.047936688680750406, + "acc_stderr": 0.0474473339327792, + "prompt_name": "Who or what is/are", + "task_name": "wsc" + } + }, + "wsc+by p they mean": { + "2022-07-07-15-17-39": { + "acc": 0.5480769230769231, + "acc_norm": 0.36538461538461536, + "acc_norm_stderr": 0.0474473339327792, + "acc_stderr": 0.049038186969314335, + "prompt_name": "by p they mean", + "task_name": "wsc" + } + }, + "wsc+does p stand for": { + "2022-07-07-15-17-39": { + "acc": 0.5961538461538461, + "acc_norm": 0.36538461538461536, + "acc_norm_stderr": 0.0474473339327792, + "acc_stderr": 0.048346889526540184, + "prompt_name": "does p stand for", + "task_name": "wsc" + } + }, + "wsc+does the pronoun refer to": { + "2022-07-07-15-17-39": { + "acc": 0.4519230769230769, + "acc_norm": 0.36538461538461536, + "acc_norm_stderr": 0.0474473339327792, + "acc_stderr": 0.049038186969314335, + "prompt_name": "does the pronoun refer to", + "task_name": "wsc" + } + }, + "wsc+in other words": { + "2022-07-07-15-17-39": { + "acc": 0.41346153846153844, + "acc_norm": 0.6442307692307693, + "acc_norm_stderr": 0.04717221961050337, + "acc_stderr": 0.04852294969729053, + "prompt_name": "in other words", + "task_name": "wsc" + } + }, + "wsc+p is/are r": { + "2022-07-07-15-17-39": { + "acc": 0.4519230769230769, + "acc_norm": 0.5769230769230769, + "acc_norm_stderr": 0.048679937479186836, + 
"acc_stderr": 0.049038186969314335, + "prompt_name": "p is/are r", + "task_name": "wsc" + } + }, + "wsc+replaced with": { + "2022-07-07-15-17-39": { + "acc": 0.5576923076923077, + "acc_norm": 0.36538461538461536, + "acc_norm_stderr": 0.0474473339327792, + "acc_stderr": 0.04893740777701, + "prompt_name": "replaced with", + "task_name": "wsc" + } + }, + "wsc+the pronoun refers to": { + "2022-07-07-15-17-39": { + "acc": 0.36538461538461536, + "acc_norm": 0.40384615384615385, + "acc_norm_stderr": 0.048346889526540184, + "acc_stderr": 0.0474473339327792, + "prompt_name": "the pronoun refers to", + "task_name": "wsc" + } + } + }, + "versions": { + "arc_challenge": 0, + "arc_easy": 0, + "axb+GPT-3 style": 0, + "axb+MNLI crowdsource": 0, + "axb+based on the previous passage": 0, + "axb+can we infer": 0, + "axb+does it follow that": 0, + "axb+does this imply": 0, + "axb+guaranteed true": 0, + "axb+justified in saying": 0, + "axb+must be true": 0, + "axb+should assume": 0, + "axg+GPT-3 style": 0, + "axg+MNLI crowdsource": 0, + "axg+based on the previous passage": 0, + "axg+can we infer": 0, + "axg+does it follow that": 0, + "axg+does this imply": 0, + "axg+guaranteed true": 0, + "axg+justified in saying": 0, + "axg+must be true": 0, + "axg+should assume": 0, + "boolq": 1, + "boolq+GPT-3 Style": 0, + "boolq+I wonder\u2026": 0, + "boolq+after_reading": 0, + "boolq+based on the following passage": 0, + "boolq+based on the previous passage": 0, + "boolq+could you tell me\u2026": 0, + "boolq+exam": 0, + "boolq+exercise": 0, + "boolq+valid_binary": 0, + "boolq+yes_no_question": 0, + "cb+GPT-3 style": 0, + "cb+MNLI crowdsource": 0, + "cb+always/sometimes/never": 0, + "cb+based on the previous passage": 0, + "cb+can we infer": 0, + "cb+claim true/false/inconclusive": 0, + "cb+consider always/sometimes/never": 0, + "cb+does it follow that": 0, + "cb+does this imply": 0, + "cb+guaranteed true": 0, + "cb+guaranteed/possible/impossible": 0, + "cb+justified in saying": 0, + "cb+must be true": 0, + "cb+should assume": 0, + "cb+take the following as truth": 0, + "cola+Following sentence acceptable": 0, + "cola+Make sense yes no": 0, + "cola+Previous sentence acceptable": 0, + "cola+editing": 0, + "cola+is_this_correct": 0, + "copa": 0, + "copa+C1 or C2? premise, so/because\u2026": 0, + "copa+best_option": 0, + "copa+cause_effect": 0, + "copa+choose": 0, + "copa+exercise": 0, + "copa+i_am_hesitating": 0, + "copa+more likely": 0, + "copa+plausible_alternatives": 0, + "crows_pairs_english+1": 0, + "crows_pairs_english+2": 0, + "crows_pairs_english+3": 0, + "crows_pairs_english+4": 0, + "crows_pairs_english+A_preference": 0, + "crows_pairs_english+A_reality_check": 0, + "crows_pairs_english+A_stereotype_true": 0, + "crows_pairs_french+1_fr": 0, + "crows_pairs_french+2_fr": 0, + "crows_pairs_french+3_fr": 0, + "crows_pairs_french+4_fr": 0, + "crows_pairs_french+A_preference_fr": 0, + "crows_pairs_french+A_reality_check_fr": 0, + "crows_pairs_french+A_stereotype_true_fr": 0, + "diabla+Is the error present? 
(same lang)": 0, + "diabla+Which is automatic?": 0, + "gsarti/flores_101_afr+null": 0, + "gsarti/flores_101_amh+null": 0, + "gsarti/flores_101_ara+null": 0, + "gsarti/flores_101_asm+null": 0, + "gsarti/flores_101_ast+null": 0, + "gsarti/flores_101_azj+null": 0, + "gsarti/flores_101_bel+null": 0, + "gsarti/flores_101_ben+null": 0, + "gsarti/flores_101_bos+null": 0, + "gsarti/flores_101_bul+null": 0, + "gsarti/flores_101_cat+null": 0, + "gsarti/flores_101_ceb+null": 0, + "gsarti/flores_101_ces+null": 0, + "gsarti/flores_101_ckb+null": 0, + "gsarti/flores_101_cym+null": 0, + "gsarti/flores_101_dan+null": 0, + "gsarti/flores_101_deu+null": 0, + "gsarti/flores_101_ell+null": 0, + "gsarti/flores_101_eng+null": 0, + "gsarti/flores_101_est+null": 0, + "gsarti/flores_101_fas+null": 0, + "gsarti/flores_101_fin+null": 0, + "gsarti/flores_101_fra+null": 0, + "gsarti/flores_101_ful+null": 0, + "gsarti/flores_101_gle+null": 0, + "gsarti/flores_101_glg+null": 0, + "gsarti/flores_101_guj+null": 0, + "gsarti/flores_101_hau+null": 0, + "gsarti/flores_101_heb+null": 0, + "gsarti/flores_101_hin+null": 0, + "gsarti/flores_101_hrv+null": 0, + "gsarti/flores_101_hun+null": 0, + "gsarti/flores_101_hye+null": 0, + "gsarti/flores_101_ibo+null": 0, + "gsarti/flores_101_ind+null": 0, + "gsarti/flores_101_isl+null": 0, + "gsarti/flores_101_ita+null": 0, + "gsarti/flores_101_jav+null": 0, + "gsarti/flores_101_jpn+null": 0, + "gsarti/flores_101_kam+null": 0, + "gsarti/flores_101_kan+null": 0, + "gsarti/flores_101_kat+null": 0, + "gsarti/flores_101_kaz+null": 0, + "gsarti/flores_101_kea+null": 0, + "gsarti/flores_101_khm+null": 0, + "gsarti/flores_101_kir+null": 0, + "gsarti/flores_101_kor+null": 0, + "gsarti/flores_101_lao+null": 0, + "gsarti/flores_101_lav+null": 0, + "gsarti/flores_101_lin+null": 0, + "gsarti/flores_101_lit+null": 0, + "gsarti/flores_101_ltz+null": 0, + "gsarti/flores_101_lug+null": 0, + "gsarti/flores_101_luo+null": 0, + "gsarti/flores_101_mal+null": 0, + "gsarti/flores_101_mar+null": 0, + "gsarti/flores_101_mkd+null": 0, + "gsarti/flores_101_mlt+null": 0, + "gsarti/flores_101_mon+null": 0, + "gsarti/flores_101_mri+null": 0, + "gsarti/flores_101_msa+null": 0, + "gsarti/flores_101_mya+null": 0, + "gsarti/flores_101_nld+null": 0, + "gsarti/flores_101_nob+null": 0, + "gsarti/flores_101_npi+null": 0, + "gsarti/flores_101_nso+null": 0, + "gsarti/flores_101_nya+null": 0, + "gsarti/flores_101_oci+null": 0, + "gsarti/flores_101_orm+null": 0, + "gsarti/flores_101_ory+null": 0, + "gsarti/flores_101_pan+null": 0, + "gsarti/flores_101_pol+null": 0, + "gsarti/flores_101_por+null": 0, + "gsarti/flores_101_pus+null": 0, + "gsarti/flores_101_ron+null": 0, + "gsarti/flores_101_rus+null": 0, + "gsarti/flores_101_slk+null": 0, + "gsarti/flores_101_slv+null": 0, + "gsarti/flores_101_sna+null": 0, + "gsarti/flores_101_snd+null": 0, + "gsarti/flores_101_som+null": 0, + "gsarti/flores_101_spa+null": 0, + "gsarti/flores_101_srp+null": 0, + "gsarti/flores_101_swe+null": 0, + "gsarti/flores_101_swh+null": 0, + "gsarti/flores_101_tam+null": 0, + "gsarti/flores_101_tel+null": 0, + "gsarti/flores_101_tgk+null": 0, + "gsarti/flores_101_tgl+null": 0, + "gsarti/flores_101_tha+null": 0, + "gsarti/flores_101_tur+null": 0, + "gsarti/flores_101_ukr+null": 0, + "gsarti/flores_101_umb+null": 0, + "gsarti/flores_101_urd+null": 0, + "gsarti/flores_101_uzb+null": 0, + "gsarti/flores_101_vie+null": 0, + "gsarti/flores_101_wol+null": 0, + "gsarti/flores_101_xho+null": 0, + "gsarti/flores_101_yor+null": 0, + "gsarti/flores_101_zho_simpl+null": 
0, + "gsarti/flores_101_zho_trad+null": 0, + "gsarti/flores_101_zul+null": 0, + "headqa": 0, + "hellaswag": 0, + "lambada": 0, + "lambada_mt_de": 0, + "lambada_mt_en": 0, + "lambada_mt_es": 0, + "lambada_mt_it": 0, + "logiqa": 0, + "mathqa": 0, + "mc_taco": 0, + "mnli+GPT-3 style": 0, + "mnli+MNLI crowdsource": 0, + "mnli+always/sometimes/never": 0, + "mnli+based on the previous passage": 0, + "mnli+can we infer": 0, + "mnli+claim true/false/inconclusive": 0, + "mnli+consider always/sometimes/never": 0, + "mnli+does it follow that": 0, + "mnli+does this imply": 0, + "mnli+guaranteed true": 0, + "mnli+guaranteed/possible/impossible": 0, + "mnli+justified in saying": 0, + "mnli+must be true": 0, + "mnli+should assume": 0, + "mnli+take the following as truth": 0, + "mnli_mismatched+GPT-3 style": 0, + "mnli_mismatched+MNLI crowdsource": 0, + "mnli_mismatched+always/sometimes/never": 0, + "mnli_mismatched+based on the previous passage": 0, + "mnli_mismatched+can we infer": 0, + "mnli_mismatched+claim true/false/inconclusive": 0, + "mnli_mismatched+consider always/sometimes/never": 0, + "mnli_mismatched+does it follow that": 0, + "mnli_mismatched+does this imply": 0, + "mnli_mismatched+guaranteed true": 0, + "mnli_mismatched+guaranteed/possible/impossible": 0, + "mnli_mismatched+justified in saying": 0, + "mnli_mismatched+must be true": 0, + "mnli_mismatched+should assume": 0, + "mnli_mismatched+take the following as truth": 0, + "mrpc": 0, + "multirc": 1, + "multirc+I was going to say\u2026": 0, + "multirc+Would it be good to answer\u2026": 0, + "multirc+confirm": 0, + "multirc+correct": 0, + "multirc+decide_valid": 0, + "multirc+found_this_answer": 0, + "multirc+grading": 0, + "multirc+is the correct answer\u2026": 0, + "multirc+is\u2026 a correct answer?": 0, + "multirc+paragraph\u2026 question\u2026 is it\u2026 ?": 0, + "openbookqa": 0, + "piqa": 0, + "prost": 0, + "pubmedqa": 0, + "qnli": 0, + "qqp": 0, + "qqp+answer": 0, + "qqp+duplicate": 0, + "qqp+duplicate or not": 0, + "qqp+meaning": 0, + "qqp+quora": 0, + "qqp+same thing": 0, + "race": 1, + "rte": 0, + "rte+does the claim\u2026 follow the fact\u2026": 0, + "rte+entailment explained": 0, + "rte+imply": 0, + "rte+imply separated": 0, + "rte+mean": 0, + "sciq": 0, + "sst": 0, + "sst+following positive negative": 0, + "sst+happy or mad": 0, + "sst+positive negative after": 0, + "sst+review": 0, + "sst+said": 0, + "triviaqa": 0, + "tydiqa_primary+en_after_reading_the_text": 0, + "tydiqa_primary+en_based_on_the_text": 0, + "tydiqa_primary+en_heres_what_I_found": 0, + "tydiqa_primary+en_open_domain_qa": 0, + "tydiqa_primary+en_open_domain_qa_without_choices": 0, + "tydiqa_primary+en_read_and_answer": 0, + "tydiqa_primary+en_yes_no_none": 0, + "tydiqa_primary+en_yes_no_question": 0, + "tydiqa_primary+id_after_reading_the_text": 0, + "tydiqa_primary+id_based_on_the_text": 0, + "tydiqa_primary+id_heres_what_I_found": 0, + "tydiqa_primary+id_open_domain_qa": 0, + "tydiqa_primary+id_open_domain_qa_without_choices": 0, + "tydiqa_primary+id_read_and_answer": 0, + "tydiqa_primary+id_yes_no_none": 0, + "tydiqa_primary+id_yes_no_question": 0, + "webqs": 0, + "wic": 0, + "wic+GPT-3-prompt": 0, + "wic+GPT-3-prompt-with-label": 0, + "wic+affirmation_true_or_false": 0, + "wic+grammar_homework": 0, + "wic+polysemous": 0, + "wic+question-context": 0, + "wic+question-context-meaning": 0, + "wic+question-context-meaning-with-label": 0, + "wic+same_sense": 0, + "wic+similar-sense": 0, + "winogrande": 0, + "wnli": 1, + "wnli+confident": 1, + "wnli+entailment 
explained": 1, + "wnli+imply": 1, + "wnli+justified": 1, + "wnli+mean": 1, + "wsc": 0, + "wsc+GPT-3 Style": 0, + "wsc+I think they mean": 0, + "wsc+Who or what is/are": 0, + "wsc+by p they mean": 0, + "wsc+does p stand for": 0, + "wsc+does the pronoun refer to": 0, + "wsc+in other words": 0, + "wsc+p is/are r": 0, + "wsc+replaced with": 0, + "wsc+the pronoun refers to": 0 + } +} \ No newline at end of file diff --git a/evaluation/results/tr11/bloom/humaneval_temp02.json b/evaluation/results/tr11/bloom/humaneval_temp02.json new file mode 100644 index 0000000000000000000000000000000000000000..7dbe348628b58c4203666c3e55e5fcb36c27fbf5 --- /dev/null +++ b/evaluation/results/tr11/bloom/humaneval_temp02.json @@ -0,0 +1 @@ +{"pass@1": 0.15524390243902436, "pass@10": 0.25233297635811675, "pass@100": 0.31325799399147314} \ No newline at end of file diff --git a/evaluation/results/tr11/bloom/humaneval_temp06.json b/evaluation/results/tr11/bloom/humaneval_temp06.json new file mode 100644 index 0000000000000000000000000000000000000000..bc25f0f3d8d256e27a88137541f3a1410b43957b --- /dev/null +++ b/evaluation/results/tr11/bloom/humaneval_temp06.json @@ -0,0 +1 @@ +{"pass@1": 0.14417682926829267, "pass@10": 0.3220367632383857, "pass@100": 0.5380466541797105} \ No newline at end of file diff --git a/evaluation/results/tr11/bloom/humaneval_temp08.json b/evaluation/results/tr11/bloom/humaneval_temp08.json new file mode 100644 index 0000000000000000000000000000000000000000..52b33d44f182400232f7fce0d226f45b1fd31e2e --- /dev/null +++ b/evaluation/results/tr11/bloom/humaneval_temp08.json @@ -0,0 +1 @@ +{"pass@1": 0.12228658536585368, "pass@10": 0.30805813658531067, "pass@100": 0.5545431515723145} \ No newline at end of file diff --git a/evaluation/results/tr11/bloom/mdtable.txt b/evaluation/results/tr11/bloom/mdtable.txt new file mode 100644 index 0000000000000000000000000000000000000000..11432d0b694a2a60d38aa8723fbd02df0e2bde4b --- /dev/null +++ b/evaluation/results/tr11/bloom/mdtable.txt @@ -0,0 +1,148 @@ +| Task | Language | Metric | BLOOM-176B | OPT-175B* | +|:--------|:-----------------|:------------------------|-------------:|------------:| +| arc_challenge | eng | acc ↑ | 0.411 | 0.412 | +| arc_easy | eng | acc ↑ | 0.726 | 0.751 | +| axb (Median of 10 prompts) | eng | acc ↑ | 0.575 | 0.532 | +| axg (Median of 10 prompts) | eng | acc ↑ | 0.525 | 0.548 | +| boolq (Median of 11 prompts) | eng | acc ↑ | 0.635 | 0.622 | +| cb (Median of 15 prompts) | eng | acc ↑ | 0.339 | 0.411 | +| cola (Median of 5 prompts) | eng | acc ↑ | 0.39 | 0.444 | +| copa (Median of 9 prompts) | eng | acc ↑ | 0.56 | 0.55 | +| crows_pairs_english (Median of 6 prompts) | eng | acc ↑ | 0.5 | 0.502 | +| crows_pairs_french (Median of 7 prompts) | fra | acc ↑ | 0.506 | 0.499 | +| diabla (Median of 2 prompts) | eng | acc ↑ | 0.295 | 0.289 | +| gsarti/flores_101_afr | afr | byte_perplexity ↓ | 4.254 | 3.381 | +| gsarti/flores_101_amh | amh | byte_perplexity ↓ | 3.717 | 3.87 | +| gsarti/flores_101_ara | ara | byte_perplexity ↓ | 1.705 | 2.42 | +| gsarti/flores_101_asm | asm | byte_perplexity ↓ | 6.577 | 3.028 | +| gsarti/flores_101_ast | ast | byte_perplexity ↓ | 2.856 | 4.737 | +| gsarti/flores_101_azj | azj | byte_perplexity ↓ | 4.807 | 4.767 | +| gsarti/flores_101_bel | bel | byte_perplexity ↓ | 2.731 | 2.557 | +| gsarti/flores_101_ben | ben | byte_perplexity ↓ | 5.993 | 2.243 | +| gsarti/flores_101_bos | bos | byte_perplexity ↓ | 3.594 | 2.668 | +| gsarti/flores_101_bul | bul | byte_perplexity ↓ | 2.159 | 2.099 | +| gsarti/flores_101_cat | 
cat | byte_perplexity ↓ | 2.168 | 2.837 | +| gsarti/flores_101_ceb | ceb | byte_perplexity ↓ | 5.287 | 3.636 | +| gsarti/flores_101_ces | ces | byte_perplexity ↓ | 3.452 | 2.749 | +| gsarti/flores_101_ckb | ckb | byte_perplexity ↓ | 3.705 | 4.688 | +| gsarti/flores_101_cym | cym | byte_perplexity ↓ | 7.089 | 5.075 | +| gsarti/flores_101_dan | dan | byte_perplexity ↓ | 3.43 | 2.492 | +| gsarti/flores_101_deu | deu | byte_perplexity ↓ | 2.338 | 2.099 | +| gsarti/flores_101_ell | ell | byte_perplexity ↓ | 1.96 | 1.811 | +| gsarti/flores_101_eng | eng | byte_perplexity ↓ | 1.882 | 1.9 | +| gsarti/flores_101_est | est | byte_perplexity ↓ | 5.774 | 3.533 | +| gsarti/flores_101_fas | fas | byte_perplexity ↓ | 2.431 | 2.444 | +| gsarti/flores_101_fin | fin | byte_perplexity ↓ | 4.304 | 2.601 | +| gsarti/flores_101_fra | fra | byte_perplexity ↓ | 1.937 | 1.984 | +| gsarti/flores_101_ful | ful | byte_perplexity ↓ | 9.74 | 11.84 | +| gsarti/flores_101_gle | gle | byte_perplexity ↓ | 6.035 | 3.914 | +| gsarti/flores_101_glg | glg | byte_perplexity ↓ | 2.365 | 3.015 | +| gsarti/flores_101_guj | guj | byte_perplexity ↓ | 5.707 | 2.438 | +| gsarti/flores_101_hau | hau | byte_perplexity ↓ | 8.855 | 5.283 | +| gsarti/flores_101_heb | heb | byte_perplexity ↓ | 2.921 | 2.903 | +| gsarti/flores_101_hin | hin | byte_perplexity ↓ | 5.452 | 1.86 | +| gsarti/flores_101_hrv | hrv | byte_perplexity ↓ | 3.706 | 2.715 | +| gsarti/flores_101_hun | hun | byte_perplexity ↓ | 4.059 | 2.865 | +| gsarti/flores_101_hye | hye | byte_perplexity ↓ | 3.127 | 3.411 | +| gsarti/flores_101_ibo | ibo | byte_perplexity ↓ | 3.95 | 8.008 | +| gsarti/flores_101_ind | ind | byte_perplexity ↓ | 1.976 | 2.632 | +| gsarti/flores_101_isl | isl | byte_perplexity ↓ | 5.501 | 4.701 | +| gsarti/flores_101_ita | ita | byte_perplexity ↓ | 2.314 | 2.104 | +| gsarti/flores_101_jav | jav | byte_perplexity ↓ | 4.942 | 8.16 | +| gsarti/flores_101_jpn | jpn | byte_perplexity ↓ | 2.259 | 2.198 | +| gsarti/flores_101_kam | kam | byte_perplexity ↓ | 9.743 | 10.981 | +| gsarti/flores_101_kan | kan | byte_perplexity ↓ | 6.234 | 2.373 | +| gsarti/flores_101_kat | kat | byte_perplexity ↓ | 2.051 | 2.466 | +| gsarti/flores_101_kaz | kaz | byte_perplexity ↓ | 3.039 | 4.376 | +| gsarti/flores_101_kea | kea | byte_perplexity ↓ | 7.147 | 9.632 | +| gsarti/flores_101_khm | khm | byte_perplexity ↓ | 3.367 | 2.646 | +| gsarti/flores_101_kir | kir | byte_perplexity ↓ | 3.241 | 4.522 | +| gsarti/flores_101_kor | kor | byte_perplexity ↓ | 2.902 | 3.376 | +| gsarti/flores_101_lao | lao | byte_perplexity ↓ | 2.331 | 3.106 | +| gsarti/flores_101_lav | lav | byte_perplexity ↓ | 5.224 | 4.811 | +| gsarti/flores_101_lin | lin | byte_perplexity ↓ | 4.847 | 8.871 | +| gsarti/flores_101_lit | lit | byte_perplexity ↓ | 4.543 | 5.183 | +| gsarti/flores_101_ltz | ltz | byte_perplexity ↓ | 5.591 | 7.158 | +| gsarti/flores_101_lug | lug | byte_perplexity ↓ | 5.43 | 7.399 | +| gsarti/flores_101_luo | luo | byte_perplexity ↓ | 12.031 | 11.951 | +| gsarti/flores_101_mal | mal | byte_perplexity ↓ | 4.794 | 2.054 | +| gsarti/flores_101_mar | mar | byte_perplexity ↓ | 6.857 | 2.274 | +| gsarti/flores_101_mkd | mkd | byte_perplexity ↓ | 2.335 | 2.538 | +| gsarti/flores_101_mlt | mlt | byte_perplexity ↓ | 9.041 | 5.996 | +| gsarti/flores_101_mon | mon | byte_perplexity ↓ | 3.095 | 4.519 | +| gsarti/flores_101_mri | mri | byte_perplexity ↓ | 5.266 | 4.438 | +| gsarti/flores_101_msa | msa | byte_perplexity ↓ | 2.222 | 2.935 | +| gsarti/flores_101_mya | mya | byte_perplexity ↓ | 2.523 | 2.413 | 
+| gsarti/flores_101_nld | nld | byte_perplexity ↓ | 2.799 | 2.293 | +| gsarti/flores_101_nob | nob | byte_perplexity ↓ | 3.629 | 2.593 | +| gsarti/flores_101_npi | npi | byte_perplexity ↓ | 6.666 | 2.499 | +| gsarti/flores_101_nso | nso | byte_perplexity ↓ | 5.015 | 8.485 | +| gsarti/flores_101_nya | nya | byte_perplexity ↓ | 4.938 | 7.548 | +| gsarti/flores_101_oci | oci | byte_perplexity ↓ | 3.607 | 4.936 | +| gsarti/flores_101_orm | orm | byte_perplexity ↓ | 11.316 | 7.145 | +| gsarti/flores_101_ory | ory | byte_perplexity ↓ | 5.982 | 2.668 | +| gsarti/flores_101_pan | pan | byte_perplexity ↓ | 4.772 | 2.782 | +| gsarti/flores_101_pol | pol | byte_perplexity ↓ | 3.012 | 2.432 | +| gsarti/flores_101_por | por | byte_perplexity ↓ | 1.841 | 2.178 | +| gsarti/flores_101_pus | pus | byte_perplexity ↓ | 4.624 | 4.785 | +| gsarti/flores_101_ron | ron | byte_perplexity ↓ | 3.05 | 2.197 | +| gsarti/flores_101_rus | rus | byte_perplexity ↓ | 1.708 | 1.689 | +| gsarti/flores_101_slk | slk | byte_perplexity ↓ | 4.038 | 3.419 | +| gsarti/flores_101_slv | slv | byte_perplexity ↓ | 4.141 | 3.582 | +| gsarti/flores_101_sna | sna | byte_perplexity ↓ | 4.711 | 5.588 | +| gsarti/flores_101_snd | snd | byte_perplexity ↓ | 4.206 | 5.667 | +| gsarti/flores_101_som | som | byte_perplexity ↓ | 9.154 | 4.788 | +| gsarti/flores_101_spa | spa | byte_perplexity ↓ | 1.796 | 2.098 | +| gsarti/flores_101_srp | srp | byte_perplexity ↓ | 2.241 | 2.688 | +| gsarti/flores_101_swe | swe | byte_perplexity ↓ | 3.345 | 2.468 | +| gsarti/flores_101_swh | swh | byte_perplexity ↓ | 2.684 | 4.473 | +| gsarti/flores_101_tam | tam | byte_perplexity ↓ | 5.165 | 2.024 | +| gsarti/flores_101_tel | tel | byte_perplexity ↓ | 6.81 | 2.407 | +| gsarti/flores_101_tgk | tgk | byte_perplexity ↓ | 3.785 | 4.899 | +| gsarti/flores_101_tgl | tgl | byte_perplexity ↓ | 3.75 | 2.738 | +| gsarti/flores_101_tha | tha | byte_perplexity ↓ | 2.104 | 2.035 | +| gsarti/flores_101_tur | tur | byte_perplexity ↓ | 3.318 | 2.622 | +| gsarti/flores_101_ukr | ukr | byte_perplexity ↓ | 2.089 | 1.93 | +| gsarti/flores_101_umb | umb | byte_perplexity ↓ | 11.766 | 11.64 | +| gsarti/flores_101_urd | urd | byte_perplexity ↓ | 1.779 | 2.982 | +| gsarti/flores_101_uzb | uzb | byte_perplexity ↓ | 8.5 | 13.209 | +| gsarti/flores_101_vie | vie | byte_perplexity ↓ | 1.659 | 2.229 | +| gsarti/flores_101_wol | wol | byte_perplexity ↓ | 6.142 | 13.945 | +| gsarti/flores_101_xho | xho | byte_perplexity ↓ | 4.69 | 8.42 | +| gsarti/flores_101_yor | yor | byte_perplexity ↓ | 4.361 | 7.636 | +| gsarti/flores_101_zho_simpl | zho_simpl | byte_perplexity ↓ | 2.118 | 5.113 | +| gsarti/flores_101_zho_trad | zho_trad | byte_perplexity ↓ | 2.274 | 5.67 | +| gsarti/flores_101_zul | zul | byte_perplexity ↓ | 6.017 | 7.341 | +| headqa | esp | acc ↑ | 0.346 | 0.244 | +| hellaswag | eng | acc ↑ | 0.535 | 0.592 | +| lambada_mt_de | deu | acc ↑ | 0.329 | 0.358 | +| lambada_mt_en | eng | acc ↑ | 0.672 | 0.747 | +| lambada_mt_es | esp | acc ↑ | 0.476 | 0.397 | +| lambada_mt_it | ita | acc ↑ | 0.406 | 0.409 | +| logiqa | eng | acc ↑ | 0.235 | 0.244 | +| mathqa | eng | acc ↑ | 0.277 | 0.268 | +| mc_taco | eng | em ↑ | 0.131 | 0.124 | +| mnli (Median of 15 prompts) | eng | acc ↑ | 0.355 | 0.36 | +| mnli_mismatched (Median of 15 prompts) | eng | acc ↑ | 0.355 | 0.36 | +| mrpc | eng | acc ↑ | 0.387 | 0.446 | +| multirc (Median of 11 prompts) | eng | acc ↑ | 0.571 | 0.599 | +| openbookqa | eng | acc ↑ | 0.312 | 0.322 | +| piqa | eng | acc ↑ | 0.781 | 0.791 | +| prost | eng | acc ↑ | 0.298 | 0.299 | 
+| pubmedqa | eng | acc ↑ | 0.741 | 0.709 | +| qnli | eng | acc ↑ | 0.517 | 0.554 | +| qqp (Median of 7 prompts) | eng | acc ↑ | 0.588 | 0.395 | +| race | eng | acc ↑ | 0.39 | 0.402 | +| rte (Median of 6 prompts) | eng | acc ↑ | 0.52 | 0.495 | +| sciq | eng | acc ↑ | 0.936 | 0.948 | +| sst (Median of 6 prompts) | eng | acc ↑ | 0.604 | 0.647 | +| triviaqa | eng | acc ↑ | 0.183 | 0.342 | +| tydiqa_primary (Median of 16 prompts) | eng | acc ↑ | 0.281 | 0.148 | +| webqs | eng | acc ↑ | 0.062 | 0.159 | +| wic (Median of 11 prompts) | eng | acc ↑ | 0.506 | 0.498 | +| winogrande | eng | acc ↑ | 0.71 | 0.736 | +| wnli (Median of 6 prompts) | eng | acc ↑ | 0.57 | 0.563 | +| wsc (Median of 11 prompts) | eng | acc ↑ | 0.519 | 0.413 | +| humaneval | python | pass@1 ↑ | 0.155 | 0.0 | +| humaneval | python | pass@10 ↑ | 0.322 | 0.0 | +| humaneval | python | pass@100 ↑ | 0.555 | 0.003 | diff --git a/evaluation/results/tr11/bloom350m/humaneval_temp08.json b/evaluation/results/tr11/bloom350m/humaneval_temp08.json new file mode 100644 index 0000000000000000000000000000000000000000..a4236c4bb5761a9f485a9b4a1e9bfec1af99a4b1 --- /dev/null +++ b/evaluation/results/tr11/bloom350m/humaneval_temp08.json @@ -0,0 +1 @@ +{"pass@1": 0.004573170731707317, "pass@10": 0.025074764360845308, "pass@100": 0.05906180468454194} \ No newline at end of file diff --git a/evaluation/results/tr11/templates.txt b/evaluation/results/tr11/templates.txt new file mode 100644 index 0000000000000000000000000000000000000000..09200f59cfa179c682b76a28261a737fab9902b5 --- /dev/null +++ b/evaluation/results/tr11/templates.txt @@ -0,0 +1,65 @@ +piaf,None,None,"Given_above_context" +piaf,None,None,"given_passage_answer" +piaf,None,None,"context_follow_q" +piaf,None,None,"after_reading" +piaf,None,None,"extract_the_answer" +GEM/wiki_lingua,ar,ar,"article_summary_ar" +GEM/wiki_lingua,ar,ar,"write_abstract_ar" +GEM/wiki_lingua,ar,ar,"summarize_above_ar" +GEM/wiki_lingua,ar,ar,"rephrase_ar" +GEM/wiki_lingua,ar,ar,"tldr_ar" +GEM/wiki_lingua,en,en,"article_summary_en" +GEM/wiki_lingua,en,en,"write_abstract_en" +GEM/wiki_lingua,en,en,"summarize_above_en" +GEM/wiki_lingua,en,en,"rephrase_en" +GEM/wiki_lingua,en,en,"tldr_en" +GEM/wiki_lingua,es,es,"article_summary_es" +GEM/wiki_lingua,es,es,"write_abstract_es" +GEM/wiki_lingua,es,es,"summarize_above_es" +GEM/wiki_lingua,es,es,"rephrase_es" +GEM/wiki_lingua,es,es,"tldr_es" +GEM/wiki_lingua,fr,fr,"article_summary_fr" +GEM/wiki_lingua,fr,fr,"write_abstract_fr" +GEM/wiki_lingua,fr,fr,"summarize_above_fr" +GEM/wiki_lingua,fr,fr,"rephrase_fr" +GEM/wiki_lingua,fr,fr,"tldr_fr" +GEM/wiki_lingua,hi,hi,"article_summary_hi" +GEM/wiki_lingua,hi,hi,"write_abstract_hi" +GEM/wiki_lingua,hi,hi,"summarize_above_hi" +GEM/wiki_lingua,hi,hi,"rephrase_hi" +GEM/wiki_lingua,hi,hi,"tldr_hi" +GEM/wiki_lingua,id,id,"article_summary_id" +GEM/wiki_lingua,id,id,"write_abstract_id" +GEM/wiki_lingua,id,id,"summarize_above_id" +GEM/wiki_lingua,id,id,"rephrase_id" +GEM/wiki_lingua,id,id,"tldr_id" +GEM/wiki_lingua,pt,pt,"article_summary_pt" +GEM/wiki_lingua,pt,pt,"write_abstract_pt" +GEM/wiki_lingua,pt,pt,"summarize_above_pt" +GEM/wiki_lingua,pt,pt,"rephrase_pt" +GEM/wiki_lingua,pt,pt,"tldr_pt" +GEM/wiki_lingua,vi,vi,"article_summary_vi" +GEM/wiki_lingua,vi,vi,"write_abstract_vi" +GEM/wiki_lingua,vi,vi,"summarize_above_vi" +GEM/wiki_lingua,vi,vi,"rephrase_vi" +GEM/wiki_lingua,vi,vi,"tldr_vi" +GEM/wiki_lingua,zh,zh,"article_summary_zh" +GEM/wiki_lingua,zh,zh,"write_abstract_zh" +GEM/wiki_lingua,zh,zh,"summarize_above_zh" 
+GEM/wiki_lingua,zh,zh,"rephrase_zh" +GEM/wiki_lingua,zh,zh,"tldr_zh" +GEM/web_nlg,en,en,"use-category" +GEM/web_nlg,en,en,"PALM_prompt" +GEM/web_nlg,en,en,"very-explicit-description" +GEM/web_nlg,en,en,"explicit-graph-description2" +GEM/web_nlg,en,en,"non-explicit-description" +GEM/web_nlg,ru,ru,"use-category" +GEM/web_nlg,ru,ru,"PAM-russian" +GEM/web_nlg,ru,ru,"PALM_prompt" +GEM/web_nlg,ru,ru,"explicit-graph-description-2-Russian" +GEM/web_nlg,ru,ru,"explicit-graph-description" +wmt14,fr-en,fr-en,"version-en-fr-target" +wmt14,fr-en,fr-en,"a_good_translation-fr-en-target" +wmt14,fr-en,fr-en,"a_good_translation-en-fr-source+target" +wmt14,fr-en,fr-en,"xglm-en-fr-source-target" +wmt14,fr-en,fr-en,"gpt3-en-fr" diff --git a/evaluation/results/tr13/lmeval/download_bslmeval.slurm b/evaluation/results/tr13/lmeval/download_bslmeval.slurm new file mode 100644 index 0000000000000000000000000000000000000000..d388158ca7674d2b44f28f4b12859e6eecbc155b --- /dev/null +++ b/evaluation/results/tr13/lmeval/download_bslmeval.slurm @@ -0,0 +1,37 @@ +#!/bin/bash +#SBATCH --job-name=download-bslmeval +#SBATCH --partition=prepost +#SBATCH --nodes=1 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! +#SBATCH --cpus-per-task=10 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --account=six@cpu + +set -x -e + +echo "START TIME: $(date)" + +source $six_ALL_CCFRWORK/start-py38-pt111 +conda activate muennighofflmeval + +#export HF_DATASETS_OFFLINE=1 +#export TRANSFORMERS_OFFLINE=1 + +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export TOKENIZERS_PARALLELISM=false + +cd /gpfsscratch/rech/six/commun/experiments/muennighoff/bloomckpt/lm-evaluation-harness + +# GEM/web_nlg_en,GEM/web_nlg_en_challenge_test_numbers,GEM/web_nlg_en_challenge_test_scramble,GEM/web_nlg_en_challenge_validation_sample,GEM/web_nlg_ru,GEM/web_nlg_ru_challenge_test_scramble,GEM/web_nlg_ru_challenge_validation_sample,GEM/wiki_auto_asset_turk_challenge_test_asset_backtranslation,GEM/wiki_auto_asset_turk_challenge_test_asset_bfp02,GEM/wiki_auto_asset_turk_challenge_test_asset_bfp05,GEM/wiki_auto_asset_turk_challenge_test_asset_nopunc,GEM/wiki_auto_asset_turk_challenge_test_turk_backtranslation,GEM/wiki_auto_asset_turk_challenge_test_turk_bfp02,GEM/wiki_auto_asset_turk_challenge_test_turk_bfp05,GEM/wiki_auto_asset_turk_challenge_test_turk_nopunc,GEM/wiki_auto_asset_turk_test_asset,GEM/wiki_auto_asset_turk_test_turk,GEM/wiki_lingua_ar,GEM/wiki_lingua_cs,GEM/wiki_lingua_de,GEM/wiki_lingua_en,GEM/wiki_lingua_es,GEM/wiki_lingua_fr,GEM/wiki_lingua_hi,GEM/wiki_lingua_id,GEM/wiki_lingua_it,GEM/wiki_lingua_ja,GEM/wiki_lingua_ko,GEM/wiki_lingua_nl,GEM/wiki_lingua_pt,GEM/wiki_lingua_ru,GEM/wiki_lingua_th,GEM/wiki_lingua_tr,GEM/wiki_lingua_vi,GEM/wiki_lingua_zh,gem_xsum,gem_xsum_challenge_sample,gem_xsum_challenge_test_backtranslation,gem_xsum_challenge_test_bfp_02,gem_xsum_challenge_test_bfp_05,gem_xsum_challenge_test_covid,gem_xsum_challenge_test_nopunc \ +python3 main.py --model hf-causal \ + --model_args pretrained=hf-internal-testing/tiny-random-gpt2,use_accelerate=True,tokenizer=hf-internal-testing/tiny-random-gpt2,dtype=float16 \ + --tasks wmt14_fr_en,wmt19_ru_en,wmt19_zh_en \ + --device cuda \ + --limit 1 \ + 
--no_cache \ + --num_fewshot 0 diff --git a/evaluation/results/tr13/tzeroeval/convert_validation_2b5.slurm b/evaluation/results/tr13/tzeroeval/convert_validation_2b5.slurm new file mode 100644 index 0000000000000000000000000000000000000000..e6373a8ee5154e65fb60fb54f781d13c91ca3974 --- /dev/null +++ b/evaluation/results/tr13/tzeroeval/convert_validation_2b5.slurm @@ -0,0 +1,352 @@ +#!/bin/bash +#SBATCH --job-name=ckpts +#SBATCH --ntasks=1 # number of MP tasks +#SBATCH --nodes=1 +#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --time=20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --account=ajs@cpu +#SBATCH --partition=cpu_p1 + +set -x -e + +source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0 +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 +conda activate muennighoffmodelconv + +CKPT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13c-2B5-ml-t0/checkpoints/xp3capmixnewcodelonglossseq +#CKPT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13f-6B3-ml-t0/checkpoints/p31lossseq + +CKPTS=( +global_step250 +global_step500 +global_step750 +global_step1000 +global_step1250 +global_step1500 +global_step1750 +global_step2000 +global_step2250 +global_step2500 +global_step2750 +global_step3000 +) +EXAMPLE_CKPT=$six_ALL_CCFRSCRATCH/commun/experiments/muennighoff/bloomckpt/2b5/bloom-2b5 +DUMP_PATH=$six_ALL_CCFRSCRATCH/commun/experiments/muennighoff/bloomckpt/2b5t0 +OUT_PREFIX=xp3capmixlossseq_ +#OUT_PREFIX=p31lossseq + +TP=1 + +### CONVERT ### + + +for i in {0..11}; do +CKPT=${CKPTS[$i]} +echo "$i" +echo "Running $CKPT" + +OUTPUTCKPT=$DUMP_PATH/"$OUT_PREFIX$CKPT" +python $six_ALL_CCFRSCRATCH/commun/experiments/muennighoff/bloomckpt/transformers_clone/src/transformers/models/bloom/convert_bloom_original_checkpoint_to_pytorch.py --pytorch_dump_folder_path $OUTPUTCKPT --bloom_checkpoint_path $CKPT_PATH/$CKPT --pretraining_tp $TP --bloom_config_file $EXAMPLE_CKPT/config.json + +# Copy tokenizer.json etc +cp -r $EXAMPLE_CKPT/*.json $OUTPUTCKPT/ + +eval_script="./eval_$i.slurm" +cat <<EOT > $eval_script +#!/bin/bash +#SBATCH --job-name=evaluate_t0 +#SBATCH --nodes=1 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
+#SBATCH --cpus-per-task=8 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:1 # number of gpus +#SBATCH --constraint=a100 +#SBATCH --time 5:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --account=six@a100 +#SBATCH --array=0-168 + +set -x -e + +source $six_ALL_CCFRWORK/start-py38-pt111 +conda activate thomas_t_zero_evaluation + +CHECKPOINT_PATH=$OUTPUTCKPT + +WORKDIR=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0 +pushd "\$WORKDIR" +OUTPUT_DIR="\$CHECKPOINT_PATH/evaluation" +mkdir -p "\$OUTPUT_DIR" + +# Validation +DATASETS_AND_CONFIGS_VAL=( +head_qa,en,en,"multiple_choice_q_and_a_index_with_context_en",validation +head_qa,en,en,"multiple_choice_q_and_a_en",validation +head_qa,en,en,"multiple_choice_q_and_a_index_en",validation +head_qa,en,en,"multiple_choice_a_and_q_with_context_en",validation +head_qa,en,en,"multiple_choice_a_and_q_en",validation +head_qa,es,en,"multiple_choice_q_and_a_index_with_context_en",validation +head_qa,es,en,"multiple_choice_q_and_a_en",validation +head_qa,es,en,"multiple_choice_q_and_a_index_en",validation +head_qa,es,en,"multiple_choice_a_and_q_with_context_en",validation +head_qa,es,en,"multiple_choice_a_and_q_en",validation +climate_fever,None,None,"first_evidence_and_claim_itemization",test +climate_fever,None,None,"claim_and_all_supporting_evidences",test +climate_fever,None,None,"fifth_evidence_and_claim_itemization",test +climate_fever,None,None,"third_evidence_claim_pair",test +climate_fever,None,None,"second_evidence_and_claim_itemization",test +codah,codah,None,"interrogative_instruction_after_sentence_and_choices",train +codah,codah,None,"affirmative_instruction_before_sentence_and_choices",train +codah,codah,None,"affirmative_instruction_after_sentence_and_choices",train +aqua_rat,raw,None,"select_the_best_option",validation +aqua_rat,raw,None,"answer_quiz",validation +aqua_rat,raw,None,"Answer questions from options",validation +commonsense_qa,None,None,"answer_given_question_without_options",validation +commonsense_qa,None,None,"question_answering",validation +commonsense_qa,None,None,"most_suitable_answer",validation +amazon_reviews_multi,en,en,"prompt_title_to_star",validation +amazon_reviews_multi,en,en,"prompt_review_to_star",validation +amazon_reviews_multi,en,en,"prompt_body_title_to_star",validation +amazon_reviews_multi,zh,en,"prompt_title_to_star",validation +amazon_reviews_multi,zh,en,"prompt_review_to_star",validation +amazon_reviews_multi,zh,en,"prompt_body_title_to_star",validation +amazon_reviews_multi,fr,en,"prompt_title_to_star",validation +amazon_reviews_multi,fr,en,"prompt_review_to_star",validation +amazon_reviews_multi,fr,en,"prompt_body_title_to_star",validation +amazon_reviews_multi,es,en,"prompt_title_to_star",validation +amazon_reviews_multi,es,en,"prompt_review_to_star",validation +amazon_reviews_multi,es,en,"prompt_body_title_to_star",validation +art,None,None,"choose_hypothesis_options",validation +art,None,None,"choose_hypothesis_believable",validation +art,None,None,"choose_hypothesis",validation +art,None,None,"choose_hypothesis_desc",validation +art,None,None,"choose_hypothesis_likely",validation +banking77,None,None,"help_page_topic",test +banking77,None,None,"direct_to_which_department",test +banking77,None,None,"rephrase_as_banking_term",test +blbooksgenre,title_genre_classifiction,None,"multi-choice",train +blbooksgenre,title_genre_classifiction,None,"premise_context_first",train 
+blbooksgenre,title_genre_classifiction,None,"classify",train +blimp,adjunct_island,None,"grammatical_between_1_2",train +blimp,adjunct_island,None,"grammatical_between_A_B",train +blimp,adjunct_island,None,"grammatical_which_one_1_2",train +blimp,adjunct_island,None,"single_sentence_bad_yes_no",train +blimp,adjunct_island,None,"single_sentence_good_yes_no",train +conv_ai_3,None,None,"clarification_needed",validation +conv_ai_3,None,None,"score_give_number",validation +conv_ai_3,None,None,"ambiguous",validation +conv_ai_3,None,None,"directly_answer",validation +conv_ai_3,None,None,"score_how_much",validation +craigslist_bargains,None,None,"good deal for seller no list price implicit",validation +craigslist_bargains,None,None,"good deal for seller no list price",validation +craigslist_bargains,None,None,"good deal for seller",validation +craigslist_bargains,None,None,"best deal",validation +ecthr_cases,alleged-violation-prediction,None,"implicit_advice_number",validation +ecthr_cases,alleged-violation-prediction,None,"ecthr_alleged_articles_declaration_at_end",validation +ecthr_cases,alleged-violation-prediction,None,"ecthr_alleged_articles_question_at_start",validation +ecthr_cases,alleged-violation-prediction,None,"implicit_judgment_paragraph",validation +ecthr_cases,alleged-violation-prediction,None,"confirm number of violated articles",validation +emo,None,None,"persons_describe",validation +emo,None,None,"final_message",validation +emo,None,None,"what_emotion_do_you_think",validation +emo,None,None,"emotional_state",validation +emo,None,None,"dialogue_between",validation +emotion,None,None,"choose_the_best_emotion_label",test +emotion,None,None,"reply_with_emoation_label",test +emotion,None,None,"answer_with_class_label",test +emotion,None,None,"answer_question_with_emotion_label",test +financial_phrasebank,sentences_allagree,None,"share_price_option",train +financial_phrasebank,sentences_allagree,None,"sentiment",train +financial_phrasebank,sentences_allagree,None,"word_comes_to_mind",train +financial_phrasebank,sentences_allagree,None,"complementary_industries",train +financial_phrasebank,sentences_allagree,None,"bullish_neutral_bearish",train +glue,cola,None,"Make sense yes no",validation +glue,cola,None,"is_this_correct",validation +glue,cola,None,"editing",validation +glue,cola,None,"Following sentence acceptable",validation +glue,cola,None,"Previous sentence acceptable",validation +glue,sst2,None,"positive negative after",validation +glue,sst2,None,"review",validation +glue,sst2,None,"said",validation +glue,sst2,None,"following positive negative",validation +glue,sst2,None,"happy or mad",validation +health_fact,None,None,"claim_veracity_classification_after_reading_I_believe",validation +health_fact,None,None,"claim_explanation_classification",validation +health_fact,None,None,"claim_veracity_classification_tell_me",validation +hlgd,None,None,"is_same_event_with_time_interrogative_related",validation +hlgd,None,None,"is_same_event_interrogative_talk",validation +hlgd,None,None,"is_same_event_with_time_interrogative_talk",validation +hlgd,None,None,"is_same_event_refer",validation +hlgd,None,None,"is_same_event_editor_asks",validation +hyperpartisan_news_detection,byarticle,None,"consider_does_it_follow_a_hyperpartisan_argumentation",train +hyperpartisan_news_detection,byarticle,None,"follows_hyperpartisan_argumentation",train +hyperpartisan_news_detection,byarticle,None,"consume_with_caution",train 
+hyperpartisan_news_detection,byarticle,None,"extreme_left_wing_or_right_wing",train +hyperpartisan_news_detection,byarticle,None,"consider_it_exhibits_extreme_one_sidedness",train +liar,None,None,"Given statement guess category",validation +lince,sa_spaeng,None,"original poster expressed sentiment",validation +lince,sa_spaeng,None,"sentiment trying to express",validation +lince,sa_spaeng,None,"express sentiment",validation +lince,sa_spaeng,None,"negation template",validation +lince,sa_spaeng,None,"the author seem",validation +math_qa,None,None,"choose_correct_og",test +math_qa,None,None,"pick_the_correct",test +math_qa,None,None,"first_choice_then_problem",test +math_qa,None,None,"problem_set_type",test +math_qa,None,None,"gre_problem",test +movie_rationales,None,None,"Standard binary sentiment analysis",validation +movie_rationales,None,None,"Evidences sentiment classification",validation +movie_rationales,None,None,"Evidences + review",validation +movie_rationales,None,None,"Generate evidences and sentiment",validation +mwsc,None,None,"in-the-sentence-question-first",validation +mwsc,None,None,"what-think",validation +mwsc,None,None,"in-the-sentence",validation +mwsc,None,None,"options-or",validation +mwsc,None,None,"is-correct",validation +poem_sentiment,None,None,"positive_or_negative_sentiment_variation_2",validation +poem_sentiment,None,None,"question_answer_format",validation +poem_sentiment,None,None,"guess_sentiment_without_options_variation_1",validation +poem_sentiment,None,None,"positive_or_negative_sentiment_variation_1",validation +poem_sentiment,None,None,"most_appropriate_sentiment",validation +onestop_english,None,None,"esl_context",train +onestop_english,None,None,"ara_context",train +onestop_english,None,None,"determine_reading_level_from_the_first_three_sentences",train +onestop_english,None,None,"esl_variation",train +onestop_english,None,None,"assess",train +pubmed_qa,pqa_labeled,None,"Long Answer to Final Decision",train +pubmed_qa,pqa_labeled,None,"Question Answering (Short)",train +riddle_sense,None,None,"most_suitable_answer",validation +riddle_sense,None,None,"answer_given_question_without_options",validation +riddle_sense,None,None,"question_to_answer_index",validation +riddle_sense,None,None,"question_answering",validation +scicite,None,None,"Classify intent w/section (select choice)",validation +scicite,None,None,"Classify intent (choices first)",validation +scicite,None,None,"Classify intent (select choice)",validation +scicite,None,None,"Classify intent",validation +scicite,None,None,"can_describe",validation +selqa,answer_selection_analysis,None,"is-he-talking-about",validation +selqa,answer_selection_analysis,None,"would-make-sense-qu-rand",validation +selqa,answer_selection_analysis,None,"make-sense-rand",validation +selqa,answer_selection_analysis,None,"which-answer-1st-vs-random",validation +snips_built_in_intents,None,None,"voice_intent",train +snips_built_in_intents,None,None,"categorize_query",train +snips_built_in_intents,None,None,"intent_query",train +snips_built_in_intents,None,None,"categorize_query_brief",train +snips_built_in_intents,None,None,"query_intent",train +) + +DATASET_AND_CONFIG="\${DATASETS_AND_CONFIGS_VAL[\$SLURM_ARRAY_TASK_ID]}" +echo "\$ARGUMENT" + +# Run T0 evaluation +# For PrefixLM add --prefixlm +IFS=',' read dataset_name dataset_config_name template_config_name template_name split <<< "\${DATASET_AND_CONFIG}" +python t-zero/evaluation/run_eval.py \ + --dataset_name "\$dataset_name" \ + --dataset_config_name 
"\$dataset_config_name" \ + --template_config_name "\$template_config_name" \ + --template_name "\$template_name" \ + --split "\$split" \ + --model_name_or_path "\$CHECKPOINT_PATH" \ + --output_dir "\$OUTPUT_DIR" \ + --per_device_eval_batch_size 4 \ + --max_length 2048 \ + --dtype float16 +EOT + +sbatch $eval_script + + +lm_eval_script="./lm_eval_$i.slurm" +cat < $lm_eval_script +#!/bin/bash +#SBATCH --job-name=lmeval +#SBATCH --nodes=1 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! +#SBATCH --cpus-per-task=8 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:1 # number of gpus +#SBATCH --constraint=a100 +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --account=six@a100 +#SBATCH --array=0-22 + +set -x -e + +source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0 +conda activate muennighofflmevalgen + +echo "START TIME: $(date)" + +# defining the right environment variables +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 +export TOKENIZERS_PARALLELISM=false + +# Converted transformer checkpoint +MODEL_CKPT=$OUTPUTCKPT + +cd /gpfsscratch/rech/six/commun/experiments/muennighoff/lm-evaluation-harness + + +DATASETS_AND_CONFIGS=( +wmt14_fr_en,fr-en,"version-en-fr-target" +wmt14_fr_en,fr-en,"a_good_translation-en-fr-target" +wmt14_fr_en,fr-en,"a_good_translation-en-fr-source+target" +wmt14_fr_en,fr-en,"xglm-en-fr-target" +wmt14_fr_en,fr-en,"gpt3-en-fr" +wmt14_fr_en,fr-en,"version-fr-en-target" +wmt14_fr_en,fr-en,"a_good_translation-fr-en-target" +wmt14_fr_en,fr-en,"a_good_translation-fr-en-source+target" +wmt14_fr_en,fr-en,"xglm-fr-en-target" +wmt14_fr_en,fr-en,"gpt3-fr-en" +wmt14_hi_en,hi-en,"version-en-hi-target" +wmt14_hi_en,hi-en,"a_good_translation-en-hi-target" +wmt14_hi_en,hi-en,"a_good_translation-en-hi-source+target" +wmt14_hi_en,hi-en,"xglm-en-hi-target" +wmt14_hi_en,hi-en,"gpt-3-en-hi-target" +wmt14_hi_en,hi-en,"version-hi-en-target" +wmt14_hi_en,hi-en,"a_good_translation-hi-en-target" +wmt14_hi_en,hi-en,"a_good_translation-hi-en-source+target" +wmt14_hi_en,hi-en,"xglm-hi-en-target" +wmt14_hi_en,hi-en,"gpt-3-hi-en-target" +mlsum_es,"es","layman_summ_es" +mlsum_es,"es","palm_prompt" +mlsum_es,"es","summarise_this_in_es_few_sentences" +) + +DATASET_AND_CONFIG="\${DATASETS_AND_CONFIGS[\$SLURM_ARRAY_TASK_ID]}" +echo "\$ARGUMENT" + +IFS=',' read dataset_name lang template_name <<< "\${DATASET_AND_CONFIG}" + +# Use this fork of lm-eval: https://github.com/bigscience-workshop/lm-evaluation-harness/pull/109 +python main.py \ + --model_api_name 'hf-causal' \ + --model_args "pretrained=\$MODEL_CKPT,use_accelerate=True,tokenizer=\$MODEL_CKPT,dtype=float16" \ + --device cuda \ + --batch_size 16 \ + --no_tracking \ + --task_name "\$dataset_name" \ + --template_names "\$template_name" \ + --bootstrap_iters 10 \ + --limit 3000 + +mkdir -p "$OUTPUTCKPT/evaluation/\$dataset_name" +mv "outputs/*$CKPT*\$dataset_name*" "$OUTPUTCKPT/evaluation/\$dataset_name/" + +echo "END TIME: $(date)" +EOT + +sbatch $lm_eval_script + + +done diff --git a/train/tr1-13B-base/README.md b/train/tr1-13B-base/README.md new file mode 100644 index 0000000000000000000000000000000000000000..762fbb47ef5551ba86469bd6e4717d7e5a5ee8eb --- /dev/null +++ 
b/train/tr1-13B-base/README.md @@ -0,0 +1,850 @@ +# Train 1 - 13B - unmodified Megatron gpt2 - baseline + + +## Task + +Auto-regressive objective using regular Megatron-LM GPT2 language model + +## Environment + +To launch the environment use [start-tr1-13B](./start-tr1-13B) + +``` +source $six_ALL_CCFRWORK/code/tr1-13B/bigscience/train/tr1-13B-base/start-tr1-13B +``` + +We are using the following branches specific to this training: + +- `$six_ALL_CCFRWORK/code/tr1-13B/Megatron-DeepSpeed-tr1-13B` a frozen `tr1-13B` branch - can cherry pick from `main` if need be. +- `$six_ALL_CCFRWORK/code/tr1-13B/DeepSpeed-big-science` - a mostly frozen `big-science` branch - under Deepspeed's team control - so it may also require a specific SHA if something gets broken upstream. + + +How the environment was built: +``` +export CONDA_ENVS_PATH=$six_ALL_CCFRWORK/conda + +conda create -y -n tr1-13B python=3.8 +conda activate tr1-13B +conda install pytorch==1.8.1 torchvision cudatoolkit=10.2 -c pytorch -y +pip install deepspeed +pip install tensorboard + +mkdir $six_ALL_CCFRWORK/code/tr1-13B + +cd $six_ALL_CCFRWORK/code/tr1-13B +git clone https://github.com/bigscience-workshop/bigscience + +cd $six_ALL_CCFRWORK/code/tr1-13B +git clone https://github.com/huggingface/transformers +cd transformers +pip install -e . + +cd $six_ALL_CCFRWORK/code/tr1-13B +git clone https://github.com/bigscience-workshop/Megatron-DeepSpeed Megatron-DeepSpeed-tr1-13B +cd Megatron-DeepSpeed-tr1-13B +git checkout tr1-13B +pip install -r requirements.txt +pip install -e . +mkdir data +cd data +wget https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-vocab.json +wget https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-merges.txt +``` + +`apex` and `deepspeed` build require an instance w/ beefy cpu and internet (unless cloned beforehand), so continue on the `prepost` partition: + +``` +ssh jean-zay-pp +conda activate tr1-13B +export CONDA_ENVS_PATH=$six_ALL_CCFRWORK/conda + +cd $six_ALL_CCFRWORK/code/tr1-13B +git clone https://github.com/microsoft/DeepSpeed DeepSpeed-big-science +cd DeepSpeed-big-science +git checkout big-science +rm -rf build +TORCH_CUDA_ARCH_LIST="7.0" DS_BUILD_CPU_ADAM=1 DS_BUILD_AIO=1 DS_BUILD_UTILS=1 pip install -e . --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check 2>&1 | tee build.log + +cd $six_ALL_CCFRWORK/code/tr1-13B +git clone https://github.com/NVIDIA/apex +cd apex +pip install --global-option="--cpp_ext" --global-option="--cuda_ext" --no-cache -v --disable-pip-version-check . 2>&1 | tee build.log + +#cp $six_ALL_CCFRWORK/code/tr1-13B/bigscience/train/tr1-13B-base/start-tr1-13B ... + +``` + + +## Architecture + +Config: + +``` +NLAYERS=40 +NHIDDEN=5120 +NHEADS=32 +FFN_HIDDEN_SIZE=20480 + +# --ffn_hidden_size $FFN_HIDDEN_SIZE \ +GPT_ARGS=" \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --ffn-hidden-size $FFN_HIDDEN_SIZE \ + --num-attention-heads $NHEADS \ + [...] 
+ " +``` + +Sanity check: +``` +$ VOCAB_SIZE=50257 NLAYERS=40 NHIDDEN=5120 NHEADS=32 SEQ_LEN=2048; python -c "h=$NHIDDEN; l=$NLAYERS; s=$SEQ_LEN; v=$VOCAB_SIZE; print(f'Model size: {(l * (12*h**2 + 13*h) + (v * h) + (s * h) ) / 10**9 :.0f}B')" +Model size: 13B +``` + + + +## Sequence Length + +Default Megatron-LM language model with 2048 tokens sequence length + +``` +SEQ_LEN=2048 + + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + +``` + + +## Global batch size + +GBS = Global Batch Size + +Use a schedule: + +- start from 32k tokens (gbs=16) +- increase linearly to 2048k (gbs=1024) over 5M samples (for a total of ~10B tokens / 5k steps) +- then continue at 2048k (gbs=1024) for 145M samples (290B tokens / 145K steps) + +Total: 300B tokens (150K steps) + +Note: the training script wasn't updated when we flipped seqlen/gbs from 1024/2048 to 2048/1024, so we are currently planning to train for 300K steps (samples) and 600B tokens. But since longer doesn't impact anything, we will just stop at half the time. I updated the document to use the right 150K number so we don't repeat this mistake in the next training. + +syntax: +``` +--rampup-batch-size +``` + +At seqlen 2048 (1k tokens is bs=1), we get: + +``` + --rampup-batch-size 16 16 5_000_000 \ + --global-batch-size 1024 \ +``` + +This means it will start with global batch size 16 and over 63 (`(1024-16)/16`) intervals will increase the +batch size by 16 linearly to 1024. + +79365 (`5_000_000/63`) is the number of samples before the next GBS increment. That is we run at GBS=16 for 79365 samples, or 4960 steps (`79365/16`). Then we run at GBS=32 for 79365 samples, or 2480 steps. Then 1653 steps at GBS=48, 1240 at GBS=64, etc.... + +Notes: +* `--rampup-batch-size` requires the use of `--train-samples` and can't be used with `--train-iters`. +* global batch size has to be divisible by micro-batch-size * DP_SIZE + +Important: the software will fail if GBS is not divisible by `MBS * DP_SIZE`. +Though Jared's recommendation is to use MBS=1 and then it's much easier to match GBS/DP_SIZE even at GBS=16. + +`DP_SIZE=$NNODES*$GPUS_PER_NODE/($PP_SIZE*$TP_SIZE)` + +Since the increments are in GBS=16, we can do only DP_SIZE=16, which means that at most we can use 32 nodes (`32*4/(4*2)=16`). + +Once GBS reaches 1024, we can use up to 8192 GPUs (1024*2*4), so we will be able to switch to 64 nodes or may be even 128 nodes (4 gpus each). We can't use any number of nodes between 64 and 128 though, because the number has to be 2**X. So 96 nodes won't work, because it has a multiplier of 3 there. + + + + +## Checkpoints + +We need the checkpoints: + +1. in order to be able to resume the training when the training is prematurely stopped for whatever reason. +2. In addition a special saving schedule has been requested by the interpretabity group. + +Because there are 3 different schedules, and Megatron-LM has only fixed checkpoint saving schedule, we will need 3 different run scripts, to be launched in a sequence, each starting once the previous has finished. + +1. steps 1-100 - 10 checkpoints, interval 10 steps +2. steps 101-1000 - 50 checkpoints, interval 18 steps +3. steps 1001-150K - 100+ checkpoints, interval 1500 steps +4. if still needed, can continue with schedule 3 + +note: the interoperability study doesn't care for checkpoints in the range of 1k-20k, so we only save those to be able to restart the training. 
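+
+Since all of these schedules are reasoned about in steps while the batch size is still ramping up, it helps to double-check the rampup arithmetic. Here is a tiny Python sketch (ours, not Megatron code) that merely reproduces the numbers quoted in the Global batch size section above, assuming `--rampup-batch-size 16 16 5_000_000` and GBS=1024:
+
+```
+# Sanity-check of the batch size rampup arithmetic quoted above.
+START_GBS, INCREMENT, RAMPUP_SAMPLES, FINAL_GBS = 16, 16, 5_000_000, 1024
+
+def rampup_stages():
+    """Yield (gbs, samples_at_this_gbs, steps_at_this_gbs) per rampup stage."""
+    n_increments = (FINAL_GBS - START_GBS) // INCREMENT       # 63 intervals
+    samples_per_stage = RAMPUP_SAMPLES // n_increments        # 79365 samples
+    for i in range(n_increments):
+        gbs = START_GBS + i * INCREMENT
+        yield gbs, samples_per_stage, samples_per_stage // gbs
+
+total_steps = total_samples = 0
+for gbs, samples, steps in rampup_stages():
+    if gbs <= 64:
+        print(f"GBS={gbs:4d}: {steps} steps")                 # 4960, 2480, 1653, 1240
+    total_steps += steps
+    total_samples += samples
+print(f"rampup total: ~{total_samples:_} samples over ~{total_steps:_} steps")
+```
+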
+ +It'd have been +``` +ROUND=1 +if [[ ${ROUND} == 1 ]]; then TRAIN_ITER=100 SAVE_INTERVAL=10 +elif [[ ${ROUND} == 2 ]]; then TRAIN_ITER=1000 SAVE_INTERVAL=18 +elif [[ ${ROUND} == 3 ]]; then TRAIN_ITER=150000 SAVE_INTERVAL=1500 +else echo "invalid ROUND: $ROUND" +fi + --train-iter $TRAIN_ITER \ + --save-interval $SAVE_INTERVAL \ +``` + +Unfortunately, `--rampup-batch-size` can't work with `--train-iter` and we have to use `--train-samples` instead. It has to be fixed through all of trainings and can't be changed, otherwise resume from checkpoint will break. + +So the only thing left is to use `--exit-interval` which is in steps. + +Which gives us the three rounds: + +``` +ROUND=1 +if [[ ${ROUND} == 1 ]]; then EXIT_INTERVAL=100 SAVE_INTERVAL=10 +elif [[ ${ROUND} == 2 ]]; then EXIT_INTERVAL=900 SAVE_INTERVAL=18 +elif [[ ${ROUND} == 3 ]]; then SAVE_INTERVAL=1500 +else echo "invalid ROUND: $ROUND" +fi + + --train-samples 150_000_000 \ + --exit-interval $EXIT_INTERVAL \ + --save-interval $SAVE_INTERVAL \ +``` + +`--exit-interval` counts steps only for the current run, regardless of previous steps. So to stop at effective step 1000, the second round we tell it to exit at 900 (the first round did the first 100). + +And unfortunately, this proved to be not supported by Megatron-LM either at the moment. There are a few possible ways to approach this: + +1. One approach is to simply use 3 independent trainings, while using the same `--seed ` and just have `--exit_interval` as above. Though after each training moving the checkpoints away. + +2. +XXX: Also megatron code could be extended to implement `--exit-samples` - so sample-based exit strategy + +3. Yet another approach is to do it manually. Kill the training after 100, and then restart and kill after 900 iterations, while changing the save interval, and manually fixing up the `checkpoints/latest` to point to the correct checkpoint - since the manual killing might have a few extra checkpoints. So the recipe to follow: + +``` +ROUND=1 +if [[ ${ROUND} == 1 ]]; then SAVE_INTERVAL=10 +elif [[ ${ROUND} == 2 ]]; then SAVE_INTERVAL=18 +elif [[ ${ROUND} == 3 ]]; then SAVE_INTERVAL=1500 +else echo "invalid ROUND: $ROUND" +fi + + --train-samples 150_000_000 \ + --save-interval $SAVE_INTERVAL \ +``` + +(could also do it with 3 parallel jobs by using the same seed!) + +``` +--seed 42 +``` + +Therefore do this manually: + +0. +* delete the old checkpoints `$six_ALL_CCFRSCRATCH/checkpoints/tr1-13B/checkpoints` + +1. + +* set to `ROUND=1` +* `sbatch tr1-13B-round1.slurm` +* run for 100+ steps +* scancel the job +* clean up `$six_ALL_CCFRSCRATCH/checkpoints/tr1-13B/checkpoints` to remove any checkpoints beyond 100 +* make sure `$six_ALL_CCFRSCRATCH/checkpoints/tr1-13B/checkpoints/latest` contains 100 + + +2. + +* set to `ROUND=2` +* `sbatch tr1-13B-round1.slurm` +* run for the additional 900+ steps (it's incremental, so the script already knows it started at 100) +* scancel the job +* clean up `$six_ALL_CCFRSCRATCH/checkpoints/tr1-13B/checkpoints` to remove any checkpoints beyond 1000 +* make sure `$six_ALL_CCFRSCRATCH/checkpoints/tr1-13B/checkpoints/latest` contains 1000 + + +3. + +* set to `ROUND=3` +* `sbatch tr1-13B-round1.slurm` +* run normally + + + +Because it'd be potentially too demanding to export TBs of data and the intended users might not be even able to download all that data, most likely we will need to run the interpretabity post-analysis experiments on JZ and send the reports to those who need the reports. 
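+
+Since the cleanup steps in the manual recipe above (pruning the checkpoints that spill past a round's boundary and double-checking `latest`) are easy to get wrong by hand, here is a small hypothetical helper sketch. It only reports what to clean up and never deletes anything itself; the `latest` file name follows the layout referenced in this document, and the script name and arguments are made up for illustration:
+
+```
+# Hypothetical round-boundary checker: reports checkpoints past the round's
+# last step and what `latest` currently points to. Nothing is deleted.
+import re, sys
+from pathlib import Path
+
+def check_round(checkpoints_dir, last_step):
+    root = Path(checkpoints_dir)
+    for d in sorted(root.glob("global_step*")):
+        digits = re.sub(r"\D", "", d.name)
+        if digits and int(digits) > last_step:
+            print(f"beyond step {last_step}, remove: {d}")
+    latest = root / "latest"
+    if latest.exists():
+        print(f"latest -> {latest.read_text().strip()} (should match {last_step})")
+
+if __name__ == "__main__":
+    # e.g.: python check_round.py $six_ALL_CCFRSCRATCH/checkpoints/tr1-13B/checkpoints 100
+    check_round(sys.argv[1], int(sys.argv[2]))
+```
+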
+ +Megatron-LM resumes from the most recent checkpoint by default. Does it need the exact path or does it auto-discover the latest checkpoint by default. + +``` +--load path_to_check_point \ +``` + + +Remi suggests 100TB on SCRATCH shouldn't be a problem. + + + + + +## Optimizer + +- AdamW,  β1=0.9, β2=0.999 eps=1e−8 +- learning rate: + * peak=1e-4 + * warmup over 2000 steps + * cosine decay for learning rate down to 10% of its value, over 260B tokens (after 260 billion tokens, training continues at 10% of the original learning rate) +- clipping by global norm of 1 (as in GPT-3) +- weight decay of 0.1 + +We need lr-decay in samples, so tokens2samples = 260B / 2048 = 126_953_125 + +We need lr-warmup in samples, so doing the math again as in checkpoints + +2000=160*12+80 + +so we will get to 2000 in 216_320 samples `16*160*12*(12+1)/2+16*13*80` + + + +``` + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.999 \ + --adam-eps 1e-8 \ + --lr 1e-4 \ + --min-lr 1e-5 \ + --lr-decay-style cosine \ + --lr-decay-samples 126_953_125 \ + --lr-warmup-samples 216_320 \ + --clip-grad 1.0 \ + --weight-decay 1e-1 \ +``` + + +## Logging + + +For now enable all tensorboard features, later we might decide to not log it all. + +We are logging: + +- lr (enabled by default) +- bs (enabled) +- loss (always) +- loss-scale (log_loss) (enabled by default) +- grad-norm (always) +- num-zeros (always) +- param-norm (always) +- timers (enabled) +- validation loss (always) +- validation ppl (perplexity) (enabled) + +almost all of these are also logged as a comparison to consumed_train_samples + +XXX: nice to have: +- throughput - Tflops/gpu or tokens + + +**Tensorboard config**: + +``` +TENSORBOARD_PATH=$DATA_OUTPUT_PATH/tensorboard + + --tensorboard-dir $TENSORBOARD_PATH \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ +``` + +**CodeCarbon config**: + +``` +CODECARBON_PATH=$DATA_OUTPUT_PATH/codecarbon + + --codecarbon-dir $CODECARBON_PATH \ +``` + + + +**Training logs** + +All training logs are piped into `$six_ALL_CCFRSCRATCH/checkpoints/tr1-13B/logs/main_log.txt`. + + +## Exporting + +Before starting training create cloned git repos to where output data will go. + +The last 4 should all be git repo clones +``` +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr1-13B +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints +TENSORBOARD_PATH=$DATA_OUTPUT_PATH/tensorboard +CODECARBON_PATH=$DATA_OUTPUT_PATH/codecarbon +LOGS_PATH=$DATA_OUTPUT_PATH/logs +``` + +I created 4 repos at https://huggingface.co/bigscience/ and now we can clone those as the dirs data will be output into: + +``` +cd $six_ALL_CCFRSCRATCH/checkpoints/tr1-13B +git clone https://huggingface.co/bigscience/tr1-13B-checkpoints checkpoints +git clone https://huggingface.co/bigscience/tr1-13B-tensorboard tensorboard +git clone https://huggingface.co/bigscience/tr1-13B-codecarbon codecarbon +git clone https://huggingface.co/bigscience/tr1-13B-logs logs +``` + +If this is your first time running git-lfs on this system, you need to init it once: +``` +module load git-lfs +git lfs install +``` + +Most of the data types we are going to sync will be large or huge, and most are already lfs-tracked by default, so no setup is required. 
Except our log file, which can also grow large, so we need to set it up:
+
+```
+cd logs
+git-lfs track "*.txt"
+git commit -m "large text files" .gitattributes
+git push
+```
+
+### Cronjobs to auto-sync the hub
+
+Now we just need a cronjob that automatically does the following for each type of data we export:
+
+```
+cd checkpoints
+git add */*.pt
+git commit -am "new data"
+git push
+```
+
+This job is performed automatically by `hub-sync.py`. For full details see: [Automated upload to the hub](../../data/export.md#automated-upload-to-the-hub).
+
+**Weights checkpoints**
+
+Currently, we aren't exporting checkpoints.
+
+**Tensorboard**
+
+Here is the slurm script to sync the tensorboard data: [tr1-13B-hub-sync-tensorboard.slurm](./tr1-13B-hub-sync-tensorboard.slurm)
+
+**CodeCarbon**
+
+Currently the feature is not enabled, so there is nothing to log.
+
+**Log of logs**
+
+Let's also create a log of logs. We will pipe all the logs in there and also the various status reports - e.g. while SLURM has the training queued and it's not yet running.
+
+Here is the slurm script to sync the raw logs data: [tr1-13B-hub-sync-logs.slurm](./tr1-13B-hub-sync-logs.slurm)
+
+The main source of logs is the training scripts. The logs are gathered via
+```
+$CMD ... 2>&1 | tee -a $LOGS_PATH/main_log.txt
+```
+in the training slurm script.
+
+XXX: we could also add various other diagnostics appended to the main log file, e.g. shared memory usage, etc.
+
+
+
+
+## Deepspeed config
+
+Using Deepspeed's activation checkpointing to use a lot less GPU memory:
+
+```
+    --deepspeed-activation-checkpointing \
+```
+
+Possible extras:
+
+- Enabling `"contiguous_memory_optimization": true,` can help to reduce memory fragmentation, but it requires setting `number_checkpoints`. This should be set to the number of transformer blocks per pipeline stage times the number of pipeline parallel stages. Samyam says: Full disclaimer: I have only used this with ZeRO but not with pipeline parallelism. But by setting the number_checkpoints as described, it should work for PP too. The benefit of using it is usually only apparent when running very close to the memory limit.
+
+
+
+## Dataset
+
+- Full 304.2M version (529GB): `$six_ALL_CCFRWORK/datasets-custom/oscar-en`
+- Tiny 10K version (56M): `$six_ALL_CCFRWORK/datasets-custom/oscar-en-10k`
+
+We are using the English-only subset of [the OSCAR dataset](https://huggingface.co/datasets/oscar) with full documents (*not* individual sentences).
+
+We have about 300M records in 1.2TB of jsonl data (about 3/4 of which are smaller than 1K tokens), which amounts to about 280B tokens (estimated at about 4.5 chars/word).
+
+Megatron's preprocessing tool indexes everything and then at training time the Dataloader serves chunks of the desired fixed sequence length (2048 tokens in our case).
+
+For more information on the pre-processing process and various estimations see: [OSCAR](../../data/oscar/README.md).
+
+
+
+## Dealing with 20h SLURM limit
+
+First, let's ensure we save a checkpoint just before SLURM kills the job.
+
+Let's try 19:50, i.e. 1190 minutes (`60*20-10`):
+
+```
+    --exit-duration-in-mins 1190 \
+```
+
+For the bigger models 10min might not be long enough to finish an iteration (assume the limit hits right as one starts) and write out a checkpoint.
+
+Then we need to figure out how to schedule the next slurm job as soon as the currently running one is over in 20h.
+
+We will use job arrays to solve this.
Let's start with just 10 such jobs: + +``` +sbatch --array=1-10%1 tr1-13B-round1.slurm +``` + +`%1` limits the number of simultaneously running tasks from this job array to 1, since we want them to run in a sequence. + +Alternatively, as always this param can be part of the script: +``` +#SBATCH --array=1-10%1 +``` + +## Crontab + +JZ doesn't have a user-accessible crontab facility, so we have to emulate it with a self-restarting slurm job that polls some dir for new jobs to run. For full details on how this works please see [Crontab Jobs](../../jz/crontab/). + +But to use it simply put your slurm scripts into either: +``` +$six_ALL_CCFRWORK/cron/cron.hourly +$six_ALL_CCFRWORK/cron/cron.daily +``` + +and the jobs will be run on hourly or daily basis. This is similar to Linux's `/etc/cron.*` setup. Except the jobs aren't guaranteed to start on the hour, but should be around that time. + +Currently we have: + +``` +ls -1 $six_ALL_CCFRWORK/cron/cron.hourly/*slurm +tr1-13B-hub-sync-logs.slurm +tr1-13B-hub-sync-tensorboard.slurm +tr1-13B-slurm-status.slurm +``` + +The first 2 sync log files to the hub and the last one monitors the health of the training and alerts of any problems. + + +## Estimated run time + +Best case scenario when training 24/7 on 64 nodes with 4 gpus each: +``` +$ python -c 'Btokens=300; Bmodel=13; n_gpus=256; Tflops=45; \ +print(f"{Btokens*1e9*8*Bmodel*1e9/(n_gpus*Tflops*1e12*60*60*24):0.2f} days")' +31.35 days +``` + +You will find the detailed explanation of the estimation formula [here](../../math/README.md#estimate-model-training-time). + +The training was much slower in the first 10k steps because of the batch size rampup, where the pipeline was very inefficient. + +And then we were only able to use 20h slurm jobs, with unpredictable gaps of wait time in between (1-30 hours!), so it's impossible to predict when the finish line will be finished. + + +## Memory usage + +During training currently we use 256GB (8x 32GB gpus) per each full replica (TP=2 + PP=4), the rest are ZeRO-DP. So if we throw x times more GPUs we just speed things up by having more 2-node replicas. +The required memory breakdown: + +1. 4B for fp32 weights +2. 2B for fp16 weights +3. 8B for optimizer states. +4. 4B for gradients (we don't save these in the checkpoint) +5. plus memory for activations and temps, which total majorly depends on the seqlen and mini batch size - and since we use activation checkpointing this memory need is quite small. + +Total: 234GB (18*13) plus activations and temps memory. So we are close to 256GB here. + +Activation memory would have been much much bigger if it weren't for activation checkpointing. + + +## Checkpoint Back Up + +To copy multiple checkpoints excluding optimizer states. First move the desired checkpoints to back up to some dedicated dir, e.g. 
`tr1-13B-round2/checkpoints`, then copy just the needed files: + +``` +srun -p prepost -A six@cpu --time=20:00:00 --pty bash +mkdir to-upload +rsync -acvhu --no-compress --info=progress2 --exclude "zero*pt" tr1-13B-round2/checkpoints/ to-upload +``` + +then to back those up: + +``` +cp -arun $six_ALL_CCFRSCRATCH/checkpoints/to-upload/* $six_ALL_CCFRSTORE/checkpoints/tr1-13B +``` + + +**Final checkpoint with optimizer states:** + +``` +mkdir $six_ALL_CCFRSTORE/checkpoints/tr1-13B-with-optim +cp -arun $six_ALL_CCFRSCRATCH/checkpoints/tr1-13B/checkpoints/global_step168000 $six_ALL_CCFRSTORE/checkpoints/tr1-13B-with-optim/ +``` + +This is the final checkpoint, that can be resumed from at will: + +``` +$six_ALL_CCFRSTORE/checkpoints/tr1-13B-with-optim/global_step168000 +``` + +Here is the corresponding log: +``` + iteration 168000/ 311541 | consumed samples: 153013584 | elapsed time per iteration (ms): 13248.2 | learning rate: 1.000E-05 | global batch size: 1024 | lm loss: 2.376641E+00 | loss scale: 131072.0 | grad norm: 19767.052 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | +time (ms) +-------------------------------------------------------------------------------------------------- + validation loss at iteration 168000 | lm loss value: 2.342049E+00 | lm loss PPL: 1.040253E+01 | +-------------------------------------------------------------------------------------------------- +``` + +## Checkpoint Conversion and Upload + + +**Important**: there was a bug in the converter on the transformers side, so we need this fix: +https://github.com/huggingface/transformers/pull/13735 +if it's not merged yet, install this branch first. If it's already merged just make sure you use `transformers@master` - XXX: I will update the script to require a specific version once a new version of transformers is released. + + +Open a long running interactive shell: +``` +srun -p compil --cpus-per-task=40 -A six@cpu --time=6:00:00 --pty bash +``` +then convert: + +``` +cd $six_ALL_CCFRSCRATCH/checkpoints/to-upload +time find * -maxdepth 0 -type d -name "global_step*" -exec $six_ALL_CCFRWORK/code/Megatron-DeepSpeed/tools/convert_checkpoint/deepspeed_to_transformers.py --input_folder {} --output_folder hf-fixed/{} \; +``` + +It takes about 100sec per 26GB checkpoint. + +The results will be all under `hf/`. + +Now to uploading to the hub. + +Prepare the target dir: + +``` +#git -c http.extraHeader="Authorization: Basic " clone https://huggingface.co/bigscience/tr1-13B-checkpoints/ + +cd tr1-13B-checkpoints + + +huggingface-cli lfs-enable-largefiles . + +git config --unset user.email +~/prod/code/bigscience/tools/hub-sync.py --repo-path . --patterns '*bogus*' +``` +We are going to put each checkpoint into its own branch with the same name. + +``` +mv ../hf/global_step* . +time find * -maxdepth 0 -type d -name "global_step*" -exec git checkout main \; -exec git checkout -b {} \; -exec git add {} \; -exec git commit -m "add {}" \; -exec git push --set-upstream origin {} \; +git checkout main +``` + +Fixing up failed pushes / verifying that all pushes went through, re-pushing if needed + +``` +git branch | perl -lne 'm|(global_step\d+)| && print qx[git checkout $1; git push --set-upstream origin $1]' +``` + +If `git push` fails re-run with: `GIT_TRACE=1 GIT_TRANSFER_TRACE=1 GIT_CURL_VERBOSE=1 git push` to see what the actual error is. + + +OK, the branch-per-checkpoint hub repo proved to be very difficult to upload and even more so using it after the upload. 
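+
+Before moving on, here is a rough sketch of the kind of check one could script to verify that every checkpoint got its own branch on the hub (the "verifying that all pushes went through" step above). It is a hypothetical helper, assuming it runs from inside the cloned checkpoints repo and that branch names match the local `global_step*` directory names:
+
+```
+# Rough check: compare local global_step* dirs against the remote branches
+# and report any checkpoint whose branch never made it to the hub.
+import subprocess
+from pathlib import Path
+
+def remote_branches():
+    out = subprocess.run(["git", "ls-remote", "--heads", "origin"],
+                         capture_output=True, text=True, check=True).stdout
+    # each line looks like: "<sha>\trefs/heads/<branch>"
+    return {line.split("refs/heads/")[-1] for line in out.splitlines() if line}
+
+def local_checkpoints():
+    return {d.name for d in Path(".").glob("global_step*") if d.is_dir()}
+
+missing = sorted(local_checkpoints() - remote_branches())
+for name in missing:
+    print(f"branch missing on the hub, needs a re-push: {name}")
+if not missing:
+    print("all checkpoint branches are on the remote")
+```
+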
+ +So let's try GCS bucket: + +``` +gcloud auth login +gcloud config set project bigscience +gsutil cp -r hf-fixed/* gs://bigscience-backups/tr1-13B/checkpoints/ + +``` +or via rsync: +``` +gsutil -m rsync -r hf-fixed/* gs://bigscience-backups/tr1-13B/checkpoints/ +``` + +``` +start-prod +cd /gpfsssd/scratch/rech/six/commun/checkpoints/to-upload/ +gsutil -m rsync -r hf-fixed1/* gs://bigscience-backups/tr1-13B/checkpoints/ + +``` + +or if needed to speed up the upload via multiple parallel copies open 2 `srun` instances and in one: +``` +gsutil cp -r hf-fixed1/* gs://bigscience-backups/tr1-13B/checkpoints/ +``` +and in another: +``` +gsutil cp -r hf-fixed2/* gs://bigscience-backups/tr1-13B/checkpoints/ +``` + +can't use `rsync` with multiple sources - can only rsync a single dir. + +Later fixing `config.json` to include the correct `gelu_fast` activation correction and rsyncing the GCS bucket. + +(moved all the hf-fixed sub-dirs into a new folder `checkpoints`) + +``` +start-prod +cd /gpfsssd/scratch/rech/six/commun/checkpoints/to-upload/ +perl -pi -e 's|gelu|gelu_fast|' checkpoints/*/config.json +gsutil -m rsync -x ".*bin$" -r checkpoints gs://bigscience-backups/tr1-13B/checkpoints +``` +this is really fast since we exclude the checkpoint files (`-x ".*bin$"`) + + +## Other backups + +Logs: + +``` +mkdir $six_ALL_CCFRSTORE/checkpoints/tr1-13B-logs/ +tar -zcvf $six_ALL_CCFRSTORE/checkpoints/tr1-13B-logs/tensorboard.tgz $six_ALL_CCFRSCRATCH/checkpoints/tr1-13B/tensorboard +tar -zcvf $six_ALL_CCFRSTORE/checkpoints/tr1-13B-logs/logs.tgz $six_ALL_CCFRSCRATCH/checkpoints/tr1-13B/logs +``` + +note: codecarbon wasn't ready during this training, so nothing to back up there. + + +## Exports + +- GCS https://console.cloud.google.com/storage/browser/bigscience +- The Hub https://huggingface.co/bigscience + + +## Training scripts + +The training script is: + +- [tr1-13B-round1.slurm](./tr1-13B-round1.slurm) + +We also have: + +- [tr1-13B-short.slurm](./tr1-13B-short.slurm) + +which is a very small model to do quick testing and debug, but otherwise the same as the main script. + +The scripts are located at: + +``` +cd $six_ALL_CCFRWORK/code/tr1-13B/bigscience/train/tr1-13B-base +``` + +When no jobs are scheduled, currently we launch the main training script using: + +``` +sbatch --array=1-5%1 tr1-13B-round1.slurm +``` +This will schedule 5 20h-trainings which will run one at a time, once the scheduler yields to the request, with unknown wait time in between each job. + +If there is a job running already, **do not use the above command** as we can't have 2 trainings overlap. If there is a training already running you can: + +1. either tell `sbatch` to start the new job once the currently running job succeeds, using: + +``` +sbatch --dependency=CURRENTLY_RUNNING_JOB_ID --array=1-5%1 tr1-13B-round1.slurm +``` + +Where `CURRENTLY_RUNNING_JOB_ID` is the job being reported running. For example if the report of the last job is: +``` +[2021-08-16 22:08:01] tr1-13B-round3 is running for 18:15:59 since 2021-08-16T03:52:02 (711114_4 on 'gpu_p13' partition (r7i4n[1-7],r7i7n[1-8],r8i0n0,r8i5n[3-8],r8i6n[0-8],r9i0n8,r9i1n[0-8],r9i2n[7-8],r9i3n[0-8],r9i4n[0-8],r9i5n[0-2]) +``` +then the currently running job ID is `711114_4`. You can also gather the same info about the current scheduler status using `squeue`: + +``` +squeue --user=$(getent group six | cut -d: -f4) | grep tr1-13B +``` + +2. 
you could also see how much time is left before the current job finished (based on training log files) and then pass that many hours to `sbatch`. For example, if the job has **less** than 2 hours to run, but more than 1 hour, you want to launch it `now+2hours` from now: + +``` +sbatch --begin now+2hours --array=1-5%1 tr1-13B-round1.slurm +``` + +Using `--dependency` may lead to shorter wait times, since if the time passed to `--begin` allows even for a few minutes of delay since the stopping of the last job, the scheduler may already start some other jobs even if their priority is lower than our job. That's because the scheduler ignores any jobs with `--begin` until the specified time arrives. + + +## On Call + +When a person is on call, they need to watch that the training is either running or scheduled to run. If neither is happening they need to schedule a new training. When this situation occurs the log file will report: + +``` +***ALERT: tr1-13B-round3.slurm is not RUNNING or SCHEDULED! Alert someone at Eng WG*** +``` + +An email alert is sent as well to `bigscience-jean-zay@groups.google.com`. + + +The next section explains how to watch the logs. + + +Other than waiting for the watchdog which runs once an hour, one can immediately see if anything is scheduled with: + +``` +$six_ALL_CCFRWORK/code/tr1-13B/bigscience/tools/slurm-status.py --job-name tr1-13B-round3 +``` + +If for some reason the training is not scheduled or running, to schedule a new training: + +``` +cd $six_ALL_CCFRWORK/code/tr1-13B/bigscience/train/tr1-13B-base +sbatch --array=1-5%1 tr1-13B-round1.slurm +``` + +This will schedule a job array of 5 jobs of 20h each, so if all goes well, that's at least 4 days of not needing to do anything other than being on the lookout for potential crashes. + +XXX: need a troubleshooting section, but elsewhere in the document that is not this training specific. + +1. if one of the nodes gets a corrupted gpu, and the training crashes there is a risk that the next job in the training will get allocated the same node, in which case it'll crash again. We need a method to identify which node is corrupted, report that to assist@idris.fr so they know to fix it and exclude this node from the slurm job by adding a list of nodes to exclude as following: + +``` +sbatch --exclude=r7i5n2,r7i5n6 ... +``` +but we currently have no way to identify which node is faulty. I think if we switch to pt-1.9.0 or higher where torch elastic replaces the usual launcher. Or we have to use dedicated log files per node via: `#SBATCH --output=%x-%j-%N.out`. + + +## Watching the training logs + +On JZ: +``` +tail -f $six_ALL_CCFRSCRATCH/checkpoints/tr1-13B/logs/main_log.txt +``` + +Outside of JZ: +``` +perl -e '$u=shift; $b=0; while(1){($e)=qx[curl -sI $u]=~/content-length: (\d+)/; \ +print qx[curl -sr $b-$e -L $u] if $e>$b; $b=$e; sleep 300}' \ +https://huggingface.co/bigscience/tr1-13B-logs/resolve/main/main_log.txt +``` +Currently the updates happen hourly, so this is a delayed version of `tail -f`. + + +## CodeCarbon + + +CodeCarbon wasn't ready until the training was over so we only did an additional 10h run to measure with and the to extrapolate to the whole training. + +https://huggingface.co/bigscience/tr1-13B-codecarbon + +This set of records captures the startup time and 2499 iterations in 2 records per gpu, since there was also an intermediary checkpoint saved half-way and we flush the CC records on each checkpoint saving. + +The training had 168000 iterations. Therefore multiply the reported data by 67. 
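+
+As a rough illustration of that extrapolation, here is a sketch that sums one numeric column across all the per-gpu csv reports and scales it by 67. The directory layout and the column name (`energy_consumed`) are assumptions about the CodeCarbon csv format, so check the actual headers before trusting the output:
+
+```
+# Rough extrapolation of the 10h CodeCarbon measurement to the full training:
+# sum a column over all per-gpu csv reports and scale by ~67 (168000/2499).
+import csv, glob, sys
+
+def total(column, pattern="codecarbon/*.csv"):
+    value = 0.0
+    for path in glob.glob(pattern):
+        with open(path, newline="") as f:
+            for row in csv.DictReader(f):
+                value += float(row.get(column) or 0)
+    return value
+
+col = sys.argv[1] if len(sys.argv) > 1 else "energy_consumed"  # assumed column name
+measured = total(col)
+print(f"measured {col}: {measured:.2f}")
+print(f"extrapolated to 168000 iterations (x67): {measured * 67:.2f}")
+```
+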
This extrapolation would be quite approximate since we were using 16 nodes during the rampup, then 64, and only for the last 3 weeks 128 nodes.
+
+Caveat emptor: I'm not sure whether the CC-reports overlap, since each report is per gpu and I think they may be measuring the same thing, other than the gpu itself. So this requires research.
+
+Each csv file contains a report for a single gpu/process. There are 512 reports.
+
+
+## Extras
diff --git a/train/tr1-13B-base/chronicles.md b/train/tr1-13B-base/chronicles.md
new file mode 100644
index 0000000000000000000000000000000000000000..326c2ec1293f0bab2a17d93169ac4a0593b5fe50
--- /dev/null
+++ b/train/tr1-13B-base/chronicles.md
@@ -0,0 +1,425 @@
+# tr1-13B Chronicles
+
+Notes on the training progress with a particular focus on any encountered problems and their diagnosis and solutions/prevention.
+
+To follow the training progress charts, see: [tensorboard](https://huggingface.co/bigscience/tr1-13B-tensorboard/tensorboard).
+
+To follow the raw training logs see: [logs](https://huggingface.co/bigscience/tr1-13B-logs/).
+
+
+## Round1 SAVE_INTERVAL=10
+
+NNODES=16
+
+saved a checkpoint every 10 steps
+
+`$six_ALL_CCFRSCRATCH/checkpoints/tr1-13B/tr1-13B-round1/checkpoints`
+
+10 checkpoints (every 10 steps, steps 1-100) - 4TB
+
+## Round2 SAVE_INTERVAL=18
+
+NNODES=16
+
+moved the round1's checkpoints away
+
+reran from scratch with the same seed
+
+saved a checkpoint every 18 steps
+
+`$six_ALL_CCFRSCRATCH/checkpoints/tr1-13B/tr1-13B-round2/checkpoints`
+
+51 checkpoints (every 18 steps, steps 101-1000) - 20TB
+
+
+## Round3 SAVE_INTERVAL=1500 NNODES=16
+
+NNODES=16
+
+moved the round2's checkpoints away
+
+reran from scratch with the same seed
+
+saved a checkpoint every 1500 steps
+
+I did the full re-run because otherwise I couldn't separate the tensorboard logs - it is not possible to restart from a checkpoint and change `TRAIN_ITER` or `EXIT_INTERVAL` on the fly; they have to stay fixed.
+
+Now we started uploading the tensorboard logs.
+
+
+## Round3 SAVE_INTERVAL=1500 NNODES=32
+
+Tried to switch to 64 nodes, but the training failed because GBS gets incremented by 16, which limits us to DP_SIZE=16 (with MBS=1), so we can do 32 nodes (128 gpus) at most.
+
+```
+DP_SIZE=$NNODES*$GPUS_PER_NODE/($PP_SIZE*$TP_SIZE)
+16 = 32*4/(4*2)
+```
+
+We will switch to 64 nodes once GBS reaches 1024.
+
+
+The training then crashed with a shared memory error after some 10h+ of training:
+```
+ERROR: Unexpected bus error encountered in worker. This might be caused by insufficient shared memory (shm).
+ERROR: Unexpected bus error encountered in worker. This might be caused by insufficient shared memory (shm).
+ERROR: Unexpected bus error encountered in worker. This might be caused by insufficient shared memory (shm).
+ERROR: Unexpected bus error encountered in worker. This might be caused by insufficient shared memory (shm).
+Traceback (most recent call last): + File "/gpfswork/rech/six/commun/conda/hf-prod/lib/python3.8/site-packages/torch/utils/data/dataloader.py", line 986, in _try_get_data +Traceback (most recent call last): + File "/gpfswork/rech/six/commun/conda/hf-prod/lib/python3.8/site-packages/torch/utils/data/dataloader.py", line 986, in _try_get_data + File "/gpfswork/rech/six/commun/conda/hf-prod/lib/python3.8/queue.py", line 179, in get + File "/gpfswork/rech/six/commun/conda/hf-prod/lib/python3.8/queue.py", line 179, in get + File "/gpfswork/rech/six/commun/conda/hf-prod/lib/python3.8/threading.py", line 306, in wait + File "/gpfswork/rech/six/commun/conda/hf-prod/lib/python3.8/threading.py", line 306, in wait + File "/gpfswork/rech/six/commun/conda/hf-prod/lib/python3.8/site-packages/torch/utils/data/_utils/signal_handling.py", line 66, in handler + File "/gpfswork/rech/six/commun/conda/hf-prod/lib/python3.8/site-packages/torch/utils/data/_utils/signal_handling.py", line 66, in handler +RuntimeError: DataLoader worker (pid 30882) is killed by signal: Bus error. It is possible that dataloader's workers are out of shared memory. Please try to raise your shared memory limit. +RuntimeError +The above exception was the direct cause of the following exception: +: Traceback (most recent call last): +DataLoader worker (pid 30801) is killed by signal: Bus error. It is possible that dataloader's workers are out of shared memory. Please try to raise your shared memory limit. File "/gpfswork/rech/six/commun/code/Megatron-DeepSpeed/pretrain_gpt.py", line 215, in +The above exception was the direct cause of the following exception: +Traceback (most recent call last): + File "/gpfswork/rech/six/commun/code/Megatron-DeepSpeed/pretrain_gpt.py", line 215, in + pretrain(train_valid_test_datasets_provider, model_provider, forward_step, + File "/gpfsssd/worksf/projects/rech/six/commun/code/Megatron-DeepSpeed/megatron/training.py", line 144, in pretrain + pretrain(train_valid_test_datasets_provider, model_provider, forward_step, + File "/gpfsssd/worksf/projects/rech/six/commun/code/Megatron-DeepSpeed/megatron/training.py", line 144, in pretrain + iteration = train(forward_step_func,iteration = train(forward_step_func, + File "/gpfsssd/worksf/projects/rech/six/commun/code/Megatron-DeepSpeed/megatron/training.py", line 675, in train + File "/gpfsssd/worksf/projects/rech/six/commun/code/Megatron-DeepSpeed/megatron/training.py", line 675, in train + train_step(forward_step_func, + train_step(forward_step_func, File "/gpfsssd/worksf/projects/rech/six/commun/code/Megatron-DeepSpeed/megatron/training.py", line 381, in train_step + File "/gpfsssd/worksf/projects/rech/six/commun/code/Megatron-DeepSpeed/megatron/training.py", line 381, in train_step + loss = model[0].train_batch(data_iter=data_iterator) +loss = model[0].train_batch(data_iter=data_iterator) +``` + +Each node has 94GB of /dev/shm, so it's very strange that this happened. + +``` +df -h | grep shm +tmpfs 94G 336K 94G 1% /dev/shm +``` +This is after 2h of training on one node. I wonder if the problem was on some specific node. + +Though Remi checked that all nodes used by the training that crashed had this exact setup. And all reported %1 usage. + + + +To continually diagnose the running nodes's shm memory usage: +``` +for ((;;)) { (srun --jobid 637799 --gres=gpu:0 $six_ALL_CCFRWORK/bin/report_shm_usage | grep -v "1%"); sleep 10; } +``` +after adjusting the jobid number. 
+ +where: +``` +cat $six_ALL_CCFRWORK/bin/report_shm_usage +#!/usr/bin/bash + +# print shared memory usage with the host + +echo $(hostname) $(df -h | grep /dev/shm) +``` + +The shared memory is used by `DataLoader` workers. We just use the default `args.num_workers==2` and 94GB of shm available on each node is a huge amount of shared memory. + +And given that we use TP+PP, a single node doesn't have DDP on it, so no multiproc on the local host. Currently one full model replica uses 2 full nodes (`TP*PP = 2*4 = 8`) So it's really a single Dataloader call per each 2 nodes. i.e. tiny tiny needs. + +If this happens again, setting `args.num_workers==0` will stop using shared memory, but it'll impact the data loading speed. + +Jared hasn't seen this problem in his experience. + +So at the moment we don't know what happened. + +2 more 20h trainings have been run since then w/o any problems. + +## Checking the progress + +Someone asked when the current training will complete: + +Let's do math: + +1. we are currently going at 784 samples in 32 seconds, or 24.5 samples / sec +2. roughly we have 145M samples to go, so at the current speed 32nodes if we manage to have 20h allocation every 24 hours we get about 82 days. (145_000_000/(20*60*60*24.5)) +3. we should reach GBS=1024 hopefully today and then we can crank up to 64nodes, which should roughly double the speed, so it'll take 41 days to complete if all goes well and we don't sit in the queue for more than 4 hours. +4. we can dare to try 128 nodes, which would quadruple the speed and we should be done in about 20 days. It's hard to tell how quickly the SLURM scheduler will provide such a large allocation - if more than half-day of wait time, we are probably better off with 64 nodes. + + +## Round3 SAVE_INTERVAL=1500 NNODES=64 + +Finally GBS is at 1024, so we can do 64 nodes. Clocking about 23-26 secs / iteration - the performance jumps around quite a lot from run to run. But we know that already about JZ - it's very unsteady and depends on network usage by others. + +Created a dedicated branch `tr1-13B`, which allows further development w/o the risk of breaking the current training. 
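+
+The "Checking the progress" arithmetic above generalizes to any node count, so here it is as a tiny sketch; the 64- and 128-node samples/sec values are just the rough 2x/4x guesses from that list, not measurements:
+
+```
+# Back-of-the-envelope ETA: remaining samples / (usable hours/day * samples/s).
+def days_left(remaining_samples, samples_per_sec, hours_per_day=20):
+    return remaining_samples / (hours_per_day * 60 * 60 * samples_per_sec)
+
+REMAINING = 145_000_000   # roughly what was left at the time of writing
+for nodes, sps in [(32, 24.5), (64, 2 * 24.5), (128, 4 * 24.5)]:
+    print(f"{nodes:3d} nodes @ {sps:5.1f} samples/s -> {days_left(REMAINING, sps):5.1f} days")
+```
+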
+ +## A huge lm loss spike + +The training loss just jumped from ~3 to ~9 +``` + iteration 29020/ 311541 | consumed samples: 10698064 | elapsed time per iteration (ms): 22306.6 | learning rate: 9.850E-05 | global batch size: 1024 | lm loss: 2.775923E+00 | loss scale: 32768.0 | grad norm: 0.000 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | +time (ms) + iteration 29030/ 311541 | consumed samples: 10708304 | elapsed time per iteration (ms): 22336.4 | learning rate: 9.849E-05 | global batch size: 1024 | lm loss: 2.772822E+00 | loss scale: 32768.0 | grad norm: 0.000 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | +time (ms) + iteration 29040/ 311541 | consumed samples: 10718544 | elapsed time per iteration (ms): 22332.6 | learning rate: 9.849E-05 | global batch size: 1024 | lm loss: 2.768131E+00 | loss scale: 65536.0 | grad norm: 0.000 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | +time (ms) + iteration 29050/ 311541 | consumed samples: 10728784 | elapsed time per iteration (ms): 22148.5 | learning rate: 9.849E-05 | global batch size: 1024 | lm loss: 7.343709E+00 | loss scale: 8192.0 | grad norm: 0.000 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | +time (ms) + iteration 29060/ 311541 | consumed samples: 10739024 | elapsed time per iteration (ms): 22181.7 | learning rate: 9.849E-05 | global batch size: 1024 | lm loss: 8.715872E+00 | loss scale: 4096.0 | grad norm: 0.000 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | +time (ms) + iteration 29070/ 311541 | consumed samples: 10749264 | elapsed time per iteration (ms): 22107.1 | learning rate: 9.848E-05 | global batch size: 1024 | lm loss: 7.654131E+00 | loss scale: 4096.0 | grad norm: 0.000 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | +time (ms) + iteration 29080/ 311541 | consumed samples: 10759504 | elapsed time per iteration (ms): 22131.2 | learning rate: 9.848E-05 | global batch size: 1024 | lm loss: 7.192470E+00 | loss scale: 4096.0 | grad norm: 0.000 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | +time (ms) + iteration 29090/ 311541 | consumed samples: 10769744 | elapsed time per iteration (ms): 22119.2 | learning rate: 9.848E-05 | global batch size: 1024 | lm loss: 6.849044E+00 | loss scale: 4096.0 | grad norm: 0.000 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | +``` + +You can see the spike at https://huggingface.co/bigscience/tr1-13B-tensorboard/tensorboard + +It took some 500 iterations to recover. + +There was a second spike a bit later, half the first one this time and recovered very quickly. + +We discussed why it may have happened, but we don't have any definitive answer. + + +## Checkpoint bloat issue + +We have an issue with per-layer checkpoints that are way bigger than they should be. They are 10x bigger than what they should be. After some research we discovered that `torch.save()` doesn't save the current view, but the whole tensor with its original tensor storage. So that's why were were getting 10x bigger files than the actual data in the per-layer checkpoints. + +We need to `.clone()` the tensors before saving them. and then the checkpoint for layers is just modelsize*2 bytes. The reason they were bloated is because ZeRO-1 pre-allocated large tensor buffers for run-time optimization. 
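+
+To see the bloat mechanism in isolation, here is a minimal standalone repro (plain pytorch, not the Deepspeed code path): saving a small view of a big tensor serializes the whole underlying storage, while saving a `.clone()` serializes only the view's own data.
+
+```
+# Minimal repro of the checkpoint bloat: torch.save() writes out the entire
+# underlying storage of a view, not just the view itself.
+import os, tempfile, torch
+
+big = torch.zeros(10_000_000)   # 10M floats -> ~38 MiB of storage
+view = big[:10]                 # 10 elements, but shares the ~38 MiB storage
+
+with tempfile.TemporaryDirectory() as tmp:
+    for name, tensor in [("view.pt", view), ("clone.pt", view.clone())]:
+        path = os.path.join(tmp, name)
+        torch.save(tensor, path)
+        print(f"{name}: {os.path.getsize(path) / 2**20:.2f} MiB")
+    # view.pt  -> ~38 MiB (the full storage)
+    # clone.pt -> ~0.00 MiB (just 10 floats plus metadata)
+```
+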
So this needs to be fixed in Deepspeed's pipeline checkpoint saving. We will also write a script that fixes the already-saved checkpoints by `clone`-ing the tensors and re-saving them.
+
+
+## old NCCL
+
+Discovered that NCCL is statically linked into the distributed pytorch build and that it's really old - 2.7.9. Supposedly a newer NCCL should help with OPA interlink performance. But that means we either need to switch to a more recent pytorch or build our own. This is not resolved yet.
+
+
+## Watchdog
+
+We created a watchdog that reports whether we are running/scheduled and alerts if neither is happening. E.g. the recent log in the main log file was:
+
+```
+ iteration 33240/ 311541 | consumed samples: 15019344 | elapsed time per iteration (ms): 23491.4 | learning rate: 9.702E-05 | global batch size: 1024 | lm loss: 2.722675E+00 | loss scale: 32768.0 | grad norm: 0.000 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 |
+time (ms)
+saving checkpoint at iteration 33241 to /gpfsscratch/rech/six/commun/checkpoints/tr1-13B/checkpoints
+[2021-08-08 01:00:44,221] [INFO] [logging.py:68:log_dist] [Rank 0] Saving model checkpoint: /gpfsscratch/rech/six/commun/checkpoints/tr1-13B/checkpoints/global_step33241/mp_rank_00_model_states.pt
+ successfully saved checkpoint at iteration 33241 to /gpfsscratch/rech/six/commun/checkpoints/tr1-13B/checkpoints
+time (ms) | save-checkpoint: 57514.53
+[exiting program after 1190.0357275923093 minutes] datetime: 2021-08-08 01:00:51
+[2021-08-08 01:49:40] ***ALERT: tr1-13B-round3.slurm is not RUNNING or SCHEDULED! Alert someone at Eng WG***
+[2021-08-08 02:49:44] ***ALERT: tr1-13B-round3.slurm is not RUNNING or SCHEDULED! Alert someone at Eng WG***
+[2021-08-08 03:56:54] tr1-13B-round3 is scheduled to start in 3 days, 7:24:19 (at 2021-08-11T11:21:14) (682842_[1-5%1] on 'gpu_p13' partition)
+```
+
+## NNODES=96
+
+We thought that trying more nodes would be a good idea, but 96 nodes proved to be unacceptable, since GBS=1024 is not divisible by 384 (96*4), so there is no way to spread data evenly across all replicas.
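+
+A quick way to see which node counts are usable is to check the `GBS % (MBS * DP_SIZE)` constraint directly. A small sketch using this training's TP=2, PP=4, 4 gpus per node and MBS=1:
+
+```
+# Which node counts give a data-parallel size that divides GBS=1024?
+TP, PP, GPUS_PER_NODE, MBS, GBS = 2, 4, 4, 1, 1024
+
+for nnodes in (16, 32, 64, 96, 128, 256):
+    dp_size = nnodes * GPUS_PER_NODE // (TP * PP)
+    ok = GBS % (MBS * dp_size) == 0
+    print(f"{nnodes:3d} nodes: DP_SIZE={dp_size:3d} -> "
+          f"{'ok' if ok else 'GBS not divisible, unusable'}")
+```
+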
+ +We can only have either 256, 512 or 1024 gpus (64, 128, 256 nodes) + +## Corrupt GPU crashes the training multiple times + +One of the array job trainings crashes after many hours of training: + +``` +iteration 43680/ 311541 | consumed samples: 25709904 | elapsed time per iteration (ms): 25593.4 | learning rate: 9.135E-05 | global batch size: 1024 | lm loss: 2.635663E+00 | loss scale: 131072.0 | grad norm: 17224.723 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | +time (ms) +Traceback (most recent call last): + File "/gpfswork/rech/six/commun/code/tr1-13B/Megatron-DeepSpeed-tr1-13B/pretrain_gpt.py", line 222, in + pretrain(train_valid_test_datasets_provider, model_provider, forward_step, + File "/gpfsssd/worksf/projects/rech/six/commun/code/tr1-13B/Megatron-DeepSpeed-tr1-13B/megatron/training.py", line 144, in pretrain + iteration = train(forward_step_func, + File "/gpfsssd/worksf/projects/rech/six/commun/code/tr1-13B/Megatron-DeepSpeed-tr1-13B/megatron/training.py", line 677, in train + train_step(forward_step_func, + File "/gpfsssd/worksf/projects/rech/six/commun/code/tr1-13B/Megatron-DeepSpeed-tr1-13B/megatron/training.py", line 381, in train_step + loss = model[0].train_batch(data_iter=data_iterator) + File "/gpfsssd/worksf/projects/rech/six/commun/code/tr1-13B/DeepSpeed-big-science/deepspeed/runtime/pipe/engine.py", line 291, in train_batch + self._exec_schedule(sched) + File "/gpfsssd/worksf/projects/rech/six/commun/code/tr1-13B/DeepSpeed-big-science/deepspeed/runtime/pipe/engine.py", line 1237, in _exec_schedule + self._exec_instr(**cmd.kwargs) + File "/gpfsssd/worksf/projects/rech/six/commun/code/tr1-13B/DeepSpeed-big-science/deepspeed/runtime/pipe/engine.py", line 679, in _exec_backward_pass + torch.autograd.backward(tensors=(outputs, ), grad_tensors=(grad_tensors, )) + File "/gpfswork/rech/six/commun/conda/tr1-13B/lib/python3.8/site-packages/torch/autograd/__init__.py", line 145, in backward + Variable._execution_engine.run_backward( +RuntimeError: transform: failed to synchronize: cudaErrorECCUncorrectable: uncorrectable ECC error encountered +terminate called after throwing an instance of 'c10::Error' + what(): CUDA error: uncorrectable ECC error encountered +Exception raised from create_event_internal at /opt/conda/conda-bld/pytorch_1616554793803/work/c10/cuda/CUDACachingAllocator.cpp:733 (most recent call first): +frame #0: c10::Error::Error(c10::SourceLocation, std::string) + 0x42 (0x1500fb4d42f2 in /gpfswork/rech/six/commun/conda/tr1-13B/lib/python3.8/site-packages/torch/lib/libc10.so) +frame #1: c10::detail::torchCheckFail(char const*, char const*, unsigned int, std::string const&) + 0x5b (0x1500fb4d167b in /gpfswork/rech/six/commun/conda/tr1-13B/lib/python3.8/site-packages/torch/lib/libc10.so) +frame #2: c10::cuda::CUDACachingAllocator::raw_delete(void*) + 0x809 (0x1500fb72d219 in /gpfswork/rech/six/commun/conda/tr1-13B/lib/python3.8/site-packages/torch/lib/libc10_cuda.so) +frame #3: c10::TensorImpl::release_resources() + 0x54 (0x1500fb4bc3a4 in /gpfswork/rech/six/commun/conda/tr1-13B/lib/python3.8/site-packages/torch/lib/libc10.so) +frame #4: + 0x6e0e5a (0x150152432e5a in /gpfswork/rech/six/commun/conda/tr1-13B/lib/python3.8/site-packages/torch/lib/libtorch_python.so) +frame #5: + 0x6e0ef1 (0x150152432ef1 in /gpfswork/rech/six/commun/conda/tr1-13B/lib/python3.8/site-packages/torch/lib/libtorch_python.so) +frame #6: + 0x1a6b5a (0x56434fce9b5a in /gpfswork/rech/six/commun/conda/tr1-13B/bin/python) +frame #7: + 0x110b7c (0x56434fc53b7c 
in /gpfswork/rech/six/commun/conda/tr1-13B/bin/python) +frame #8: + 0x1105b9 (0x56434fc535b9 in /gpfswork/rech/six/commun/conda/tr1-13B/bin/python) +frame #9: + 0x1105a3 (0x56434fc535a3 in /gpfswork/rech/six/commun/conda/tr1-13B/bin/python) +frame #10: + 0x1105a3 (0x56434fc535a3 in /gpfswork/rech/six/commun/conda/tr1-13B/bin/python) +frame #11: + 0x177917 (0x56434fcba917 in /gpfswork/rech/six/commun/conda/tr1-13B/bin/python) +frame #12: PyDict_SetItemString + 0x4c (0x56434fcbd86c in /gpfswork/rech/six/commun/conda/tr1-13B/bin/python) +frame #13: PyImport_Cleanup + 0xac (0x56434fd2f0ec in /gpfswork/rech/six/commun/conda/tr1-13B/bin/python) +frame #14: Py_FinalizeEx + 0x79 (0x56434fd95589 in /gpfswork/rech/six/commun/conda/tr1-13B/bin/python) +frame #15: Py_RunMain + 0x1bc (0x56434fd988fc in /gpfswork/rech/six/commun/conda/tr1-13B/bin/python) +frame #16: Py_BytesMain + 0x39 (0x56434fd98ce9 in /gpfswork/rech/six/commun/conda/tr1-13B/bin/python) +frame #17: __libc_start_main + 0xf3 (0x150183467873 in /lib64/libc.so.6) +frame #18: + 0x1f7847 (0x56434fd3a847 in /gpfswork/rech/six/commun/conda/tr1-13B/bin/python) +``` + +Nobody was around to notice and slurm scheduler started the next training job in the array, and it crashed too this time right away on: + +``` +> initializing tensor model parallel with size 2 +> initializing pipeline model parallel with size 4 +> setting random seeds to 42 ... +[2021-08-12 08:19:28,225] [INFO] [checkpointing.py:226:model_parallel_cuda_manual_seed] > initializing model parallel cuda seeds on global rank 0, model parallel rank 0, and data parallel rank 0 with model parallel seed: 2760 and data parallel seed: 42 +> compiling dataset index builder ... +make: Entering directory '/gpfsssd/worksf/projects/rech/six/commun/code/tr1-13B/Megatron-DeepSpeed-tr1-13B/megatron/data' +make: Nothing to be done for 'default'. +make: Leaving directory '/gpfsssd/worksf/projects/rech/six/commun/code/tr1-13B/Megatron-DeepSpeed-tr1-13B/megatron/data' +>>> done with dataset index builder. Compilation time: 0.338 seconds +> compiling and loading fused kernels ... +Traceback (most recent call last): + File "/gpfswork/rech/six/commun/code/tr1-13B/Megatron-DeepSpeed-tr1-13B/pretrain_gpt.py", line 222, in + pretrain(train_valid_test_datasets_provider, model_provider, forward_step, + File "/gpfsssd/worksf/projects/rech/six/commun/code/tr1-13B/Megatron-DeepSpeed-tr1-13B/megatron/training.py", line 95, in pretrain + initialize_megatron(extra_args_provider=extra_args_provider, + File "/gpfsssd/worksf/projects/rech/six/commun/code/tr1-13B/Megatron-DeepSpeed-tr1-13B/megatron/initialize.py", line 89, in initialize_megatron + _compile_dependencies() + File "/gpfsssd/worksf/projects/rech/six/commun/code/tr1-13B/Megatron-DeepSpeed-tr1-13B/megatron/initialize.py", line 140, in _compile_dependencies + torch.distributed.barrier() + File "/gpfswork/rech/six/commun/conda/tr1-13B/lib/python3.8/site-packages/torch/distributed/distributed_c10d.py", line 2420, in barrier + work = default_pg.barrier(opts=opts) +RuntimeError: CUDA error: out of memory +``` + +We figured one of the gpus had a hardware problem. So it crashed the first time. And then the scheduler allocated the same node and of course, we crashed again. + +We contacted JZ admins and indeed one of the nodes was faulty. The next training didn't hit this node and the training continued. + +Unfortunately we currently don't have a way to correlate the exceptions to the hostname of the node that it happened on. 
It's really important to have this feature available, since without it we can keep on hitting the faulty node and it'll continue crashing the training. If we know the node's hostname we can exclude it with `sbatch --exclude=node1,node2,...`.
+
+update: At the moment we have to add `%N` to `#SBATCH --output=%x-%j-%N.out` and then each node will have its own log file and then we can tell which node has a corrupt GPU.
+
+## Really long wait time to get allocation
+
+When a job gets queued we often see 3 days of expected wait time before it yields, but most of the time the job comes through in several hours. Sometimes we have to wait for a really long time, like 30h, with the scheduler bumping our job down multiple times. This is a big problem as it pushes the finish line away continuously. We aren't anywhere close to being able to train 24/7 despite having many hours allocated to us for this project.
+
+Another problem is that within a project we don't have a way to give the main training job a higher priority than the other jobs that we run in parallel for various experiments and small trainings. There really should be a way for a user to say: this is a high priority job amongst all other jobs of the same group. But we didn't find a way to do that.
+
+## Test suite added
+
+A `Megatron-Deepspeed` test suite was finally added. It was odd that Megatron-LM didn't have one in the first place, so we had to create our own.
+
+Now we need to find some hardware with 2 gpus to create a CI.
+
+## Reduced evaluation iterations
+
+Noticed that somehow it was configured to run eval for 100 iterations; after discussion we reduced it to 5, thus saving some resources. While validation iterations are much faster than training, this many wasn't really needed.
+
+## NNODES=128
+
+Taking advantage of the August holiday in France we were able to switch to 128 nodes.
+
+Observed a further drop in TFLOPs, since now we had even fewer microbatches to go around. This is because the global BS remained the same (GBS=1024) and we currently use 2 nodes for a single replica (TP=2 * PP=4). So with 128 nodes, we have 64 replicas, which leaves only GAS=16 per replica, and that's too little for an efficient pipeline. The idle bubble is too big.
+
+The benchmarking/tune-up was done with GAS=128 (GBS=1024/8) and that's where we were getting high TFLOPs.
+
+Nevertheless, the training is going much faster now and we will catch up the lost time quickly.
+
+## NCCL experiments
+
+It was suggested that a newer NCCL would lead to faster inter-node communication.
+
+The hypothesis was that a newer NCCL should be faster on JZ, but the short experiments I ran didn't support it. I get the same throughput with:
+
+1. pt=1.8.1, cuda=11.1, nccl=2708
+2. pt=1.9.0, cuda=11.1, nccl=2708
+3. pt=1.10.0.dev20210821, cuda=11.3, nccl=(2, 10, 3)
+
+The experiment was run on the same 4-node allocation with GBS=64, but otherwise everything else was the same as the current training script. The speed was 17-17.5 secs per iteration. Did about 100 iterations.
+
+So we will stick with pt=1.8.1 for now until a need arises to change that.
+
+## SLURM Job Arrays and Dependency
+
+Switched to using SLURM Job Arrays and Dependency to schedule jobs. Since our account has a huge allocation we were able to start new 20h jobs with no delay.
+
+If this approach is not used, even a tiny delay between finishing one job and scheduling the next one often leads to 1-30 hours of wait time in the queue. This is because the scheduler is quick to allocate other jobs in the first few seconds after the currently running job finishes.
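+
+For reference, here is a hypothetical sketch of how one could script the "find the running job, then chain the next array onto it" step. It shells out to `squeue`/`sbatch` in the same way this document already does, and it only prints the `sbatch` command rather than submitting it:
+
+```
+# Hypothetical helper: find the running main-training job and print the
+# sbatch command that chains the next 5-job array onto it.
+import subprocess, sys
+
+def running_job_id(job_name):
+    out = subprocess.run(["squeue", "-h", "--name", job_name, "-o", "%i %T"],
+                         capture_output=True, text=True, check=True).stdout
+    for line in out.splitlines():
+        parts = line.split()
+        if len(parts) >= 2 and parts[1] == "RUNNING":
+            return parts[0]
+    return None
+
+name = sys.argv[1] if len(sys.argv) > 1 else "tr1-13B-round3"
+job_id = running_job_id(name)
+if job_id is None:
+    print("no running job found - schedule normally:")
+    print("sbatch --array=1-5%1 tr1-13B-round1.slurm")
+else:
+    print(f"sbatch --dependency={job_id} --array=1-5%1 tr1-13B-round1.slurm")
+```
+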
+
+The problem remained when something went wrong - e.g. a mistake in a script or some hardware issue - since that would lead to a delay in starting new jobs and a long, long wait time.
+
+This training was getting its software updated a lot as missing features were added, so it wasn't a super-stable, polished production environment.
+
+So as long as we had a stable setup and used SLURM Job Arrays and Dependency chaining, things went well. When we couldn't use those, SLURM sometimes delayed our training by a lot.
+
+Also, since we run secondary trainings, we learned to use `--nice=10000` for those trainings. Without it all slurm jobs of the same account have the same priority.
+
+## Added an alert email notification
+
+The previously implemented watchdog got hooked up to email notifications, so if it detects that no job is running or scheduled it lets the group know.
+
+## Checkpoint bloat fixed
+
+The Deepspeed team fixed the bloat in the checkpoints, so new checkpoints were taking 10x less space for the layer weights.
+
+I then processed all the old checkpoints to remove the bloat using:
+
+```
+srun -p prepost -A six@cpu --time=20:00:00 --pty bash
+wget https://raw.githubusercontent.com/stas00/toolbox/master/pytorch/pt-checkpoint-shrink.py
+chmod a+x pt-checkpoint-shrink.py
+cd checkpoints
+find -type d -name "global_step*" -exec pt-checkpoint-shrink.py --checkpoint_dir {} --patterns "layer*pt" \;
+```
+
+## CI was added
+
+A CI was implemented using an on-demand EC2 instance, with the help of https://github.com/machulav/ec2-github-runner
+
+Eventually it proved to be unusable for PRs made from forks, as EC2 needs secrets that github actions won't give to PRs not originating from the main repo. So this CI is not very useful.
+
+## Training completed
+
+On Sep 6th we reached the 300B tokens mark and on Sep 7th we stopped the training - it took some ~5 weeks to complete.
+
+## Checkpoint conversion
+
+We still need to figure out how to make the checkpoint available in the HF `transformers` format. This is a work in progress.
+
+Update: This has been done. All checkpoints were converted to the HF format and uploaded to the HUB.
+
+See [README.md](README.md) for the nuances of the conversion.
+
+We made a mistake in the activation function setting when writing the HF model after the conversion. It proved to be a complex situation, but it needs to be `gelu_fast` on the HF side since we are using `args.openai_gelu = False; args.bias_gelu_res = True`. So we applied fixes to the models on the HUB using the following:
+
+```
+cd /gpfsssd/scratch/rech/six/commun/experiments/fix-config/
+export GIT_LFS_SKIP_SMUDGE=1
+git clone https://huggingface.co/bigscience/tr3e-1B3-c4-checkpoints
+cd tr3e-1B3-c4-checkpoints
+~/prod/code/bigscience/tools/hub-sync.py --repo-path . --patterns '*bogus*'
+set +H
+git branch -a | sort -V | perl -lne 'm|(global_step\d+)| && print qx[git checkout $1; perl -pi -e "s/gelu(?!_)/gelu_fast/" $1/config.json; git commit -m "gelu_fast is the correct activation_function" .; git push --set-upstream origin $1]'
+export GIT_LFS_SKIP_SMUDGE=0
+```
+Note the trick of not checking out the LFS files, since we only need to modify `config.json`, which is a normal file - this is thousands of times faster than a normal checkout.
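+
+To spot-check that a given branch actually picked up the fix, one can do a similarly LFS-less clone and read the config directly (a sketch - `global_step100` is just an example, substitute a real `global_step*` branch name):
+
+```
+export GIT_LFS_SKIP_SMUDGE=1
+git clone --branch global_step100 https://huggingface.co/bigscience/tr3e-1B3-c4-checkpoints check-tmp
+python -c "import json; print(json.load(open('check-tmp/global_step100/config.json'))['activation_function'])"
+# should print: gelu_fast
+export GIT_LFS_SKIP_SMUDGE=0
+```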
+ + + +and for GCS: +``` +start-prod +cd /gpfsssd/scratch/rech/six/commun/checkpoints/to-upload/ +perl -pi -e 's|gelu|gelu_fast|' checkpoints/*/config.json +gsutil -m rsync -x ".*bin$" -r checkpoints gs://bigscience-backups/tr1-13B/checkpoints +``` diff --git a/train/tr1-13B-base/start-tr1-13B b/train/tr1-13B-base/start-tr1-13B new file mode 100644 index 0000000000000000000000000000000000000000..9850a2614c59b291cae5c4ad94eb23604c96d7ed --- /dev/null +++ b/train/tr1-13B-base/start-tr1-13B @@ -0,0 +1,57 @@ +# This is a python production script for JZ / tr1-13B training +# +# Activate with: +# +# source ./start-tr1-13B +# +# + +# # if this session isn't run via a login shell, which is the case when running a +# # command which is not shell via ssh, the bash function `module` will be missing. +# # so work around it by emulating part of the login shell that loads modules environment +# if [ -z $(type -t module) ] +# then +# . /etc/profile.d/z_modules.sh +# fi +module purge +module load pytorch-gpu/py3/1.8.1 +module load nvtop git-lfs github-cli mc + +# git prompt +export GIT_PROMPT_ONLY_IN_REPO=0; +export GIT_PROMPT_THEME="JZPRod" +source $six_ALL_CCFRWORK/envs/.bash-git-prompt/gitprompt.sh + +# We are using common disk spaces for datasets, caches, and experiment dumps: +# +#- Code, cache and datasets -> `$six_ALL_CCFRWORK/cache_dir` and ``$six_ALL_CCFRWORK/datasets` +#- Experiment dumps -> `$six_ALL_CCFRWORK/experiments` + +# specific caches + +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics + +export DATASETS_CUSTOM=$six_ALL_CCFRWORK/datasets-custom + +### CONDA ### + +# >>> conda initialize >>> +# !! Contents within this block are managed by 'conda init' !! +__conda_setup="$('/gpfslocalsup/pub/anaconda-py3/2020.02/bin/conda' 'shell.bash' 'hook' 2> /dev/null)" +if [ $? -eq 0 ]; then + eval "$__conda_setup" +else + if [ -f "/gpfslocalsup/pub/anaconda-py3/2020.02/etc/profile.d/conda.sh" ]; then + . 
"/gpfslocalsup/pub/anaconda-py3/2020.02/etc/profile.d/conda.sh" + else + export PATH="/gpfslocalsup/pub/anaconda-py3/2020.02/bin:$PATH" + fi +fi +unset __conda_setup +# <<< conda initialize <<< + +export CONDA_ENVS_PATH=$six_ALL_CCFRWORK/conda +conda activate tr1-13B diff --git a/train/tr1-13B-base/tr1-13B-hub-sync-logs.slurm b/train/tr1-13B-base/tr1-13B-hub-sync-logs.slurm new file mode 100644 index 0000000000000000000000000000000000000000..ef11f58279b6664d2bb711055bcf9949e1a7b90c --- /dev/null +++ b/train/tr1-13B-base/tr1-13B-hub-sync-logs.slurm @@ -0,0 +1,23 @@ +#!/bin/bash +#SBATCH --job-name=tr1-13B-hub-sync-logs # job name +#SBATCH --ntasks=1 # number of MP tasks +#SBATCH --nodes=1 # number of nodes +#SBATCH --cpus-per-task=1 # number of cores per task +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --time=1:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --partition=compil +#SBATCH --account=six@cpu + +echo "START TIME: $(date)" + +source $six_ALL_CCFRWORK/code/tr1-13B/bigscience/train/tr1-13B-base/start-tr1-13B +module load git-lfs + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr1-13B +LOGS_PATH=$DATA_OUTPUT_PATH/logs +BIG_SCIENCE_REPO_PATH=$six_ALL_CCFRWORK/code/tr1-13B/bigscience + +$BIG_SCIENCE_REPO_PATH/tools/hub-sync.py --repo-path $LOGS_PATH --patterns '*.txt' -d + +echo "END TIME: $(date)" diff --git a/train/tr1-13B-base/tr1-13B-hub-sync-tensorboard.slurm b/train/tr1-13B-base/tr1-13B-hub-sync-tensorboard.slurm new file mode 100644 index 0000000000000000000000000000000000000000..d510eddfb202e24a5c68bc8b725c3a1df942a990 --- /dev/null +++ b/train/tr1-13B-base/tr1-13B-hub-sync-tensorboard.slurm @@ -0,0 +1,23 @@ +#!/bin/bash +#SBATCH --job-name=tr1-13B-hub-sync-tensorboard # job name +#SBATCH --ntasks=1 # number of MP tasks +#SBATCH --nodes=1 # number of nodes +#SBATCH --cpus-per-task=1 # number of cores per task +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --time=2:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --partition=compil +#SBATCH --account=six@cpu + +echo "START TIME: $(date)" + +source $six_ALL_CCFRWORK/code/tr1-13B/bigscience/train/tr1-13B-base/start-tr1-13B +module load git-lfs + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr1-13B +TENSORBOARD_PATH=$DATA_OUTPUT_PATH/tensorboard +BIG_SCIENCE_REPO_PATH=$six_ALL_CCFRWORK/code/tr1-13B/bigscience + +$BIG_SCIENCE_REPO_PATH/tools/hub-sync.py --repo-path $TENSORBOARD_PATH --patterns '*tfevents*' -d + +echo "END TIME: $(date)" diff --git a/train/tr1-13B-base/tr1-13B-round1.slurm b/train/tr1-13B-base/tr1-13B-round1.slurm new file mode 100644 index 0000000000000000000000000000000000000000..bfc02df5a17fd187ac1234d09486112e39ea39ba --- /dev/null +++ b/train/tr1-13B-base/tr1-13B-round1.slurm @@ -0,0 +1,174 @@ +#!/bin/bash +#SBATCH --job-name=tr1-13B-round3 +#SBATCH --constraint=v100-32g +#SBATCH --nodes=128 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! 
+#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --account=six@v100 + +set -x -e + +source $six_ALL_CCFRWORK/code/tr1-13B/bigscience/train/tr1-13B-base/start-tr1-13B + +echo "START TIME: $(date)" + +#ROUND=3 + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr1-13B +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints +TENSORBOARD_PATH=$DATA_OUTPUT_PATH/tensorboard +CODECARBON_PATH=$DATA_OUTPUT_PATH/codecarbon +LOGS_PATH=$DATA_OUTPUT_PATH/logs + +MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRWORK/code/tr1-13B/Megatron-DeepSpeed-tr1-13B/ + +VOCAB_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-vocab.json +MERGE_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-merges.txt +DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/oscar-en/meg-gpt2_text_document + +cd $MEGATRON_DEEPSPEED_REPO + +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6000 + +GPUS_PER_NODE=4 +NNODES=128 # switch to 128 +TP_SIZE=2 # always fixed to the size of a single node +PP_SIZE=4 # NLAYERS must be a multiple of PP_SIZE here +#DP_SIZE=$NNODES*$GPUS_PER_NODE/($PP_SIZE*$TP_SIZE) # will get derived automatically by trainer + +# GLOBAL_BATCH_SIZE has to be divisible by MICRO_BATCH_SIZE*DP_size +# GLOBAL_BATCH_SIZE=$(($MICRO_BATCH_SIZE*$GAS*$DP_SIZE)) - GAS is auto-derived by deepspeed +MICRO_BATCH_SIZE=1 +GLOBAL_BATCH_SIZE=1024 + +NLAYERS=40 +NHIDDEN=5120 +NHEADS=32 +FFN_HIDDEN_SIZE=20480 +SEQ_LEN=2048 +VOCAB_SIZE=50257 + +SAVE_INTERVAL=1500 + +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.999 \ + --adam-eps 1e-8 \ + --lr 1e-4 \ + --min-lr 1e-5 \ + --lr-decay-style cosine \ + --lr-decay-samples 126_953_125 \ + --lr-warmup-samples 216_320 \ + --clip-grad 1.0 \ + --weight-decay 1e-1 \ + " + +EXIT_OPTS=" \ + --exit-duration-in-mins 1190 \ + " + +GPT_ARGS=" \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --ffn-hidden-size $FFN_HIDDEN_SIZE \ + --num-attention-heads $NHEADS \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --rampup-batch-size 16 16 5_000_000 \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --train-samples 300_000_000 \ + --vocab-file $VOCAB_FILE \ + --merge-file $MERGE_FILE \ + --loss-scale 12 \ + --clip-grad 1.0 \ + --fp16 \ + --checkpoint-activations \ + --seed 42 + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +OUTPUT_ARGS=" \ + --log-interval 10 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 1000 \ + --eval-iters 5 \ + --codecarbon-dir $CODECARBON_PATH \ + --tensorboard-dir $TENSORBOARD_PATH \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=1 + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat < $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage 
${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.launch \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --master_addr $MASTER_ADDR \ + --master_port $MASTER_PORT \ + " + +export CMD=" \ + `pwd`/pretrain_gpt.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --data-path $DATA_PATH \ + --data-impl mmap \ + --split 949,50,1 \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + +echo $CMD + +# to debug - add echo (it exits and prints what it would have launched) +clear; srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' 2>&1 | tee -a $LOGS_PATH/main_log.txt + +echo "END TIME: $(date)" + +# diff --git a/train/tr1-13B-base/tr1-13B-short.slurm b/train/tr1-13B-base/tr1-13B-short.slurm new file mode 100644 index 0000000000000000000000000000000000000000..81296c82961819463a21cbe32f43562207a4867a --- /dev/null +++ b/train/tr1-13B-base/tr1-13B-short.slurm @@ -0,0 +1,189 @@ +#!/bin/bash +#SBATCH --job-name=tr1-13B-short +#SBATCH --constraint=v100-32g +#SBATCH --nodes=2 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! +#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --account=six@v100 + + + +# This is the same as the main script but pre-configured for a small model that can be easily tested +# on 1 or 2 nodes, which is handy to verify everything works before using it on the main training. +# +# It's also useful for pre-building megatron CUDA kernels if and when things get borked and it gets stuck in building kernels. 
+# +# Change to NNODES=1 if needed +# +# to allocate (change to 2 for NNODES=2) +# salloc --constraint=v100-32g --account=six@v100 --nodes=1 --ntasks=1 --cpus-per-task=40 --gres=gpu:4 --hint=nomultithread --time=6:00:00 bash + +source $six_ALL_CCFRWORK/code/tr1-13B/bigscience/train/tr1-13B-base/start-tr1-13B + +set -x -e + +echo "START TIME: $(date)" + +#ROUND=3 + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr1-13B-test +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints +TENSORBOARD_PATH=$DATA_OUTPUT_PATH/tensorboard +CODECARBON_PATH=$DATA_OUTPUT_PATH/codecarbon +LOGS_PATH=$DATA_OUTPUT_PATH/logs + +MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRWORK/code/tr1-13B/Megatron-DeepSpeed-tr1-13B/ + +VOCAB_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-vocab.json +MERGE_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-merges.txt +DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/oscar-en/meg-gpt2_text_document + +cd $MEGATRON_DEEPSPEED_REPO + +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6000 + +GPUS_PER_NODE=4 +NNODES=2 # switch to 64 +TP_SIZE=2 # always fixed to the size of a single node +PP_SIZE=2 # NLAYERS must be a multiple of PP_SIZE here +#DP_SIZE=$NNODES*$GPUS_PER_NODE/($PP_SIZE*$TP_SIZE) # will get derived automatically by trainer + +# GLOBAL_BATCH_SIZE has to be divisible by MICRO_BATCH_SIZE*DP_size +# GLOBAL_BATCH_SIZE=$(($MICRO_BATCH_SIZE*$GAS*$DP_SIZE)) - GAS is auto-derived by deepspeed +MICRO_BATCH_SIZE=1 +GLOBAL_BATCH_SIZE=64 + +NLAYERS=8 +NHIDDEN=512 +NHEADS=8 +FFN_HIDDEN_SIZE=2048 +SEQ_LEN=512 +VOCAB_SIZE=50257 + +SAVE_INTERVAL=2000 + + +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.999 \ + --adam-eps 1e-8 \ + --lr 1e-4 \ + --min-lr 1e-5 \ + --lr-decay-style cosine \ + --lr-decay-samples 126_953_125 \ + --lr-warmup-samples 216_320 \ + --clip-grad 1.0 \ + --weight-decay 1e-1 \ + " + +EXIT_OPTS=" \ + --exit-duration-in-mins 1190 \ + " + +GPT_ARGS=" \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --ffn-hidden-size $FFN_HIDDEN_SIZE \ + --num-attention-heads $NHEADS \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --rampup-batch-size 16 16 5_000_000 \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --train-samples 300_000_000 \ + --vocab-file $VOCAB_FILE \ + --merge-file $MERGE_FILE \ + --loss-scale 12 \ + --clip-grad 1.0 \ + --fp16 \ + --checkpoint-activations \ + --seed 42 + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +OUTPUT_ARGS=" \ + --log-interval 10 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 1000 \ + --eval-iters 5 \ + --codecarbon-dir $CODECARBON_PATH \ + --tensorboard-dir $TENSORBOARD_PATH \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=1 + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat < $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export 
LAUNCHER="python -u -m torch.distributed.launch \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --master_addr $MASTER_ADDR \ + --master_port $MASTER_PORT \ + " + +# /usr/bin/env PYTHONPATH="." `pwd`/pretrain_gpt.py \ +export CMD=" \ + `pwd`/pretrain_gpt.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --data-path $DATA_PATH \ + --data-impl mmap \ + --split 949,50,1 \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + +echo $CMD + +# to debug - add echo (it exits and prints what it would have launched) +clear; srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' +#2>&1 | tee -a $LOGS_PATH/main_log.txt + +echo "END TIME: $(date)" + +# diff --git a/train/tr1-13B-base/tr1-13B-slurm-status.slurm b/train/tr1-13B-base/tr1-13B-slurm-status.slurm new file mode 100644 index 0000000000000000000000000000000000000000..6aa289d86fbb8a15cbf44efdda7367df029fed3a --- /dev/null +++ b/train/tr1-13B-base/tr1-13B-slurm-status.slurm @@ -0,0 +1,23 @@ +#!/bin/bash +#SBATCH --job-name=tr1-13B-slurm-status # job name +#SBATCH --ntasks=1 # number of MP tasks +#SBATCH --nodes=1 # number of nodes +#SBATCH --cpus-per-task=1 # number of cores per task +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --time=0:30:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --partition=compil +#SBATCH --account=six@cpu + +echo "START TIME: $(date)" + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr1-13B +LOGS_PATH=$DATA_OUTPUT_PATH/logs +MAIN_LOG_FILE=$LOGS_PATH/main_log.txt +BIG_SCIENCE_REPO_PATH=$six_ALL_CCFRWORK/code/tr1-13B/bigscience +# XXX: fix me on the next schedule when the name is fixed to remove .slurm +WATCH_SLURM_NAME=tr1-13B-round3 + +$BIG_SCIENCE_REPO_PATH/tools/slurm-status.py --job-name $WATCH_SLURM_NAME 2>&1 | tee -a $MAIN_LOG_FILE + +echo "END TIME: $(date)" diff --git a/train/tr10-13B-ml/README.md b/train/tr10-13B-ml/README.md new file mode 100644 index 0000000000000000000000000000000000000000..c196ae2a99df7ac3b5b8b232efd395eca83f5e56 --- /dev/null +++ b/train/tr10-13B-ml/README.md @@ -0,0 +1,111 @@ +# tr10 13B ML + + +## setup/tune up + + +To interactively tune up the setup: + +``` +salloc --constraint=v100-32g --account=six@v100 --nodes=4 --ntasks=4 --cpus-per-task=40 --gres=gpu:4 --hint=nomultithread --time=120 bash --rcfile $six_ALL_CCFRWORK/code/tr10-13B/bigscience/train/tr10-13B-ml/start-tr10-13B +``` + + +Conda setup: + +``` +export CONDA_ENVS_PATH=$six_ALL_CCFRWORK/conda + +conda create -y -n tr10-13B python=3.8 +conda activate tr10-13B + +pip3 install torch==1.10.0+cu113 torchvision==0.11.1+cu113 torchaudio==0.10.0+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html + +mkdir $six_ALL_CCFRWORK/code/tr10-13B +cd $six_ALL_CCFRWORK/code/tr10-13B + +cd $six_ALL_CCFRWORK/code/tr10-13B/apex +./build.sh + +pip install deepspeed + +cd $six_ALL_CCFRWORK/code/tr10-13B/DeepSpeed +./build.sh + +pip install transformers + +cd $six_ALL_CCFRWORK/code/tr10-13B/transformers +pip install -e . + +cd $six_ALL_CCFRWORK/code/tr10-13B/megatron-lm +pip install -r requirements.txt +``` + +Env setup script to be `source start-tr10-13B` [start-tr10-13B](./start-tr10-13B) + + + +configs: + +works: +``` +NNODES=4 +TP_SIZE=4 +PP_SIZE=4 +``` + + +tokenizer + +It's at https://huggingface.co/teven/test_150k_vocab_tokenizer/tree/main ! 
+ +So instead of running with : +``` +--vocab-file $VOCAB_FILE \ +--merge-file $MERGE_FILE \ +``` + +You should run with: +``` +--tokenizer-type PretrainedFromHF \ +--tokenizer-name-or-path teven/test_150k_vocab_tokenizer \ +``` + +Preprocessed a c4 10k samples, you can use it with: +``` +DATA_PATH=$six_ALL_CCFRSCRATCH/datasets-custom/150k_vocab_size_test/c4_10k_samples_150k_vocab_size +``` + +## Config + + +Julien Launay: + +(1) the main difference will be multilinguality, and the larger vocabulary. +(2) For PrefixLM, we are not sure yet, as for now prefix is underperforming the vanilla model + it has some quirks. Thomas is working on a potential fix. We will keep you updated, but I think you can start working without prefix. +(3) Embeddings. ALiBi is still underperforming all others. Maybe we could consider going with rotary? @Iz Beltagy what's your opinion on this? Rotary probably won't change significantly your benchmark, but will degrade performance by a few percents across the board. +we don’t have a conclusive answer yet but both shouldn’t affect model size. If any, they will make the model a tiny bit smaller +(4) Activation. We need to evaluate the GeGLU run. GeGLU would bring a significant change to the size of the MLPs, which would be significant for your benchmark. +it shouldn’t change the overall model size but will change the size of some of the FF layers so might change how TP works + +### `--init-method-std` + +`--init-method-std 0.00884` + +We derived this from: `NHIDDEN=5120` + +`0.00884 = sqrt(2/(5120*5))` (from the ScaleNorm paper https://arxiv.org/abs/1910.05895) + +### `NHEADS` + +NHEADS=40, why... + +### `--embed-layernorm` + +We want this because it solved the problems with the 104B training instabilities. + +If we choose to train with it, we will need to add the additional code to for the HF GPT2 converted model. + +### `--partition-activations` + +can be used to shard activations across gpus to save more gpu memory diff --git a/train/tr10-13B-ml/start-tr10-13B b/train/tr10-13B-ml/start-tr10-13B new file mode 100644 index 0000000000000000000000000000000000000000..bed08351782332dd17c954208361b9c59e3a696d --- /dev/null +++ b/train/tr10-13B-ml/start-tr10-13B @@ -0,0 +1,58 @@ +# This is a python production script for JZ / tr10-13B training +# +# Activate with: +# +# source ./start-tr10-13B +# +# + +# # if this session isn't run via a login shell, which is the case when running a +# # command which is not shell via ssh, the bash function `module` will be missing. +# # so work around it by emulating part of the login shell that loads modules environment +# if [ -z $(type -t module) ] +# then +# . /etc/profile.d/z_modules.sh +# fi +module purge +module load pytorch-gpu/py3/1.9.0 +module load nvtop git-lfs github-cli mc + +# git prompt +export GIT_PROMPT_ONLY_IN_REPO=0; +export GIT_PROMPT_THEME="JZPRod" +source $six_ALL_CCFRWORK/envs/.bash-git-prompt/gitprompt.sh + +# We are using common disk spaces for datasets, caches, and experiment dumps: +# +#- Code, cache and datasets -> `$six_ALL_CCFRWORK/cache_dir` and ``$six_ALL_CCFRWORK/datasets` +#- Experiment dumps -> `$six_ALL_CCFRWORK/experiments` + +# specific caches + +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics + +export DATASETS_CUSTOM=$six_ALL_CCFRWORK/datasets-custom + +### CONDA ### + +# >>> conda initialize >>> +# !! 
Contents within this block are managed by 'conda init' !! +__conda_setup="$('/gpfslocalsup/pub/anaconda-py3/2020.02/bin/conda' 'shell.bash' 'hook' 2> /dev/null)" +if [ $? -eq 0 ]; then + eval "$__conda_setup" +else + if [ -f "/gpfslocalsup/pub/anaconda-py3/2020.02/etc/profile.d/conda.sh" ]; then + . "/gpfslocalsup/pub/anaconda-py3/2020.02/etc/profile.d/conda.sh" + else + export PATH="/gpfslocalsup/pub/anaconda-py3/2020.02/bin:$PATH" + fi +fi +unset __conda_setup +# <<< conda initialize <<< + +export CONDA_ENVS_PATH=$six_ALL_CCFRWORK/conda +conda activate base +conda activate tr10-13B diff --git a/train/tr10-13B-ml/tr10-13B.slurm b/train/tr10-13B-ml/tr10-13B.slurm new file mode 100644 index 0000000000000000000000000000000000000000..fcde42cd3ccdcf7db42a5bf980f21ed5688731cd --- /dev/null +++ b/train/tr10-13B-ml/tr10-13B.slurm @@ -0,0 +1,182 @@ +#!/bin/bash +#SBATCH --job-name=tr10-13B +#SBATCH --constraint=v100-32g +#SBATCH --nodes=4 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! +#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --account=six@v100 + +set -x -e + +source $six_ALL_CCFRWORK/code/tr10-13B/bigscience/train/tr10-13B-ml/start-tr10-13B + +echo "START TIME: $(date)" + +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr10-13B +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints +REPO_PATH=$DATA_OUTPUT_PATH/tr10-13B-logs +TENSORBOARD_PATH=$REPO_PATH/tensorboard +LOGS_PATH=$REPO_PATH/logs +mkdir -p $LOGS_PATH + +MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRWORK/code/tr10-13B/Megatron-DeepSpeed + +TOKENIZER_NAME=teven/test_150k_vocab_tokenizer +DATA_PATH=$six_ALL_CCFRSCRATCH/datasets-custom/150k_vocab_size_test/c4_10k_samples_150k_vocab_size + +cd $MEGATRON_DEEPSPEED_REPO + +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6000 + +GPUS_PER_NODE=4 +NNODES=4 # switch to 128 +TP_SIZE=2 # always fixed to the size of a single node +PP_SIZE=4 # NLAYERS must be a multiple of PP_SIZE here + +MICRO_BATCH_SIZE=1 +GLOBAL_BATCH_SIZE=2048 + +NLAYERS=40 +NHIDDEN=5120 +NHEADS=32 +SEQ_LEN=2048 +VOCAB_SIZE=150000 + +SAVE_INTERVAL=300 + +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.95 \ + --adam-eps 1e-8 \ + --lr 6e-5 \ + --min-lr 6e-6 \ + --lr-decay-style cosine \ + --lr-decay-samples 126_953_125 \ + --lr-warmup-samples 216_320 \ + --clip-grad 1.0 \ + --weight-decay 1e-1 \ + " + +EXIT_OPTS=" \ + --exit-duration-in-mins 1190 \ + " + + +GPT_ARGS=" \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --rampup-batch-size 16 16 6_000_000 \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --train-samples 300_000_000 \ + --tokenizer-type PretrainedFromHF \ + --tokenizer-name-or-path $TOKENIZER_NAME \ + --loss-scale 12 \ + --init-method-std 0.00884 \ + --fp16 \ + --checkpoint-activations \ + --seed 43 \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +OUTPUT_ARGS=" \ + --log-interval 1 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 1000 \ + --eval-iters 5 \ + --tensorboard-dir $TENSORBOARD_PATH \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + 
--log-validation-ppl-to-tensorboard \ + --log-level info \ + --log-level-replica error \ + " + +ZERO_STAGE=1 + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat < $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +# export LAUNCHER="python -u -m torch.distributed.launch \ +# --nproc_per_node $GPUS_PER_NODE \ +# --nnodes $NNODES \ +# --master_addr $MASTER_ADDR \ +# --master_port $MASTER_PORT \ +# " + +export LAUNCHER="python -u -m torch.distributed.run \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \ + --rdzv_backend c10d \ + --max_restarts 0 \ + " + +export CMD=" \ + `pwd`/pretrain_gpt.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --data-path $DATA_PATH \ + --data-impl mmap \ + --split 900,100,0 \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + +export OMP_NUM_THREADS=1 # shut up the launcher warnings + +echo $CMD + +# to debug - add echo (it exits and prints what it would have launched) +clear; srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' 2>&1 | tee -a $LOGS_PATH/main_log.txt + +echo "END TIME: $(date)" + +# diff --git a/train/tr12-1B3-oscar/tr12a-1B3-oscar-en-filtered.slurm b/train/tr12-1B3-oscar/tr12a-1B3-oscar-en-filtered.slurm new file mode 100644 index 0000000000000000000000000000000000000000..93c445a3522f81255eec7ae8a48688f5b1f1b229 --- /dev/null +++ b/train/tr12-1B3-oscar/tr12a-1B3-oscar-en-filtered.slurm @@ -0,0 +1,191 @@ +#!/bin/bash +#SBATCH --job-name=1B3-oscar-en-filtered.slurm +#SBATCH --qos=qos_gpu-t3 +#SBATCH --nodes=16 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! 
+#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%a-%j.out # output file name +#SBATCH --error=%x-%a-%j.out # error file name (same to watch just one file) +#SBATCH --account=six@v100 +#SBATCH --mail-type=ALL +#SBATCH --mail-user=hugo@huggingface.co + +set -x -e + + +ROUND=2 +TESTING=0 + +EXPERIMENT_NAME=tr12a-1B3-oscar-en-filtered +OUTPUT_PATH=$six_ALL_CCFRSCRATCH/synched_exps/$EXPERIMENT_NAME/ +CHECKPOINT_PATH=$OUTPUT_PATH/checkpoints +REPO_PATH=$OUTPUT_PATH/$EXPERIMENT_NAME-logs +TENSORBOARD_PATH=$REPO_PATH/tensorboard +CODECARBON_PATH=$REPO_PATH/codecarbon +LOGS_PATH=$REPO_PATH/logs + +MEGATRON_DEEPSPEED_REPO=$ALL_CCFRWORK/code/Megatron-DeepSpeed + +VOCAB_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-vocab.json +MERGE_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-merges.txt +DATA_PATH=/gpfsscratch/rech/six/urd43gx/oscar_en_v2_filtered/preprocessed_data_megatron/oscar_filtered_en_text_document + +# defining the right environment variables +source $six_ALL_CCFRWORK/start-prod +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 +cd $MEGATRON_DEEPSPEED_REPO + +# so processes know who to talk to +MASTER_ADDR=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1` +MASTER_PORT=6000 + +# adjust depending on the number of the nodes + +# XXX: edit me +GPUS_PER_NODE=4 +NNODES=16 +PP_SIZE=2 # NLAYERS must be a multiple of PP_SIZE here +TP_SIZE=1 # always fixed to the size of a single node +DP_SIZE=$((NNODES*GPUS_PER_NODE/(PP_SIZE*TP_SIZE))) # will get derived automatically by trainer + +MICRO_BATCH_SIZE=1 +GLOBAL_BATCH_SIZE=512 +TRAIN_ITER=146_484_375 + +NLAYERS=24 +NHIDDEN=2048 +NHEADS=16 +FFN_HIDDEN_SIZE=8192 +SEQ_LEN=2048 + +if [[ ${ROUND} == 1 ]]; then EXIT_INTERVAL=100 SAVE_INTERVAL=10 +elif [[ ${ROUND} == 2 ]]; then SAVE_INTERVAL=1500 +else echo "invalid ROUND: $ROUND" +fi + +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.999 \ + --adam-eps 1e-8 \ + --lr 2e-4 \ + --min-lr 1e-5 \ + --lr-decay-style cosine \ + --lr-decay-samples 146_484_375 \ + --lr-warmup-samples 183_105 \ + --clip-grad 1.0 \ + --weight-decay 1e-1 \ + " + +EXIT_OPTS=" \ + --exit-duration-in-mins 1190 \ + " + +GPT_ARGS=" \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --ffn-hidden-size $FFN_HIDDEN_SIZE \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --rampup-batch-size 32 32 2_000_000 \ + --train-samples $TRAIN_ITER \ + --vocab-file $VOCAB_FILE \ + --merge-file $MERGE_FILE \ + --loss-scale 12 \ + --clip-grad 1.0 \ + --fp16 \ + --checkpoint-activations \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +OUTPUT_ARGS=" \ + --log-interval 200 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 1000 \ + --eval-iters 100 \ + --tensorboard-dir $TENSORBOARD_PATH \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=1 + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat < $config_json 
+{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.launch \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --master_addr $MASTER_ADDR \ + --master_port $MASTER_PORT \ + " + +export CMD=" \ + `pwd`/pretrain_gpt.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --data-path $DATA_PATH \ + --data-impl mmap \ + --split 949,50,1 \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + + +# # clear old checkpoint as it'd mismatch while we sort things out +# rm -rf $SAVE_CHECKPOINT_PATH + + +echo $CMD + +# We create the folder where the logs and codecarbon will be stored. +mkdir -p $LOGS_PATH +# to debug - add echo (it exits and prints what it would have launched) +srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' 2>&1 | tee -a $LOGS_PATH/main_log.txt diff --git a/train/tr12-1B3-oscar/tr12b-1B3-oscar-en-filtered-dedup.slurm b/train/tr12-1B3-oscar/tr12b-1B3-oscar-en-filtered-dedup.slurm new file mode 100644 index 0000000000000000000000000000000000000000..11ef21114b7350de1fc784f9d4286272c33f2cb9 --- /dev/null +++ b/train/tr12-1B3-oscar/tr12b-1B3-oscar-en-filtered-dedup.slurm @@ -0,0 +1,191 @@ +#!/bin/bash +#SBATCH --job-name=1B3-oscar-en-filtered-dedup.slurm +#SBATCH --qos=qos_gpu-t3 +#SBATCH --nodes=16 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! 
+#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%a-%j.out # output file name +#SBATCH --error=%x-%a-%j.out # error file name (same to watch just one file) +#SBATCH --account=six@v100 +#SBATCH --mail-type=ALL +#SBATCH --mail-user=hugo@huggingface.co + +set -x -e + + +ROUND=2 +TESTING=0 + +EXPERIMENT_NAME=tr12b-1B3-oscar-en-filtered-dedup +OUTPUT_PATH=$six_ALL_CCFRSCRATCH/synched_exps/$EXPERIMENT_NAME/ +CHECKPOINT_PATH=$OUTPUT_PATH/checkpoints +REPO_PATH=$OUTPUT_PATH/$EXPERIMENT_NAME-logs +TENSORBOARD_PATH=$REPO_PATH/tensorboard +CODECARBON_PATH=$REPO_PATH/codecarbon +LOGS_PATH=$REPO_PATH/logs + +MEGATRON_DEEPSPEED_REPO=$ALL_CCFRWORK/code/Megatron-DeepSpeed + +VOCAB_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-vocab.json +MERGE_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-merges.txt +DATA_PATH=/gpfsscratch/rech/six/urd43gx/oscar_en_v2_filtered/preprocessed_data_megatron_filtered_dedup_en/oscar_filtered_dedup_en_text_document + +# defining the right environment variables +source $six_ALL_CCFRWORK/start-prod +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 +cd $MEGATRON_DEEPSPEED_REPO + +# so processes know who to talk to +MASTER_ADDR=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1` +MASTER_PORT=6000 + +# adjust depending on the number of the nodes + +# XXX: edit me +GPUS_PER_NODE=4 +NNODES=16 +PP_SIZE=2 # NLAYERS must be a multiple of PP_SIZE here +TP_SIZE=1 # always fixed to the size of a single node +DP_SIZE=$((NNODES*GPUS_PER_NODE/(PP_SIZE*TP_SIZE))) # will get derived automatically by trainer + +MICRO_BATCH_SIZE=1 +GLOBAL_BATCH_SIZE=512 +TRAIN_ITER=146_484_375 + +NLAYERS=24 +NHIDDEN=2048 +NHEADS=16 +FFN_HIDDEN_SIZE=8192 +SEQ_LEN=2048 + +if [[ ${ROUND} == 1 ]]; then EXIT_INTERVAL=100 SAVE_INTERVAL=10 +elif [[ ${ROUND} == 2 ]]; then SAVE_INTERVAL=1500 +else echo "invalid ROUND: $ROUND" +fi + +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.999 \ + --adam-eps 1e-8 \ + --lr 2e-4 \ + --min-lr 1e-5 \ + --lr-decay-style cosine \ + --lr-decay-samples 146_484_375 \ + --lr-warmup-samples 183_105 \ + --clip-grad 1.0 \ + --weight-decay 1e-1 \ + " + +EXIT_OPTS=" \ + --exit-duration-in-mins 1190 \ + " + +GPT_ARGS=" \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --ffn-hidden-size $FFN_HIDDEN_SIZE \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --rampup-batch-size 32 32 2_000_000 \ + --train-samples $TRAIN_ITER \ + --vocab-file $VOCAB_FILE \ + --merge-file $MERGE_FILE \ + --loss-scale 12 \ + --clip-grad 1.0 \ + --fp16 \ + --checkpoint-activations \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +OUTPUT_ARGS=" \ + --log-interval 200 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 1000 \ + --eval-iters 100 \ + --tensorboard-dir $TENSORBOARD_PATH \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=1 + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via 
set_train_batch_size() +cat < $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.launch \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --master_addr $MASTER_ADDR \ + --master_port $MASTER_PORT \ + " + +export CMD=" \ + `pwd`/pretrain_gpt.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --data-path $DATA_PATH \ + --data-impl mmap \ + --split 949,50,1 \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + + +# # clear old checkpoint as it'd mismatch while we sort things out +# rm -rf $SAVE_CHECKPOINT_PATH + + +echo $CMD + +# We create the folder where the logs and codecarbon will be stored. +mkdir -p $LOGS_PATH +# to debug - add echo (it exits and prints what it would have launched) +srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' 2>&1 | tee -a $LOGS_PATH/main_log.txt diff --git a/train/tr12-1B3-oscar/tr12c-1B3-oscar-en-overfiltered.slurm b/train/tr12-1B3-oscar/tr12c-1B3-oscar-en-overfiltered.slurm new file mode 100644 index 0000000000000000000000000000000000000000..9efc8ddb4ebd3b926d49c7158611878f69e1d559 --- /dev/null +++ b/train/tr12-1B3-oscar/tr12c-1B3-oscar-en-overfiltered.slurm @@ -0,0 +1,191 @@ +#!/bin/bash +#SBATCH --job-name=1B3-oscar-en-overfiltered.slurm +#SBATCH --qos=qos_gpu-t3 +#SBATCH --nodes=16 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! 
+#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%a-%j.out # output file name +#SBATCH --error=%x-%a-%j.out # error file name (same to watch just one file) +#SBATCH --account=six@v100 +#SBATCH --mail-type=ALL +#SBATCH --mail-user=hugo@huggingface.co + +set -x -e + + +ROUND=2 +TESTING=0 + +EXPERIMENT_NAME=tr12c-1B3-oscar-en-overfiltered +OUTPUT_PATH=$six_ALL_CCFRSCRATCH/synched_exps/$EXPERIMENT_NAME/ +CHECKPOINT_PATH=$OUTPUT_PATH/checkpoints +REPO_PATH=$OUTPUT_PATH/$EXPERIMENT_NAME-logs +TENSORBOARD_PATH=$REPO_PATH/tensorboard +CODECARBON_PATH=$REPO_PATH/codecarbon +LOGS_PATH=$REPO_PATH/logs + +MEGATRON_DEEPSPEED_REPO=$ALL_CCFRWORK/code/Megatron-DeepSpeed + +VOCAB_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-vocab.json +MERGE_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-merges.txt +DATA_PATH=/gpfsscratch/rech/six/urd43gx/oscar_en_v2_overfiltered/preprocessed_data_megatron/oscar_overfiltered_en_text_document + +# defining the right environment variables +source $six_ALL_CCFRWORK/start-prod +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 +cd $MEGATRON_DEEPSPEED_REPO + +# so processes know who to talk to +MASTER_ADDR=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1` +MASTER_PORT=6000 + +# adjust depending on the number of the nodes + +# XXX: edit me +GPUS_PER_NODE=4 +NNODES=16 +PP_SIZE=2 # NLAYERS must be a multiple of PP_SIZE here +TP_SIZE=1 # always fixed to the size of a single node +DP_SIZE=$((NNODES*GPUS_PER_NODE/(PP_SIZE*TP_SIZE))) # will get derived automatically by trainer + +MICRO_BATCH_SIZE=1 +GLOBAL_BATCH_SIZE=512 +TRAIN_ITER=146_484_375 + +NLAYERS=24 +NHIDDEN=2048 +NHEADS=16 +FFN_HIDDEN_SIZE=8192 +SEQ_LEN=2048 + +if [[ ${ROUND} == 1 ]]; then EXIT_INTERVAL=100 SAVE_INTERVAL=10 +elif [[ ${ROUND} == 2 ]]; then SAVE_INTERVAL=1500 +else echo "invalid ROUND: $ROUND" +fi + +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.999 \ + --adam-eps 1e-8 \ + --lr 2e-4 \ + --min-lr 1e-5 \ + --lr-decay-style cosine \ + --lr-decay-samples 146_484_375 \ + --lr-warmup-samples 183_105 \ + --clip-grad 1.0 \ + --weight-decay 1e-1 \ + " + +EXIT_OPTS=" \ + --exit-duration-in-mins 1190 \ + " + +GPT_ARGS=" \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --ffn-hidden-size $FFN_HIDDEN_SIZE \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --rampup-batch-size 32 32 2_000_000 \ + --train-samples $TRAIN_ITER \ + --vocab-file $VOCAB_FILE \ + --merge-file $MERGE_FILE \ + --loss-scale 12 \ + --clip-grad 1.0 \ + --fp16 \ + --checkpoint-activations \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +OUTPUT_ARGS=" \ + --log-interval 200 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 1000 \ + --eval-iters 100 \ + --tensorboard-dir $TENSORBOARD_PATH \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=1 + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat < 
$config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.launch \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --master_addr $MASTER_ADDR \ + --master_port $MASTER_PORT \ + " + +export CMD=" \ + `pwd`/pretrain_gpt.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --data-path $DATA_PATH \ + --data-impl mmap \ + --split 949,50,1 \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + + +# # clear old checkpoint as it'd mismatch while we sort things out +# rm -rf $SAVE_CHECKPOINT_PATH + + +echo $CMD + +# We create the folder where the logs and codecarbon will be stored. +mkdir -p $LOGS_PATH +# to debug - add echo (it exits and prints what it would have launched) +srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' 2>&1 | tee -a $LOGS_PATH/main_log.txt diff --git a/train/tr13-mtf/smaller_models/tr13-6b3-mtf-xp3mt.slurm b/train/tr13-mtf/smaller_models/tr13-6b3-mtf-xp3mt.slurm new file mode 100644 index 0000000000000000000000000000000000000000..1cbadc995dd692bbe9e1b80f9b5b523fdcfcd9da --- /dev/null +++ b/train/tr13-mtf/smaller_models/tr13-6b3-mtf-xp3mt.slurm @@ -0,0 +1,211 @@ +#!/bin/bash +#SBATCH --job-name=xp3mt +#SBATCH --partition=gpu_p5 +#SBATCH --constraint=a100 +#SBATCH --reservation=hug +#SBATCH --qos=qos_gpu-gc # up to 100h +#SBATCH --nodes=8 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! 
+#SBATCH --cpus-per-task=64 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:8 # number of gpus +#SBATCH --time 8:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --account=six@a100 + +set -x -e + +source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0 +echo "START TIME: $(date)" + +variant=xp3mt + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13f-6B3-ml-t0 +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant +REPO_PATH=$DATA_OUTPUT_PATH/tr13f-6B3-ml-t0-logs +TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant +LOGS_PATH=$REPO_PATH/logs/$variant +mkdir -p $LOGS_PATH +mkdir -p $TENSORBOARD_PATH + +MEGATRON_DEEPSPEED_REPO=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0/megdslossseqnew/Megatron-DeepSpeed +cd $MEGATRON_DEEPSPEED_REPO + +KILL_SWITCH_PATH=$MEGATRON_DEEPSPEED_REPO/kill-switch-tr13f-6B3-mtf + +TRAIN_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3mt_train.txt +VALID_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3mt_validation.txt +TOKENIZER_NAME_OR_PATH=bigscience/tokenizer + +# defining the right environment variables +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 + +# testing for potential faulty nodes +# srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"' + +# so processes know who to talk to +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6001 + +GPUS_PER_NODE=8 +NNODES=$SLURM_NNODES + +PP_SIZE=1 +TP_SIZE=1 + +# T0 paper: +# ...truncate input and target sequences to 1024 and 256 tokens... +# ...use a batch size of 1024 sequences ... 2^20 total input tokens per batch... +# We use 2048 total tokens and 512 batch size = 2**20 +MICRO_BATCH_SIZE=4 +GLOBAL_BATCH_SIZE=2048 + +NLAYERS=30 +NHIDDEN=4096 +NHEADS=32 +SEQ_LEN=2048 +# 250 +SAVE_INTERVAL=2 + +TRAIN_SAMPLES=6_348_800 + +# T0 paper: +# "...we use a learning rate of 1e-3..." 
+# However, they use Adafactor, which adapts the LR +# For Adam we likely want a lower one +# FLAN: +# "...decay of 1e-4.."" + +# Uncomment for the first step +# --no-load-optim \ +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.95 \ + --adam-eps 1e-8 \ + --lr 2e-5 \ + --lr-decay-style constant \ + --lr-warmup-samples 0 \ + --clip-grad 1.0 \ + --weight-decay 1e-4 \ + --no-load-optim \ + --norm-target-loss \ + " +# for 20h 1190, for 100h 5990 +# --exit-duration-in-mins 1190 \ +EXIT_OPTS=" \ + --exit-duration-in-mins 5990 \ + " + +GPT_ARGS=" \ + --pp-partition-method 'type:transformer|embedding' \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --train-samples $TRAIN_SAMPLES \ + --tokenizer-type PretrainedFromHF \ + --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \ + --init-method-std 0.0048 \ + --embed-layernorm \ + --fp16 \ + --seed 42 \ + --position-embedding-type alibi \ + --checkpoint-activations \ + --abort-on-unmet-fused-kernel-constraints \ + --kill-switch-path $KILL_SWITCH_PATH \ + --pad-vocab-size-to 250880 \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +OUTPUT_ARGS=" \ + --log-interval 1 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 125 \ + --eval-iters 10 \ + --tensorboard-dir $TENSORBOARD_PATH \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=1 + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat < $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.run \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \ + --rdzv_backend c10d \ + --max_restarts 0 \ + --tee 3 \ + " + +export CMD=" \ + `pwd`/finetune_t0.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --train-weighted-split-paths-path $TRAIN_DATA_PATH \ + --valid-weighted-split-paths-path $VALID_DATA_PATH \ + --dataloader-type single \ + --data-impl mmap \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + +echo $CMD + +# do not remove or the training will hang and nodes will be lost w/o this workaround +export CUDA_LAUNCH_BLOCKING=1 + +# hide duplicated errors using this hack - will be properly fixed in pt-1.12 +export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json + +clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt + +echo "END TIME: $(date)" diff --git a/train/tr13-mtf/smaller_models/tr13b-1b3-ml-xp3capmixnewcodelonglossseq-a100.slurm 
b/train/tr13-mtf/smaller_models/tr13b-1b3-ml-xp3capmixnewcodelonglossseq-a100.slurm new file mode 100644 index 0000000000000000000000000000000000000000..f9e5ae267d20d16a6bbd495523a6b91131faa7a1 --- /dev/null +++ b/train/tr13-mtf/smaller_models/tr13b-1b3-ml-xp3capmixnewcodelonglossseq-a100.slurm @@ -0,0 +1,211 @@ +#!/bin/bash +#SBATCH --job-name=1b3xp3capmixnewcodelong +#SBATCH --partition=gpu_p5 +#SBATCH --constraint=a100 +#SBATCH --nodes=8 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! +#SBATCH --cpus-per-task=64 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:8 # number of gpus +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --account=six@a100 + +set -x -e + +source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0 +echo "START TIME: $(date)" + +variant=xp3capmixnewcodelonglossseq + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13b-1B3-ml-t0 +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant +REPO_PATH=$DATA_OUTPUT_PATH/tr13b-1B3-ml-logs +TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant +LOGS_PATH=$REPO_PATH/logs/$variant +mkdir -p $LOGS_PATH +mkdir -p $TENSORBOARD_PATH + +MEGATRON_DEEPSPEED_REPO=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0/megdslossseqnew/Megatron-DeepSpeed +cd $MEGATRON_DEEPSPEED_REPO + +KILL_SWITCH_PATH=$MEGATRON_DEEPSPEED_REPO/kill-switch-tr13b-1B3-mtf + +TRAIN_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixnewcodelong_train.txt +VALID_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixnewcodelong_validation.txt +TOKENIZER_NAME_OR_PATH=bigscience/tokenizer + +# defining the right environment variables +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 + +# testing for potential faulty nodes +# srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"' + +# so processes know who to talk to +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6001 + +GPUS_PER_NODE=8 +NNODES=$SLURM_NNODES + +PP_SIZE=2 +TP_SIZE=2 + +# T0 paper: +# ...truncate input and target sequences to 1024 and 256 tokens... +# ...use a batch size of 1024 sequences ... 2^20 total input tokens per batch... +# We use 2048 total tokens and 512 batch size = 2**20 +MICRO_BATCH_SIZE=1 +GLOBAL_BATCH_SIZE=2048 + +NLAYERS=24 +NHIDDEN=2048 +NHEADS=16 +SEQ_LEN=2048 +# 250 +SAVE_INTERVAL=2 + +TRAIN_SAMPLES=6_348_800 + +# T0 paper: +# "...we use a learning rate of 1e-3..." 
+# However, they use Adafactor, which adapts the LR +# For Adam we likely want a lower one +# FLAN: +# "...decay of 1e-4.."" + +# Uncomment for the first step +# --no-load-optim \ +# --reset-progress \ +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.95 \ + --adam-eps 1e-8 \ + --lr 2e-5 \ + --lr-decay-style constant \ + --lr-warmup-samples 0 \ + --clip-grad 1.0 \ + --weight-decay 1e-4 \ + --no-load-optim \ + --reset-progress \ + --norm-target-loss \ + " +# for 20h 1190, for 100h 5990 +# --exit-duration-in-mins 1190 \ +EXIT_OPTS=" \ + --exit-duration-in-mins 5990 \ + " + +GPT_ARGS=" \ + --pp-partition-method 'type:transformer|embedding' \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --train-samples $TRAIN_SAMPLES \ + --tokenizer-type PretrainedFromHF \ + --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \ + --init-method-std 0.0048 \ + --embed-layernorm \ + --fp16 \ + --seed 42 \ + --position-embedding-type alibi \ + --checkpoint-activations \ + --abort-on-unmet-fused-kernel-constraints \ + --kill-switch-path $KILL_SWITCH_PATH \ + --pad-vocab-size-to 250880 \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +OUTPUT_ARGS=" \ + --log-interval 1 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 125 \ + --eval-iters 2 \ + --tensorboard-dir $TENSORBOARD_PATH \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=1 + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat < $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.run \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \ + --rdzv_backend c10d \ + --max_restarts 0 \ + --tee 3 \ + " + +export CMD=" \ + `pwd`/finetune_t0.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --train-weighted-split-paths-path $TRAIN_DATA_PATH \ + --valid-weighted-split-paths-path $VALID_DATA_PATH \ + --dataloader-type single \ + --data-impl mmap \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + +echo $CMD + +# do not remove or the training will hang and nodes will be lost w/o this workaround +export CUDA_LAUNCH_BLOCKING=1 + +# hide duplicated errors using this hack - will be properly fixed in pt-1.12 +export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json + +clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt + +echo "END TIME: $(date)" diff --git a/train/tr13-mtf/smaller_models/tr13b-1b3-ml-xp3capmixnewcodelonglossseq.slurm 
b/train/tr13-mtf/smaller_models/tr13b-1b3-ml-xp3capmixnewcodelonglossseq.slurm new file mode 100644 index 0000000000000000000000000000000000000000..638f14c4a17378e63971b42ce4ed52c40cdf07ce --- /dev/null +++ b/train/tr13-mtf/smaller_models/tr13b-1b3-ml-xp3capmixnewcodelonglossseq.slurm @@ -0,0 +1,211 @@ +#!/bin/bash +#SBATCH --job-name=1b3xp3capmixnewcodelong +#SBATCH --qos=qos_gpu-t3 +#SBATCH --nodes=16 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! +#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH -C v100-32g +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --account=six@v100 + +set -x -e + +source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0 +echo "START TIME: $(date)" + +variant=xp3capmixnewcodelonglossseq + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13b-1B3-ml-t0 +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant +REPO_PATH=$DATA_OUTPUT_PATH/tr13b-1B3-ml-logs +TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant +LOGS_PATH=$REPO_PATH/logs/$variant +mkdir -p $LOGS_PATH +mkdir -p $TENSORBOARD_PATH + +MEGATRON_DEEPSPEED_REPO=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0/megdslossseqnew/Megatron-DeepSpeed +cd $MEGATRON_DEEPSPEED_REPO + +KILL_SWITCH_PATH=$MEGATRON_DEEPSPEED_REPO/kill-switch-tr13b-1B3-mtf + +TRAIN_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixnewcodelong_train.txt +VALID_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixnewcodelong_validation_pretr.txt +TOKENIZER_NAME_OR_PATH=bigscience/tokenizer + +# defining the right environment variables +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 + +# testing for potential faulty nodes +# srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"' + +# so processes know who to talk to +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6001 + +GPUS_PER_NODE=4 +NNODES=$SLURM_NNODES + +PP_SIZE=2 +TP_SIZE=2 + +# T0 paper: +# ...truncate input and target sequences to 1024 and 256 tokens... +# ...use a batch size of 1024 sequences ... 2^20 total input tokens per batch... +# We use 2048 total tokens and 512 batch size = 2**20 +MICRO_BATCH_SIZE=1 +GLOBAL_BATCH_SIZE=2048 + +NLAYERS=24 +NHIDDEN=2048 +NHEADS=16 +SEQ_LEN=2048 +# 250 +SAVE_INTERVAL=2 + +TRAIN_SAMPLES=6_348_800 + +# T0 paper: +# "...we use a learning rate of 1e-3..." 
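+# (aside, not in the original script: the --exit-duration-in-mins values used further
+#  down are presumably the wall-clock budget minus a ~10 min margin for a clean exit,
+#  i.e. 20h -> 20*60 - 10 = 1190 and 100h -> 100*60 - 10 = 5990)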
+# However, they use Adafactor, which adapts the LR +# For Adam we likely want a lower one +# FLAN: +# "...decay of 1e-4.."" + +# Uncomment for the first step +# --no-load-optim \ +# --reset-progress \ +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.95 \ + --adam-eps 1e-8 \ + --lr 2e-5 \ + --lr-decay-style constant \ + --lr-warmup-samples 0 \ + --clip-grad 1.0 \ + --weight-decay 1e-4 \ + --no-load-optim \ + --reset-progress \ + --norm-target-loss \ + " +# for 20h 1190, for 100h 5990 +# --exit-duration-in-mins 1190 \ +EXIT_OPTS=" \ + --exit-duration-in-mins 5990 \ + " + +GPT_ARGS=" \ + --pp-partition-method 'type:transformer|embedding' \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --train-samples $TRAIN_SAMPLES \ + --tokenizer-type PretrainedFromHF \ + --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \ + --init-method-std 0.0048 \ + --embed-layernorm \ + --fp16 \ + --seed 42 \ + --position-embedding-type alibi \ + --checkpoint-activations \ + --abort-on-unmet-fused-kernel-constraints \ + --kill-switch-path $KILL_SWITCH_PATH \ + --pad-vocab-size-to 250880 \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +OUTPUT_ARGS=" \ + --log-interval 1 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 125 \ + --eval-iters 10 \ + --tensorboard-dir $TENSORBOARD_PATH \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=1 + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat < $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.run \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \ + --rdzv_backend c10d \ + --max_restarts 0 \ + --tee 3 \ + " + +export CMD=" \ + `pwd`/finetune_t0.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --train-weighted-split-paths-path $TRAIN_DATA_PATH \ + --valid-weighted-split-paths-path $VALID_DATA_PATH \ + --dataloader-type single \ + --data-impl mmap \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + +echo $CMD + +# do not remove or the training will hang and nodes will be lost w/o this workaround +export CUDA_LAUNCH_BLOCKING=1 + +# hide duplicated errors using this hack - will be properly fixed in pt-1.12 +export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json + +clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt + +echo "END TIME: $(date)" diff --git a/train/tr13-mtf/smaller_models/tr13c-2b5-mtf-xp3capmixnewcodelonglossseq-a100.slurm 
b/train/tr13-mtf/smaller_models/tr13c-2b5-mtf-xp3capmixnewcodelonglossseq-a100.slurm new file mode 100644 index 0000000000000000000000000000000000000000..9be165feb38e7a350a59c5bdb87da18704122daf --- /dev/null +++ b/train/tr13-mtf/smaller_models/tr13c-2b5-mtf-xp3capmixnewcodelonglossseq-a100.slurm @@ -0,0 +1,210 @@ +#!/bin/bash +#SBATCH --job-name=2b5xp3capmixnewcodelong +#SBATCH --partition=gpu_p5 +#SBATCH --constraint=a100 +#SBATCH --nodes=16 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! +#SBATCH --cpus-per-task=64 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:8 # number of gpus +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --account=six@a100 + + +set -x -e + +source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0 +echo "START TIME: $(date)" + +variant=xp3capmixnewcodelonglossseq + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13c-2B5-ml-t0 +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant +REPO_PATH=$DATA_OUTPUT_PATH/tr13c-2B5-ml-t0-logs +TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant +LOGS_PATH=$REPO_PATH/logs/$variant +mkdir -p $LOGS_PATH +mkdir -p $TENSORBOARD_PATH + +MEGATRON_DEEPSPEED_REPO=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0/megdslossseqnew/Megatron-DeepSpeed +cd $MEGATRON_DEEPSPEED_REPO + +KILL_SWITCH_PATH=$MEGATRON_DEEPSPEED_REPO/kill-switch-tr13c-2B5-mtf + +TRAIN_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixnewcodelong_train.txt +VALID_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixnewcodelong_validation.txt +TOKENIZER_NAME_OR_PATH=bigscience/tokenizer + +# defining the right environment variables +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 + +# testing for potential faulty nodes +# srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"' + +# so processes know who to talk to +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6001 + +GPUS_PER_NODE=8 +NNODES=$SLURM_NNODES + +PP_SIZE=2 +TP_SIZE=1 + +# T0 paper: +# ...truncate input and target sequences to 1024 and 256 tokens... +# ...use a batch size of 1024 sequences ... 2^20 total input tokens per batch... +# We use 2048 total tokens and 512 batch size = 2**20 +MICRO_BATCH_SIZE=1 +GLOBAL_BATCH_SIZE=2048 + +NLAYERS=30 +NHIDDEN=2560 +NHEADS=32 +SEQ_LEN=2048 +# 250 +SAVE_INTERVAL=250 + +TRAIN_SAMPLES=6_348_800 + +# T0 paper: +# "...we use a learning rate of 1e-3..." 
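+# (aside, a sketch not in the original script: the JSON block further down, terminated
+#  by EOT, is meant to be written out with a heredoc, i.e. `cat <<EOT > $config_json`;
+#  inside it, "loss_scale": 0 selects DeepSpeed's dynamic fp16 loss scaling, starting
+#  at 2^initial_scale_power = 2^12 and raised again after loss_scale_window = 500
+#  overflow-free steps)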
+# However, they use Adafactor, which adapts the LR +# For Adam we likely want a lower one +# FLAN: +# "...decay of 1e-4.."" + +# Uncomment for the first step +# --no-load-optim \ +# --reset-progress \ +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.95 \ + --adam-eps 1e-8 \ + --lr 2e-5 \ + --lr-decay-style constant \ + --lr-warmup-samples 0 \ + --clip-grad 1.0 \ + --weight-decay 1e-4 \ + --norm-target-loss \ + " +# for 20h 1190, for 100h 5990 +# --exit-duration-in-mins 1190 \ +EXIT_OPTS=" \ + --exit-duration-in-mins 5990 \ + " + +GPT_ARGS=" \ + --pp-partition-method 'type:transformer|embedding' \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --train-samples $TRAIN_SAMPLES \ + --tokenizer-type PretrainedFromHF \ + --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \ + --init-method-std 0.0048 \ + --embed-layernorm \ + --fp16 \ + --seed 42 \ + --position-embedding-type alibi \ + --checkpoint-activations \ + --abort-on-unmet-fused-kernel-constraints \ + --kill-switch-path $KILL_SWITCH_PATH \ + --pad-vocab-size-to 250880 \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +OUTPUT_ARGS=" \ + --log-interval 1 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 125 \ + --eval-iters 2 \ + --tensorboard-dir $TENSORBOARD_PATH \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=1 + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat < $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.run \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \ + --rdzv_backend c10d \ + --max_restarts 0 \ + --tee 3 \ + " + +export CMD=" \ + `pwd`/finetune_t0.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --train-weighted-split-paths-path $TRAIN_DATA_PATH \ + --valid-weighted-split-paths-path $VALID_DATA_PATH \ + --dataloader-type single \ + --data-impl mmap \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + +echo $CMD + +# do not remove or the training will hang and nodes will be lost w/o this workaround +export CUDA_LAUNCH_BLOCKING=1 + +# hide duplicated errors using this hack - will be properly fixed in pt-1.12 +export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json + +clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt + +echo "END TIME: $(date)" diff --git a/train/tr13-mtf/smaller_models/tr13c-2b5-mtf-xp3capmixnewcodelonglossseq.slurm 
b/train/tr13-mtf/smaller_models/tr13c-2b5-mtf-xp3capmixnewcodelonglossseq.slurm new file mode 100644 index 0000000000000000000000000000000000000000..53f05a3bf5f3c1ab8b31e4a2f9633ba9f20f6fce --- /dev/null +++ b/train/tr13-mtf/smaller_models/tr13c-2b5-mtf-xp3capmixnewcodelonglossseq.slurm @@ -0,0 +1,211 @@ +#!/bin/bash +#SBATCH --job-name=2b5xp3capmixnewcodelong +#SBATCH --qos=qos_gpu-t3 +#SBATCH --nodes=32 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! +#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH -C v100-32g +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --account=six@v100 + +set -x -e + +source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0 +echo "START TIME: $(date)" + +variant=xp3capmixnewcodelonglossseq + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13c-2B5-ml-t0 +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant +REPO_PATH=$DATA_OUTPUT_PATH/tr13c-2B5-ml-t0-logs +TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant +LOGS_PATH=$REPO_PATH/logs/$variant +mkdir -p $LOGS_PATH +mkdir -p $TENSORBOARD_PATH + +MEGATRON_DEEPSPEED_REPO=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0/megdslossseqnew/Megatron-DeepSpeed +cd $MEGATRON_DEEPSPEED_REPO + +KILL_SWITCH_PATH=$MEGATRON_DEEPSPEED_REPO/kill-switch-tr13c-2B5-mtf + +TRAIN_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixnewcodelong_train.txt +VALID_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixnewcodelong_validation_pretr.txt +TOKENIZER_NAME_OR_PATH=bigscience/tokenizer + +# defining the right environment variables +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 + +# testing for potential faulty nodes +# srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"' + +# so processes know who to talk to +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6001 + +GPUS_PER_NODE=4 +NNODES=$SLURM_NNODES + +PP_SIZE=2 +TP_SIZE=1 + +# T0 paper: +# ...truncate input and target sequences to 1024 and 256 tokens... +# ...use a batch size of 1024 sequences ... 2^20 total input tokens per batch... +# We use 2048 total tokens and 512 batch size = 2**20 +MICRO_BATCH_SIZE=1 +GLOBAL_BATCH_SIZE=2048 + +NLAYERS=30 +NHIDDEN=2560 +NHEADS=32 +SEQ_LEN=2048 +# 250 +SAVE_INTERVAL=2 + +TRAIN_SAMPLES=6_348_800 + +# T0 paper: +# "...we use a learning rate of 1e-3..." 
+# However, they use Adafactor, which adapts the LR +# For Adam we likely want a lower one +# FLAN: +# "...decay of 1e-4.."" + +# Uncomment for the first step +# --no-load-optim \ +# --reset-progress \ +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.95 \ + --adam-eps 1e-8 \ + --lr 2e-5 \ + --lr-decay-style constant \ + --lr-warmup-samples 0 \ + --clip-grad 1.0 \ + --weight-decay 1e-4 \ + --no-load-optim \ + --reset-progress \ + --norm-target-loss \ + " +# for 20h 1190, for 100h 5990 +# --exit-duration-in-mins 1190 \ +EXIT_OPTS=" \ + --exit-duration-in-mins 5990 \ + " + +GPT_ARGS=" \ + --pp-partition-method 'type:transformer|embedding' \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --train-samples $TRAIN_SAMPLES \ + --tokenizer-type PretrainedFromHF \ + --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \ + --init-method-std 0.0048 \ + --embed-layernorm \ + --fp16 \ + --seed 42 \ + --position-embedding-type alibi \ + --checkpoint-activations \ + --abort-on-unmet-fused-kernel-constraints \ + --kill-switch-path $KILL_SWITCH_PATH \ + --pad-vocab-size-to 250880 \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +OUTPUT_ARGS=" \ + --log-interval 1 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 125 \ + --eval-iters 2 \ + --tensorboard-dir $TENSORBOARD_PATH \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=1 + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat < $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.run \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \ + --rdzv_backend c10d \ + --max_restarts 0 \ + --tee 3 \ + " + +export CMD=" \ + `pwd`/finetune_t0.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --train-weighted-split-paths-path $TRAIN_DATA_PATH \ + --valid-weighted-split-paths-path $VALID_DATA_PATH \ + --dataloader-type single \ + --data-impl mmap \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + +echo $CMD + +# do not remove or the training will hang and nodes will be lost w/o this workaround +export CUDA_LAUNCH_BLOCKING=1 + +# hide duplicated errors using this hack - will be properly fixed in pt-1.12 +export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json + +clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt + +echo "END TIME: $(date)" diff --git a/train/tr13-mtf/smaller_models/tr13e-350m-mtf-xp3capmixnewcodelonglossseq-val.slurm 
b/train/tr13-mtf/smaller_models/tr13e-350m-mtf-xp3capmixnewcodelonglossseq-val.slurm new file mode 100644 index 0000000000000000000000000000000000000000..725e1e42237788057ef2beb6dcb9b58a92c90ea7 --- /dev/null +++ b/train/tr13-mtf/smaller_models/tr13e-350m-mtf-xp3capmixnewcodelonglossseq-val.slurm @@ -0,0 +1,211 @@ +#!/bin/bash +#SBATCH --job-name=val350mxp3capmixnewcodelong +#SBATCH --qos=qos_gpu-t3 +#SBATCH --nodes=8 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! +#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH -C v100-32g +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --account=six@v100 + +set -x -e + +source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0 +echo "START TIME: $(date)" + +variant=xp3capmixnewcodelonglossseq + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13e-350M-ml-t0 +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant +REPO_PATH=$DATA_OUTPUT_PATH/tr13e-350M-ml-logs +TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant +LOGS_PATH=$REPO_PATH/logs/$variant +mkdir -p $LOGS_PATH +mkdir -p $TENSORBOARD_PATH + +MEGATRON_DEEPSPEED_REPO=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0/megdslossseqnew350m/Megatron-DeepSpeed +cd $MEGATRON_DEEPSPEED_REPO + +KILL_SWITCH_PATH=$MEGATRON_DEEPSPEED_REPO/kill-switch-tr13e-350M-mtf + +TRAIN_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixnewcodelong_train.txt +VALID_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixnewcodelong_validation_pretr.txt +TOKENIZER_NAME_OR_PATH=bigscience/tokenizer + +# defining the right environment variables +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 + +# testing for potential faulty nodes +# srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"' + +# so processes know who to talk to +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6001 + +GPUS_PER_NODE=4 +NNODES=$SLURM_NNODES + +PP_SIZE=1 +TP_SIZE=1 + +# T0 paper: +# ...truncate input and target sequences to 1024 and 256 tokens... +# ...use a batch size of 1024 sequences ... 2^20 total input tokens per batch... +# We use 2048 total tokens and 512 batch size = 2**20 +MICRO_BATCH_SIZE=1 +GLOBAL_BATCH_SIZE=1024 + +NLAYERS=24 +NHIDDEN=1024 +NHEADS=16 +SEQ_LEN=2048 +# 250 +SAVE_INTERVAL=2 + +TRAIN_SAMPLES=6_348_800 + +# T0 paper: +# "...we use a learning rate of 1e-3..." 
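+# (aside, not in the original script: this variant appears to be an evaluation-only
+#  pass, see the --eval-only True flag and the *_validation_pretr.txt split below;
+#  assuming each eval iteration consumes one global batch, a pass covers about
+#  eval-iters * GBS = 10 * 1024 = 10,240 sequences, ~21M tokens at SEQ_LEN = 2048)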
+# However, they use Adafactor, which adapts the LR +# For Adam we likely want a lower one +# FLAN: +# "...decay of 1e-4.."" + +# Uncomment for the first step +# --no-load-optim \ +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.95 \ + --adam-eps 1e-8 \ + --lr 5e-4 \ + --lr-decay-style constant \ + --lr-warmup-samples 0 \ + --clip-grad 1.0 \ + --weight-decay 1e-4 \ + --no-load-optim \ + --norm-target-loss \ + --reset-progress \ + " +# for 20h 1190, for 100h 5990 +# --exit-duration-in-mins 1190 \ +EXIT_OPTS=" \ + --exit-duration-in-mins 5990 \ + " + +GPT_ARGS=" \ + --pp-partition-method 'type:transformer|embedding' \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --train-samples $TRAIN_SAMPLES \ + --tokenizer-type PretrainedFromHF \ + --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \ + --init-method-std 0.0048 \ + --embed-layernorm \ + --fp16 \ + --seed 42 \ + --position-embedding-type alibi \ + --checkpoint-activations \ + --abort-on-unmet-fused-kernel-constraints \ + --kill-switch-path $KILL_SWITCH_PATH \ + --pad-vocab-size-to 250880 \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +OUTPUT_ARGS=" \ + --log-interval 1 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 125 \ + --eval-iters 10 \ + --eval-only True \ + --tensorboard-dir $TENSORBOARD_PATH \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=1 + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat < $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.run \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \ + --rdzv_backend c10d \ + --max_restarts 0 \ + --tee 3 \ + " + +export CMD=" \ + `pwd`/finetune_t0.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --train-weighted-split-paths-path $TRAIN_DATA_PATH \ + --valid-weighted-split-paths-path $VALID_DATA_PATH \ + --dataloader-type single \ + --data-impl mmap \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + +echo $CMD + +# do not remove or the training will hang and nodes will be lost w/o this workaround +export CUDA_LAUNCH_BLOCKING=1 + +# hide duplicated errors using this hack - will be properly fixed in pt-1.12 +export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json + +clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt + +echo "END TIME: $(date)" diff --git a/train/tr13-mtf/smaller_models/tr13f-6B3-mtf-bos.slurm 
b/train/tr13-mtf/smaller_models/tr13f-6B3-mtf-bos.slurm new file mode 100644 index 0000000000000000000000000000000000000000..57e6a5cb25d70af2bb6aaf11b581984d8b3a5c93 --- /dev/null +++ b/train/tr13-mtf/smaller_models/tr13f-6B3-mtf-bos.slurm @@ -0,0 +1,209 @@ +#!/bin/bash +#SBATCH --job-name=bostr13f-6B3-ml-t0 +#SBATCH --partition=gpu_p5 +#SBATCH --constraint=a100 +#SBATCH --reservation=hug +#SBATCH --qos=qos_gpu-gc # up to 100h +#SBATCH --nodes=8 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! +#SBATCH --cpus-per-task=64 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:8 # number of gpus +#SBATCH --time 100:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --account=six@a100 + +set -x -e + +source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0 +echo "START TIME: $(date)" + +variant=bos + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13f-6B3-ml-t0 +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant +REPO_PATH=$DATA_OUTPUT_PATH/tr13f-6B3-ml-t0-logs +TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant +LOGS_PATH=$REPO_PATH/logs/$variant +mkdir -p $LOGS_PATH +mkdir -p $TENSORBOARD_PATH + +MEGATRON_DEEPSPEED_REPO=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed +cd $MEGATRON_DEEPSPEED_REPO + +KILL_SWITCH_PATH=$MEGATRON_DEEPSPEED_REPO/kill-switch-tr13f-6B3-mtf + +TRAIN_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/p31bos_train.txt +VALID_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/p31bos_validation.txt +TOKENIZER_NAME_OR_PATH=bigscience/tokenizer + +# defining the right environment variables +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 + +# testing for potential faulty nodes +# srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"' + +# so processes know who to talk to +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6001 + +GPUS_PER_NODE=8 +NNODES=$SLURM_NNODES + +PP_SIZE=1 +TP_SIZE=1 + +# T0 paper: +# ...truncate input and target sequences to 1024 and 256 tokens... +# ...use a batch size of 1024 sequences ... 2^20 total input tokens per batch... +# We use 2048 total tokens and 512 batch size = 2**20 +MICRO_BATCH_SIZE=4 +GLOBAL_BATCH_SIZE=2048 + +NLAYERS=30 +NHIDDEN=4096 +NHEADS=32 +SEQ_LEN=2048 + +SAVE_INTERVAL=1000 + +TRAIN_SAMPLES=6_348_800 + +# T0 paper: +# "...we use a learning rate of 1e-3..." 
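+# (aside, a rough budget sketch not from the paper: with the values above the run is
+#  TRAIN_SAMPLES / GBS = 6,348,800 / 2048 = 3,100 optimizer steps; if every sample is
+#  a packed SEQ_LEN = 2048 sequence, that is roughly 6,348,800 * 2048 ~= 13.0B tokens)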
+# However, they use Adafactor, which adapts the LR +# For Adam we likely want a lower one +# FLAN: +# "...decay of 1e-4.."" + +# Uncomment for the first step +# --no-load-optim \ +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.95 \ + --adam-eps 1e-8 \ + --lr 2e-5 \ + --lr-decay-style constant \ + --lr-warmup-samples 0 \ + --clip-grad 1.0 \ + --weight-decay 1e-4 \ + " +# for 20h 1190, for 100h 5990 +# --exit-duration-in-mins 1190 \ +EXIT_OPTS=" \ + --exit-duration-in-mins 5990 \ + " + +GPT_ARGS=" \ + --pp-partition-method 'type:transformer|embedding' \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --train-samples $TRAIN_SAMPLES \ + --tokenizer-type PretrainedFromHF \ + --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \ + --init-method-std 0.0048 \ + --embed-layernorm \ + --fp16 \ + --seed 42 \ + --position-embedding-type alibi \ + --checkpoint-activations \ + --abort-on-unmet-fused-kernel-constraints \ + --kill-switch-path $KILL_SWITCH_PATH \ + --pad-vocab-size-to 250880 \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +OUTPUT_ARGS=" \ + --log-interval 1 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 250 \ + --eval-iters 50 \ + --tensorboard-dir $TENSORBOARD_PATH \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=1 + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat < $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.run \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \ + --rdzv_backend c10d \ + --max_restarts 0 \ + --tee 3 \ + " + +export CMD=" \ + `pwd`/finetune_t0.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --train-weighted-split-paths-path $TRAIN_DATA_PATH \ + --valid-weighted-split-paths-path $VALID_DATA_PATH \ + --dataloader-type single \ + --data-impl mmap \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + +echo $CMD + +# do not remove or the training will hang and nodes will be lost w/o this workaround +export CUDA_LAUNCH_BLOCKING=1 + +# hide duplicated errors using this hack - will be properly fixed in pt-1.12 +export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json + +clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt + +echo "END TIME: $(date)" diff --git a/train/tr13-mtf/smaller_models/tr13f-6B3-mtf-xp3.slurm b/train/tr13-mtf/smaller_models/tr13f-6B3-mtf-xp3.slurm new file mode 100644 index 
0000000000000000000000000000000000000000..3fa7b178a0bacd54f57f258ea62e4fd17fd2ea07 --- /dev/null +++ b/train/tr13-mtf/smaller_models/tr13f-6B3-mtf-xp3.slurm @@ -0,0 +1,210 @@ +#!/bin/bash +#SBATCH --job-name=xp3tr13f-6B3-ml-t0 +#SBATCH --partition=gpu_p5 +#SBATCH --constraint=a100 +#SBATCH --reservation=hug +#SBATCH --qos=qos_gpu-gc # up to 100h +#SBATCH --nodes=8 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! +#SBATCH --cpus-per-task=64 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:8 # number of gpus +#SBATCH --time 100:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --account=six@a100 + +set -x -e + +source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0 +echo "START TIME: $(date)" + +variant=xp3 + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13f-6B3-ml-t0 +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant +REPO_PATH=$DATA_OUTPUT_PATH/tr13f-6B3-ml-t0-logs +TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant +LOGS_PATH=$REPO_PATH/logs/$variant +mkdir -p $LOGS_PATH +mkdir -p $TENSORBOARD_PATH + +MEGATRON_DEEPSPEED_REPO=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed +cd $MEGATRON_DEEPSPEED_REPO + +KILL_SWITCH_PATH=$MEGATRON_DEEPSPEED_REPO/kill-switch-tr13f-6B3-mtf + +TRAIN_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3_train.txt +VALID_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3_validation.txt +TOKENIZER_NAME_OR_PATH=bigscience/tokenizer + +# defining the right environment variables +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 + +# testing for potential faulty nodes +# srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"' + +# so processes know who to talk to +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6001 + +GPUS_PER_NODE=8 +NNODES=$SLURM_NNODES + +PP_SIZE=1 +TP_SIZE=1 + +# T0 paper: +# ...truncate input and target sequences to 1024 and 256 tokens... +# ...use a batch size of 1024 sequences ... 2^20 total input tokens per batch... +# We use 2048 total tokens and 512 batch size = 2**20 +MICRO_BATCH_SIZE=4 +GLOBAL_BATCH_SIZE=2048 + +NLAYERS=30 +NHIDDEN=4096 +NHEADS=32 +SEQ_LEN=2048 + +SAVE_INTERVAL=1000 + +TRAIN_SAMPLES=6_348_800 + +# T0 paper: +# "...we use a learning rate of 1e-3..." 
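+# (aside, not in the original script: how the launch below fits together: srun starts
+#  one task per node (--ntasks-per-node=1), each task runs torch.distributed.run with
+#  --node_rank $SLURM_PROCID, and that spawns GPUS_PER_NODE = 8 workers per node, so
+#  the job's world size is NNODES * GPUS_PER_NODE = 8 * 8 = 64 ranks)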
+# However, they use Adafactor, which adapts the LR +# For Adam we likely want a lower one +# FLAN: +# "...decay of 1e-4.."" + +# Uncomment for the first step +# --no-load-optim \ +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.95 \ + --adam-eps 1e-8 \ + --lr 2e-5 \ + --lr-decay-style constant \ + --lr-warmup-samples 0 \ + --clip-grad 1.0 \ + --weight-decay 1e-4 \ + --no-load-optim \ + " +# for 20h 1190, for 100h 5990 +# --exit-duration-in-mins 1190 \ +EXIT_OPTS=" \ + --exit-duration-in-mins 5990 \ + " + +GPT_ARGS=" \ + --pp-partition-method 'type:transformer|embedding' \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --train-samples $TRAIN_SAMPLES \ + --tokenizer-type PretrainedFromHF \ + --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \ + --init-method-std 0.0048 \ + --embed-layernorm \ + --fp16 \ + --seed 42 \ + --position-embedding-type alibi \ + --checkpoint-activations \ + --abort-on-unmet-fused-kernel-constraints \ + --kill-switch-path $KILL_SWITCH_PATH \ + --pad-vocab-size-to 250880 \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +OUTPUT_ARGS=" \ + --log-interval 1 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 250 \ + --eval-iters 50 \ + --tensorboard-dir $TENSORBOARD_PATH \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=1 + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat < $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.run \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \ + --rdzv_backend c10d \ + --max_restarts 0 \ + --tee 3 \ + " + +export CMD=" \ + `pwd`/finetune_t0.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --train-weighted-split-paths-path $TRAIN_DATA_PATH \ + --valid-weighted-split-paths-path $VALID_DATA_PATH \ + --dataloader-type single \ + --data-impl mmap \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + +echo $CMD + +# do not remove or the training will hang and nodes will be lost w/o this workaround +export CUDA_LAUNCH_BLOCKING=1 + +# hide duplicated errors using this hack - will be properly fixed in pt-1.12 +export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json + +clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt + +echo "END TIME: $(date)" diff --git a/train/tr13-mtf/smaller_models/tr13f-6B3-mtf-xp3mixed.slurm b/train/tr13-mtf/smaller_models/tr13f-6B3-mtf-xp3mixed.slurm new file mode 100644 index 
0000000000000000000000000000000000000000..4de16523af0adf56709e089a60c87db2f19ea894 --- /dev/null +++ b/train/tr13-mtf/smaller_models/tr13f-6B3-mtf-xp3mixed.slurm @@ -0,0 +1,210 @@ +#!/bin/bash +#SBATCH --job-name=xp3mixedtr13f-6B3-ml-t0 +#SBATCH --partition=gpu_p5 +#SBATCH --constraint=a100 +#SBATCH --reservation=hug +#SBATCH --qos=qos_gpu-gc # up to 100h +#SBATCH --nodes=8 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! +#SBATCH --cpus-per-task=64 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:8 # number of gpus +#SBATCH --time 100:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --account=six@a100 + +set -x -e + +source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0 +echo "START TIME: $(date)" + +variant=xp3mixed + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13f-6B3-ml-t0 +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant +REPO_PATH=$DATA_OUTPUT_PATH/tr13f-6B3-ml-t0-logs +TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant +LOGS_PATH=$REPO_PATH/logs/$variant +mkdir -p $LOGS_PATH +mkdir -p $TENSORBOARD_PATH + +MEGATRON_DEEPSPEED_REPO=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed +cd $MEGATRON_DEEPSPEED_REPO + +KILL_SWITCH_PATH=$MEGATRON_DEEPSPEED_REPO/kill-switch-tr13f-6B3-mtf + +TRAIN_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3mixed_train.txt +VALID_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3mixed_validation.txt +TOKENIZER_NAME_OR_PATH=bigscience/tokenizer + +# defining the right environment variables +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 + +# testing for potential faulty nodes +# srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"' + +# so processes know who to talk to +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6001 + +GPUS_PER_NODE=8 +NNODES=$SLURM_NNODES + +PP_SIZE=1 +TP_SIZE=1 + +# T0 paper: +# ...truncate input and target sequences to 1024 and 256 tokens... +# ...use a batch size of 1024 sequences ... 2^20 total input tokens per batch... +# We use 2048 total tokens and 512 batch size = 2**20 +MICRO_BATCH_SIZE=4 +GLOBAL_BATCH_SIZE=2048 + +NLAYERS=30 +NHIDDEN=4096 +NHEADS=32 +SEQ_LEN=2048 + +SAVE_INTERVAL=250 + +TRAIN_SAMPLES=6_348_800 + +# T0 paper: +# "...we use a learning rate of 1e-3..." 
+# However, they use Adafactor, which adapts the LR +# For Adam we likely want a lower one +# FLAN: +# "...decay of 1e-4.."" + +# Uncomment for the first step +# --no-load-optim \ +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.95 \ + --adam-eps 1e-8 \ + --lr 2e-5 \ + --lr-decay-style constant \ + --lr-warmup-samples 0 \ + --clip-grad 1.0 \ + --weight-decay 1e-4 \ + --no-load-optim \ + " +# for 20h 1190, for 100h 5990 +# --exit-duration-in-mins 1190 \ +EXIT_OPTS=" \ + --exit-duration-in-mins 5990 \ + " + +GPT_ARGS=" \ + --pp-partition-method 'type:transformer|embedding' \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --train-samples $TRAIN_SAMPLES \ + --tokenizer-type PretrainedFromHF \ + --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \ + --init-method-std 0.0048 \ + --embed-layernorm \ + --fp16 \ + --seed 42 \ + --position-embedding-type alibi \ + --checkpoint-activations \ + --abort-on-unmet-fused-kernel-constraints \ + --kill-switch-path $KILL_SWITCH_PATH \ + --pad-vocab-size-to 250880 \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +OUTPUT_ARGS=" \ + --log-interval 1 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 250 \ + --eval-iters 50 \ + --tensorboard-dir $TENSORBOARD_PATH \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=1 + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat < $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.run \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \ + --rdzv_backend c10d \ + --max_restarts 0 \ + --tee 3 \ + " + +export CMD=" \ + `pwd`/finetune_t0.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --train-weighted-split-paths-path $TRAIN_DATA_PATH \ + --valid-weighted-split-paths-path $VALID_DATA_PATH \ + --dataloader-type single \ + --data-impl mmap \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + +echo $CMD + +# do not remove or the training will hang and nodes will be lost w/o this workaround +export CUDA_LAUNCH_BLOCKING=1 + +# hide duplicated errors using this hack - will be properly fixed in pt-1.12 +export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json + +clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt + +echo "END TIME: $(date)" diff --git a/train/tr13-mtf/smaller_models/tr13f-6B3-mtf.slurm b/train/tr13-mtf/smaller_models/tr13f-6B3-mtf.slurm new file mode 100644 index 
0000000000000000000000000000000000000000..097ac497a7c44efb4fea052b29a6a01e3cad611d --- /dev/null +++ b/train/tr13-mtf/smaller_models/tr13f-6B3-mtf.slurm @@ -0,0 +1,209 @@ +#!/bin/bash +#SBATCH --job-name=tr13f-6B3-ml-t0 +#SBATCH --partition=gpu_p5 +#SBATCH --constraint=a100 +#SBATCH --reservation=hug +#SBATCH --qos=qos_gpu-gc # up to 100h +#SBATCH --nodes=8 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! +#SBATCH --cpus-per-task=64 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:8 # number of gpus +#SBATCH --time 100:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=logs/%x-%j.out # output file name +#SBATCH --account=six@a100 + +set -x -e + +source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0 +echo "START TIME: $(date)" + +variant=main + +DATA_OUTPUT_PATH=$SCRATCH/checkpoints/tr13f-6B3-ml-t0 +CHECKPOINT_PATH=/gpfsscratch/rech/six/uty16tp/checkpoints/tr13f-6B3-ml-t0/checkpoints/$variant +REPO_PATH=$DATA_OUTPUT_PATH/tr13f-6B3-ml-t0-logs +TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant +LOGS_PATH=$REPO_PATH/logs/$variant +mkdir -p $LOGS_PATH +mkdir -p $TENSORBOARD_PATH + +MEGATRON_DEEPSPEED_REPO=$WORK/code/big_science/Megatron-DeepSpeed +cd $MEGATRON_DEEPSPEED_REPO + +KILL_SWITCH_PATH=$MEGATRON_DEEPSPEED_REPO/kill-switch-tr13f-6B3-mtf + +TRAIN_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/p31_train.txt +VALID_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/p31_validation.txt +TOKENIZER_NAME_OR_PATH=bigscience/tokenizer + +# defining the right environment variables +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 + +# testing for potential faulty nodes +# srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"' + +# so processes know who to talk to +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6001 + +GPUS_PER_NODE=8 +NNODES=$SLURM_NNODES + +PP_SIZE=1 +TP_SIZE=1 + +# T0 paper: +# ...truncate input and target sequences to 1024 and 256 tokens... +# ...use a batch size of 1024 sequences ... 2^20 total input tokens per batch... +# We use 2048 total tokens and 512 batch size = 2**20 +MICRO_BATCH_SIZE=4 +GLOBAL_BATCH_SIZE=2048 + +NLAYERS=30 +NHIDDEN=4096 +NHEADS=32 +SEQ_LEN=2048 + +SAVE_INTERVAL=1000 + +TRAIN_SAMPLES=6_348_800 + +# T0 paper: +# "...we use a learning rate of 1e-3..." 
+# However, they use Adafactor, which adapts the LR +# For Adam we likely want a lower one +# FLAN: +# "...decay of 1e-4.."" + +# Uncomment for the first step +# --no-load-optim \ +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.95 \ + --adam-eps 1e-8 \ + --lr 2e-5 \ + --lr-decay-style constant \ + --lr-warmup-samples 0 \ + --clip-grad 1.0 \ + --weight-decay 1e-4 \ + " +# for 20h 1190, for 100h 5990 +# --exit-duration-in-mins 1190 \ +EXIT_OPTS=" \ + --exit-duration-in-mins 5990 \ + " + +GPT_ARGS=" \ + --pp-partition-method 'type:transformer|embedding' \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --train-samples $TRAIN_SAMPLES \ + --tokenizer-type PretrainedFromHF \ + --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \ + --init-method-std 0.0048 \ + --embed-layernorm \ + --fp16 \ + --seed 42 \ + --position-embedding-type alibi \ + --checkpoint-activations \ + --abort-on-unmet-fused-kernel-constraints \ + --kill-switch-path $KILL_SWITCH_PATH \ + --pad-vocab-size-to 250880 \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +OUTPUT_ARGS=" \ + --log-interval 1 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 1000 \ + --eval-iters 10 \ + --tensorboard-dir $TENSORBOARD_PATH \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=1 + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat < $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.run \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \ + --rdzv_backend c10d \ + --max_restarts 0 \ + --tee 3 \ + " + +export CMD=" \ + `pwd`/finetune_t0_non_causal_decoder.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --train-weighted-split-paths-path $TRAIN_DATA_PATH \ + --valid-weighted-split-paths-path $VALID_DATA_PATH \ + --dataloader-type single \ + --data-impl mmap \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + +echo $CMD + +# do not remove or the training will hang and nodes will be lost w/o this workaround +export CUDA_LAUNCH_BLOCKING=1 + +# hide duplicated errors using this hack - will be properly fixed in pt-1.12 +export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json + +clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt + +echo "END TIME: $(date)" diff --git a/train/tr13-mtf/smaller_models/tr13f-6b3-mtf-p31lossseq.slurm b/train/tr13-mtf/smaller_models/tr13f-6b3-mtf-p31lossseq.slurm new file mode 100644 index 
0000000000000000000000000000000000000000..e001e2d27b4f27818e300e38c2a7f49f0337202c --- /dev/null +++ b/train/tr13-mtf/smaller_models/tr13f-6b3-mtf-p31lossseq.slurm @@ -0,0 +1,210 @@ +#!/bin/bash +#SBATCH --job-name=p31tr13f-6B3-ml-t0 +#SBATCH --partition=gpu_p5 +#SBATCH --constraint=a100 +#SBATCH --reservation=hug +#SBATCH --qos=qos_gpu-gc # up to 100h +#SBATCH --nodes=8 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! +#SBATCH --cpus-per-task=64 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:8 # number of gpus +#SBATCH --time 100:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --account=six@a100 + +set -x -e + +source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0 +echo "START TIME: $(date)" + +variant=p31 + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13f-6B3-ml-t0 +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant +REPO_PATH=$DATA_OUTPUT_PATH/tr13f-6B3-ml-t0-logs +TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant +LOGS_PATH=$REPO_PATH/logs/$variant +mkdir -p $LOGS_PATH +mkdir -p $TENSORBOARD_PATH + +MEGATRON_DEEPSPEED_REPO=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0/megdslossseq/Megatron-DeepSpeed +cd $MEGATRON_DEEPSPEED_REPO + +KILL_SWITCH_PATH=$MEGATRON_DEEPSPEED_REPO/kill-switch-tr13f-6B3-mtf + +TRAIN_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/p31_train.txt +VALID_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/p31_validation.txt +TOKENIZER_NAME_OR_PATH=bigscience/tokenizer + +# defining the right environment variables +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 + +# testing for potential faulty nodes +# srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"' + +# so processes know who to talk to +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6001 + +GPUS_PER_NODE=8 +NNODES=$SLURM_NNODES + +PP_SIZE=1 +TP_SIZE=1 + +# T0 paper: +# ...truncate input and target sequences to 1024 and 256 tokens... +# ...use a batch size of 1024 sequences ... 2^20 total input tokens per batch... +# We use 2048 total tokens and 512 batch size = 2**20 +MICRO_BATCH_SIZE=4 +GLOBAL_BATCH_SIZE=2048 + +NLAYERS=30 +NHIDDEN=4096 +NHEADS=32 +SEQ_LEN=2048 + +SAVE_INTERVAL=250 + +TRAIN_SAMPLES=6_348_800 + +# T0 paper: +# "...we use a learning rate of 1e-3..." 
+# However, they use Adafactor, which adapts the LR +# For Adam we likely want a lower one +# FLAN: +# "...decay of 1e-4.."" + +# Uncomment for the first step +# --no-load-optim \ +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.95 \ + --adam-eps 1e-8 \ + --lr 2e-5 \ + --lr-decay-style constant \ + --lr-warmup-samples 0 \ + --clip-grad 1.0 \ + --weight-decay 1e-4 \ + --no-load-optim \ + " +# for 20h 1190, for 100h 5990 +# --exit-duration-in-mins 1190 \ +EXIT_OPTS=" \ + --exit-duration-in-mins 5990 \ + " + +GPT_ARGS=" \ + --pp-partition-method 'type:transformer|embedding' \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --train-samples $TRAIN_SAMPLES \ + --tokenizer-type PretrainedFromHF \ + --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \ + --init-method-std 0.0048 \ + --embed-layernorm \ + --fp16 \ + --seed 42 \ + --position-embedding-type alibi \ + --checkpoint-activations \ + --abort-on-unmet-fused-kernel-constraints \ + --kill-switch-path $KILL_SWITCH_PATH \ + --pad-vocab-size-to 250880 \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +OUTPUT_ARGS=" \ + --log-interval 1 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 250 \ + --eval-iters 50 \ + --tensorboard-dir $TENSORBOARD_PATH \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=1 + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat < $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.run \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \ + --rdzv_backend c10d \ + --max_restarts 0 \ + --tee 3 \ + " + +export CMD=" \ + `pwd`/finetune_t0.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --train-weighted-split-paths-path $TRAIN_DATA_PATH \ + --valid-weighted-split-paths-path $VALID_DATA_PATH \ + --dataloader-type single \ + --data-impl mmap \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + +echo $CMD + +# do not remove or the training will hang and nodes will be lost w/o this workaround +export CUDA_LAUNCH_BLOCKING=1 + +# hide duplicated errors using this hack - will be properly fixed in pt-1.12 +export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json + +clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt + +echo "END TIME: $(date)" diff --git a/train/tr13-mtf/smaller_models/tr13f-6b3-mtf-xp3cap.slurm b/train/tr13-mtf/smaller_models/tr13f-6b3-mtf-xp3cap.slurm new file mode 100644 index 
0000000000000000000000000000000000000000..3e7cf04280ea49601bd99fff4a005dc5b302fff9 --- /dev/null +++ b/train/tr13-mtf/smaller_models/tr13f-6b3-mtf-xp3cap.slurm @@ -0,0 +1,210 @@ +#!/bin/bash +#SBATCH --job-name=xp3captr13f-6B3-ml-t0 +#SBATCH --partition=gpu_p5 +#SBATCH --constraint=a100 +#SBATCH --reservation=hug +#SBATCH --qos=qos_gpu-gc # up to 100h +#SBATCH --nodes=8 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! +#SBATCH --cpus-per-task=64 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:8 # number of gpus +#SBATCH --time 100:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --account=six@a100 + +set -x -e + +source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0 +echo "START TIME: $(date)" + +variant=xp3cap + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13f-6B3-ml-t0 +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant +REPO_PATH=$DATA_OUTPUT_PATH/tr13f-6B3-ml-t0-logs +TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant +LOGS_PATH=$REPO_PATH/logs/$variant +mkdir -p $LOGS_PATH +mkdir -p $TENSORBOARD_PATH + +MEGATRON_DEEPSPEED_REPO=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed +cd $MEGATRON_DEEPSPEED_REPO + +KILL_SWITCH_PATH=$MEGATRON_DEEPSPEED_REPO/kill-switch-tr13f-6B3-mtf + +TRAIN_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3cap_train.txt +VALID_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3cap_validation.txt +TOKENIZER_NAME_OR_PATH=bigscience/tokenizer + +# defining the right environment variables +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 + +# testing for potential faulty nodes +# srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"' + +# so processes know who to talk to +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6001 + +GPUS_PER_NODE=8 +NNODES=$SLURM_NNODES + +PP_SIZE=1 +TP_SIZE=1 + +# T0 paper: +# ...truncate input and target sequences to 1024 and 256 tokens... +# ...use a batch size of 1024 sequences ... 2^20 total input tokens per batch... +# We use 2048 total tokens and 512 batch size = 2**20 +MICRO_BATCH_SIZE=4 +GLOBAL_BATCH_SIZE=2048 + +NLAYERS=30 +NHIDDEN=4096 +NHEADS=32 +SEQ_LEN=2048 + +SAVE_INTERVAL=250 + +TRAIN_SAMPLES=6_348_800 + +# T0 paper: +# "...we use a learning rate of 1e-3..." 
+# However, they use Adafactor, which adapts the LR +# For Adam we likely want a lower one +# FLAN: +# "...decay of 1e-4.."" + +# Uncomment for the first step +# --no-load-optim \ +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.95 \ + --adam-eps 1e-8 \ + --lr 2e-5 \ + --lr-decay-style constant \ + --lr-warmup-samples 0 \ + --clip-grad 1.0 \ + --weight-decay 1e-4 \ + --no-load-optim \ + " +# for 20h 1190, for 100h 5990 +# --exit-duration-in-mins 1190 \ +EXIT_OPTS=" \ + --exit-duration-in-mins 5990 \ + " + +GPT_ARGS=" \ + --pp-partition-method 'type:transformer|embedding' \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --train-samples $TRAIN_SAMPLES \ + --tokenizer-type PretrainedFromHF \ + --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \ + --init-method-std 0.0048 \ + --embed-layernorm \ + --fp16 \ + --seed 42 \ + --position-embedding-type alibi \ + --checkpoint-activations \ + --abort-on-unmet-fused-kernel-constraints \ + --kill-switch-path $KILL_SWITCH_PATH \ + --pad-vocab-size-to 250880 \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +OUTPUT_ARGS=" \ + --log-interval 1 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 250 \ + --eval-iters 50 \ + --tensorboard-dir $TENSORBOARD_PATH \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=1 + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat < $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.run \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \ + --rdzv_backend c10d \ + --max_restarts 0 \ + --tee 3 \ + " + +export CMD=" \ + `pwd`/finetune_t0.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --train-weighted-split-paths-path $TRAIN_DATA_PATH \ + --valid-weighted-split-paths-path $VALID_DATA_PATH \ + --dataloader-type single \ + --data-impl mmap \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + +echo $CMD + +# do not remove or the training will hang and nodes will be lost w/o this workaround +export CUDA_LAUNCH_BLOCKING=1 + +# hide duplicated errors using this hack - will be properly fixed in pt-1.12 +export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json + +clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt + +echo "END TIME: $(date)" diff --git a/train/tr13-mtf/smaller_models/tr13f-6b3-mtf-xp3capmixfixlossseq.slurm b/train/tr13-mtf/smaller_models/tr13f-6b3-mtf-xp3capmixfixlossseq.slurm new file mode 100644 
index 0000000000000000000000000000000000000000..79ac192b69adac2bb1785a2c309f3af103ed7c30 --- /dev/null +++ b/train/tr13-mtf/smaller_models/tr13f-6b3-mtf-xp3capmixfixlossseq.slurm @@ -0,0 +1,211 @@ +#!/bin/bash +#SBATCH --job-name=xp3capmixfixlossseq +#SBATCH --partition=gpu_p5 +#SBATCH --constraint=a100 +#SBATCH --reservation=hug +#SBATCH --qos=qos_gpu-gc # up to 100h +#SBATCH --nodes=8 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! +#SBATCH --cpus-per-task=64 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:8 # number of gpus +#SBATCH --time 100:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --account=six@a100 + +set -x -e + +source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0 +echo "START TIME: $(date)" + +variant=xp3capmixfixlossseq + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13f-6B3-ml-t0 +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant +REPO_PATH=$DATA_OUTPUT_PATH/tr13f-6B3-ml-t0-logs +TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant +LOGS_PATH=$REPO_PATH/logs/$variant +mkdir -p $LOGS_PATH +mkdir -p $TENSORBOARD_PATH + +MEGATRON_DEEPSPEED_REPO=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0/megdslossseqnew/Megatron-DeepSpeed +cd $MEGATRON_DEEPSPEED_REPO + +KILL_SWITCH_PATH=$MEGATRON_DEEPSPEED_REPO/kill-switch-tr13f-6B3-mtf + +TRAIN_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixfix_train.txt +VALID_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixfix_validation.txt +TOKENIZER_NAME_OR_PATH=bigscience/tokenizer + +# defining the right environment variables +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 + +# testing for potential faulty nodes +# srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"' + +# so processes know who to talk to +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6001 + +GPUS_PER_NODE=8 +NNODES=$SLURM_NNODES + +PP_SIZE=1 +TP_SIZE=1 + +# T0 paper: +# ...truncate input and target sequences to 1024 and 256 tokens... +# ...use a batch size of 1024 sequences ... 2^20 total input tokens per batch... +# We use 2048 total tokens and 512 batch size = 2**20 +MICRO_BATCH_SIZE=4 +GLOBAL_BATCH_SIZE=2048 + +NLAYERS=30 +NHIDDEN=4096 +NHEADS=32 +SEQ_LEN=2048 + +SAVE_INTERVAL=2 + +TRAIN_SAMPLES=6_348_800 + +# T0 paper: +# "...we use a learning rate of 1e-3..." 
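One formatting note on the DeepSpeed config generation that appears further down in each of these scripts: the flattened diff shows "cat < $config_json", which would read the file rather than write it, yet the closing EOT marker is still present, so presumably the original is a heredoc that writes the JSON into ds_config.$SLURM_JOBID.json. A minimal sketch of that pattern, with the field values copied from the block that follows:

config_json="./ds_config.$SLURM_JOBID.json"
cat <<EOT > $config_json
{
  "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
  "train_batch_size": $GLOBAL_BATCH_SIZE,
  "gradient_clipping": 1.0,
  "zero_optimization": { "stage": $ZERO_STAGE },
  "fp16": {
    "enabled": true,
    "loss_scale": 0,
    "loss_scale_window": 500,
    "hysteresis": 2,
    "min_loss_scale": 1,
    "initial_scale_power": 12
  },
  "steps_per_print": 2000,
  "wall_clock_breakdown": false
}
EOT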
+# However, they use Adafactor, which adapts the LR +# For Adam we likely want a lower one +# FLAN: +# "...decay of 1e-4.."" + +# Uncomment for the first step +# --no-load-optim \ +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.95 \ + --adam-eps 1e-8 \ + --lr 2e-5 \ + --lr-decay-style constant \ + --lr-warmup-samples 0 \ + --clip-grad 1.0 \ + --weight-decay 1e-4 \ + --no-load-optim \ + --norm-target-loss \ + " +# for 20h 1190, for 100h 5990 +# --exit-duration-in-mins 1190 \ +EXIT_OPTS=" \ + --exit-duration-in-mins 5990 \ + " + +GPT_ARGS=" \ + --pp-partition-method 'type:transformer|embedding' \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --train-samples $TRAIN_SAMPLES \ + --tokenizer-type PretrainedFromHF \ + --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \ + --init-method-std 0.0048 \ + --embed-layernorm \ + --fp16 \ + --seed 42 \ + --position-embedding-type alibi \ + --checkpoint-activations \ + --abort-on-unmet-fused-kernel-constraints \ + --kill-switch-path $KILL_SWITCH_PATH \ + --pad-vocab-size-to 250880 \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +OUTPUT_ARGS=" \ + --log-interval 1 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 250 \ + --eval-iters 50 \ + --tensorboard-dir $TENSORBOARD_PATH \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=1 + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat < $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.run \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \ + --rdzv_backend c10d \ + --max_restarts 0 \ + --tee 3 \ + " + +export CMD=" \ + `pwd`/finetune_t0.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --train-weighted-split-paths-path $TRAIN_DATA_PATH \ + --valid-weighted-split-paths-path $VALID_DATA_PATH \ + --dataloader-type single \ + --data-impl mmap \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + +echo $CMD + +# do not remove or the training will hang and nodes will be lost w/o this workaround +export CUDA_LAUNCH_BLOCKING=1 + +# hide duplicated errors using this hack - will be properly fixed in pt-1.12 +export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json + +clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt + +echo "END TIME: $(date)" diff --git a/train/tr13-mtf/smaller_models/tr13f-6b3-mtf-xp3capmixlossseq.slurm b/train/tr13-mtf/smaller_models/tr13f-6b3-mtf-xp3capmixlossseq.slurm new 
file mode 100644 index 0000000000000000000000000000000000000000..73616b76b108948b42a19a04dc95cefdcee9196d --- /dev/null +++ b/train/tr13-mtf/smaller_models/tr13f-6b3-mtf-xp3capmixlossseq.slurm @@ -0,0 +1,210 @@ +#!/bin/bash +#SBATCH --job-name=xp3mixedtr13f-6B3-ml-t0 +#SBATCH --partition=gpu_p5 +#SBATCH --constraint=a100 +#SBATCH --reservation=hug +#SBATCH --qos=qos_gpu-gc # up to 100h +#SBATCH --nodes=8 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! +#SBATCH --cpus-per-task=64 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:8 # number of gpus +#SBATCH --time 100:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --account=six@a100 + +set -x -e + +source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0 +echo "START TIME: $(date)" + +variant=xp3capmixlossseq + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13f-6B3-ml-t0 +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant +REPO_PATH=$DATA_OUTPUT_PATH/tr13f-6B3-ml-t0-logs +TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant +LOGS_PATH=$REPO_PATH/logs/$variant +mkdir -p $LOGS_PATH +mkdir -p $TENSORBOARD_PATH + +MEGATRON_DEEPSPEED_REPO=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0/megdslossseq/Megatron-DeepSpeed +cd $MEGATRON_DEEPSPEED_REPO + +KILL_SWITCH_PATH=$MEGATRON_DEEPSPEED_REPO/kill-switch-tr13f-6B3-mtf + +TRAIN_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmix_train.txt +VALID_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmix_validation.txt +TOKENIZER_NAME_OR_PATH=bigscience/tokenizer + +# defining the right environment variables +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 + +# testing for potential faulty nodes +# srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"' + +# so processes know who to talk to +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6001 + +GPUS_PER_NODE=8 +NNODES=$SLURM_NNODES + +PP_SIZE=1 +TP_SIZE=1 + +# T0 paper: +# ...truncate input and target sequences to 1024 and 256 tokens... +# ...use a batch size of 1024 sequences ... 2^20 total input tokens per batch... +# We use 2048 total tokens and 512 batch size = 2**20 +MICRO_BATCH_SIZE=4 +GLOBAL_BATCH_SIZE=2048 + +NLAYERS=30 +NHIDDEN=4096 +NHEADS=32 +SEQ_LEN=2048 + +SAVE_INTERVAL=250 + +TRAIN_SAMPLES=6_348_800 + +# T0 paper: +# "...we use a learning rate of 1e-3..." 
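The "Deepspeed figures out GAS dynamically" comment further down refers to the usual relation global_batch = micro_batch x data_parallel_size x gradient_accumulation_steps. A rough check with this script's values (8 nodes x 8 GPUs, TP=PP=1; the derived variable names are mine):

GPUS_PER_NODE=8; NNODES=8
TP_SIZE=1; PP_SIZE=1
MICRO_BATCH_SIZE=4; GLOBAL_BATCH_SIZE=2048
WORLD_SIZE=$(( GPUS_PER_NODE * NNODES ))                      # 64 GPUs
DP_SIZE=$(( WORLD_SIZE / (TP_SIZE * PP_SIZE) ))               # 64-way data parallelism
GAS=$(( GLOBAL_BATCH_SIZE / (MICRO_BATCH_SIZE * DP_SIZE) ))   # 8 accumulation steps
echo "DP=$DP_SIZE GAS=$GAS"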
+# However, they use Adafactor, which adapts the LR +# For Adam we likely want a lower one +# FLAN: +# "...decay of 1e-4.."" + +# Uncomment for the first step +# --no-load-optim \ +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.95 \ + --adam-eps 1e-8 \ + --lr 2e-5 \ + --lr-decay-style constant \ + --lr-warmup-samples 0 \ + --clip-grad 1.0 \ + --weight-decay 1e-4 \ + --no-load-optim \ + " +# for 20h 1190, for 100h 5990 +# --exit-duration-in-mins 1190 \ +EXIT_OPTS=" \ + --exit-duration-in-mins 5990 \ + " + +GPT_ARGS=" \ + --pp-partition-method 'type:transformer|embedding' \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --train-samples $TRAIN_SAMPLES \ + --tokenizer-type PretrainedFromHF \ + --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \ + --init-method-std 0.0048 \ + --embed-layernorm \ + --fp16 \ + --seed 42 \ + --position-embedding-type alibi \ + --checkpoint-activations \ + --abort-on-unmet-fused-kernel-constraints \ + --kill-switch-path $KILL_SWITCH_PATH \ + --pad-vocab-size-to 250880 \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +OUTPUT_ARGS=" \ + --log-interval 1 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 250 \ + --eval-iters 50 \ + --tensorboard-dir $TENSORBOARD_PATH \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=1 + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat < $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.run \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \ + --rdzv_backend c10d \ + --max_restarts 0 \ + --tee 3 \ + " + +export CMD=" \ + `pwd`/finetune_t0.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --train-weighted-split-paths-path $TRAIN_DATA_PATH \ + --valid-weighted-split-paths-path $VALID_DATA_PATH \ + --dataloader-type single \ + --data-impl mmap \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + +echo $CMD + +# do not remove or the training will hang and nodes will be lost w/o this workaround +export CUDA_LAUNCH_BLOCKING=1 + +# hide duplicated errors using this hack - will be properly fixed in pt-1.12 +export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json + +clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt + +echo "END TIME: $(date)" diff --git a/train/tr13-mtf/smaller_models/tr13f-6b3-mtf-xp3capmixlossseqbos.slurm b/train/tr13-mtf/smaller_models/tr13f-6b3-mtf-xp3capmixlossseqbos.slurm new file mode 100644 
index 0000000000000000000000000000000000000000..54ace317aeb46f9a6a46f797610f85262f919538 --- /dev/null +++ b/train/tr13-mtf/smaller_models/tr13f-6b3-mtf-xp3capmixlossseqbos.slurm @@ -0,0 +1,211 @@ +#!/bin/bash +#SBATCH --job-name=xp3capmixlossseqbos +#SBATCH --partition=gpu_p5 +#SBATCH --constraint=a100 +#SBATCH --reservation=hug +#SBATCH --qos=qos_gpu-gc # up to 100h +#SBATCH --nodes=8 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! +#SBATCH --cpus-per-task=64 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:8 # number of gpus +#SBATCH --time 100:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --account=six@a100 + +set -x -e + +source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0 +echo "START TIME: $(date)" + +variant=xp3capmixlossseqbos + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13f-6B3-ml-t0 +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant +REPO_PATH=$DATA_OUTPUT_PATH/tr13f-6B3-ml-t0-logs +TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant +LOGS_PATH=$REPO_PATH/logs/$variant +mkdir -p $LOGS_PATH +mkdir -p $TENSORBOARD_PATH + +MEGATRON_DEEPSPEED_REPO=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0/megdslossseqnew/Megatron-DeepSpeed +cd $MEGATRON_DEEPSPEED_REPO + +KILL_SWITCH_PATH=$MEGATRON_DEEPSPEED_REPO/kill-switch-tr13f-6B3-mtf + +TRAIN_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixv3bos_train.txt +VALID_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixv3bos_validation.txt +TOKENIZER_NAME_OR_PATH=bigscience/tokenizer + +# defining the right environment variables +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 + +# testing for potential faulty nodes +# srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"' + +# so processes know who to talk to +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6001 + +GPUS_PER_NODE=8 +NNODES=$SLURM_NNODES + +PP_SIZE=1 +TP_SIZE=1 + +# T0 paper: +# ...truncate input and target sequences to 1024 and 256 tokens... +# ...use a batch size of 1024 sequences ... 2^20 total input tokens per batch... +# We use 2048 total tokens and 512 batch size = 2**20 +MICRO_BATCH_SIZE=4 +GLOBAL_BATCH_SIZE=2048 + +NLAYERS=30 +NHIDDEN=4096 +NHEADS=32 +SEQ_LEN=2048 + +SAVE_INTERVAL=2 + +TRAIN_SAMPLES=6_348_800 + +# T0 paper: +# "...we use a learning rate of 1e-3..." 
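A sketch of how the srun / torch.distributed.run combination at the bottom of these scripts assigns ranks, as I read it: srun starts one task per node, each task substitutes its SLURM_PROCID into --node_rank, and torch.distributed.run then spawns GPUS_PER_NODE workers on that node, so the global rank is roughly node_rank x GPUS_PER_NODE + local_rank:

GPUS_PER_NODE=8
NODE_RANK=${SLURM_PROCID:-0}   # what srun substitutes into --node_rank
for LOCAL_RANK in $(seq 0 $(( GPUS_PER_NODE - 1 ))); do
  echo "node_rank=$NODE_RANK local_rank=$LOCAL_RANK -> global rank $(( NODE_RANK * GPUS_PER_NODE + LOCAL_RANK ))"
done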
+# However, they use Adafactor, which adapts the LR +# For Adam we likely want a lower one +# FLAN: +# "...decay of 1e-4.."" + +# Uncomment for the first step +# --no-load-optim \ +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.95 \ + --adam-eps 1e-8 \ + --lr 2e-5 \ + --lr-decay-style constant \ + --lr-warmup-samples 0 \ + --clip-grad 1.0 \ + --weight-decay 1e-4 \ + --no-load-optim \ + --norm-target-loss \ + " +# for 20h 1190, for 100h 5990 +# --exit-duration-in-mins 1190 \ +EXIT_OPTS=" \ + --exit-duration-in-mins 5990 \ + " + +GPT_ARGS=" \ + --pp-partition-method 'type:transformer|embedding' \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --train-samples $TRAIN_SAMPLES \ + --tokenizer-type PretrainedFromHF \ + --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \ + --init-method-std 0.0048 \ + --embed-layernorm \ + --fp16 \ + --seed 42 \ + --position-embedding-type alibi \ + --checkpoint-activations \ + --abort-on-unmet-fused-kernel-constraints \ + --kill-switch-path $KILL_SWITCH_PATH \ + --pad-vocab-size-to 250880 \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +OUTPUT_ARGS=" \ + --log-interval 1 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 250 \ + --eval-iters 50 \ + --tensorboard-dir $TENSORBOARD_PATH \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=1 + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat < $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.run \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \ + --rdzv_backend c10d \ + --max_restarts 0 \ + --tee 3 \ + " + +export CMD=" \ + `pwd`/finetune_t0.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --train-weighted-split-paths-path $TRAIN_DATA_PATH \ + --valid-weighted-split-paths-path $VALID_DATA_PATH \ + --dataloader-type single \ + --data-impl mmap \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + +echo $CMD + +# do not remove or the training will hang and nodes will be lost w/o this workaround +export CUDA_LAUNCH_BLOCKING=1 + +# hide duplicated errors using this hack - will be properly fixed in pt-1.12 +export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json + +clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt + +echo "END TIME: $(date)" diff --git a/train/tr13-mtf/smaller_models/tr13f-6b3-mtf-xp3capmixnewcodelonglossseq.slurm 
b/train/tr13-mtf/smaller_models/tr13f-6b3-mtf-xp3capmixnewcodelonglossseq.slurm new file mode 100644 index 0000000000000000000000000000000000000000..5ed3ced89f0733ef3d9ab3228c6f7ad992386094 --- /dev/null +++ b/train/tr13-mtf/smaller_models/tr13f-6b3-mtf-xp3capmixnewcodelonglossseq.slurm @@ -0,0 +1,211 @@ +#!/bin/bash +#SBATCH --job-name=xp3capmixnewcodelong +#SBATCH --partition=gpu_p5 +#SBATCH --constraint=a100 +#SBATCH --reservation=hug +#SBATCH --qos=qos_gpu-gc # up to 100h +#SBATCH --nodes=8 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! +#SBATCH --cpus-per-task=64 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:8 # number of gpus +#SBATCH --time 100:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --account=six@a100 + +set -x -e + +source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0 +echo "START TIME: $(date)" + +variant=xp3capmixnewcodelonglossseq + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13f-6B3-ml-t0 +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant +REPO_PATH=$DATA_OUTPUT_PATH/tr13f-6B3-ml-t0-logs +TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant +LOGS_PATH=$REPO_PATH/logs/$variant +mkdir -p $LOGS_PATH +mkdir -p $TENSORBOARD_PATH + +MEGATRON_DEEPSPEED_REPO=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0/megdslossseqnew/Megatron-DeepSpeed +cd $MEGATRON_DEEPSPEED_REPO + +KILL_SWITCH_PATH=$MEGATRON_DEEPSPEED_REPO/kill-switch-tr13f-6B3-mtf + +TRAIN_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixnewcodelong_train.txt +VALID_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixnewcodelong_validation.txt +TOKENIZER_NAME_OR_PATH=bigscience/tokenizer + +# defining the right environment variables +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 + +# testing for potential faulty nodes +# srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"' + +# so processes know who to talk to +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6001 + +GPUS_PER_NODE=8 +NNODES=$SLURM_NNODES + +PP_SIZE=1 +TP_SIZE=1 + +# T0 paper: +# ...truncate input and target sequences to 1024 and 256 tokens... +# ...use a batch size of 1024 sequences ... 2^20 total input tokens per batch... +# We use 2048 total tokens and 512 batch size = 2**20 +MICRO_BATCH_SIZE=4 +GLOBAL_BATCH_SIZE=2048 + +NLAYERS=30 +NHIDDEN=4096 +NHEADS=32 +SEQ_LEN=2048 +# 250 +SAVE_INTERVAL=2 + +TRAIN_SAMPLES=6_348_800 + +# T0 paper: +# "...we use a learning rate of 1e-3..." 
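The fp16 block in the generated DeepSpeed config uses dynamic loss scaling ("loss_scale": 0). My gloss on the remaining fields, with the one number that can be checked directly:

INITIAL_SCALE_POWER=12
echo "initial loss scale: $(( 1 << INITIAL_SCALE_POWER ))"   # 2^12 = 4096
# loss_scale_window=500, hysteresis=2 and min_loss_scale=1 control how quickly the scale
# is raised after overflow-free windows and how far it may drop after overflows
# (a paraphrase of DeepSpeed's dynamic loss-scaler knobs, not something the script spells out).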
+# However, they use Adafactor, which adapts the LR +# For Adam we likely want a lower one +# FLAN: +# "...decay of 1e-4.."" + +# Uncomment for the first step +# --no-load-optim \ +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.95 \ + --adam-eps 1e-8 \ + --lr 2e-5 \ + --lr-decay-style constant \ + --lr-warmup-samples 0 \ + --clip-grad 1.0 \ + --weight-decay 1e-4 \ + --no-load-optim \ + --norm-target-loss \ + " +# for 20h 1190, for 100h 5990 +# --exit-duration-in-mins 1190 \ +EXIT_OPTS=" \ + --exit-duration-in-mins 5990 \ + " + +GPT_ARGS=" \ + --pp-partition-method 'type:transformer|embedding' \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --train-samples $TRAIN_SAMPLES \ + --tokenizer-type PretrainedFromHF \ + --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \ + --init-method-std 0.0048 \ + --embed-layernorm \ + --fp16 \ + --seed 42 \ + --position-embedding-type alibi \ + --checkpoint-activations \ + --abort-on-unmet-fused-kernel-constraints \ + --kill-switch-path $KILL_SWITCH_PATH \ + --pad-vocab-size-to 250880 \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +OUTPUT_ARGS=" \ + --log-interval 1 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 125 \ + --eval-iters 10 \ + --tensorboard-dir $TENSORBOARD_PATH \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=1 + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat < $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.run \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \ + --rdzv_backend c10d \ + --max_restarts 0 \ + --tee 3 \ + " + +export CMD=" \ + `pwd`/finetune_t0.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --train-weighted-split-paths-path $TRAIN_DATA_PATH \ + --valid-weighted-split-paths-path $VALID_DATA_PATH \ + --dataloader-type single \ + --data-impl mmap \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + +echo $CMD + +# do not remove or the training will hang and nodes will be lost w/o this workaround +export CUDA_LAUNCH_BLOCKING=1 + +# hide duplicated errors using this hack - will be properly fixed in pt-1.12 +export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json + +clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt + +echo "END TIME: $(date)" diff --git a/train/tr13-mtf/smaller_models/tr13f-6b3-mtf-xp3capmixnostriplossseq.slurm 
b/train/tr13-mtf/smaller_models/tr13f-6b3-mtf-xp3capmixnostriplossseq.slurm new file mode 100644 index 0000000000000000000000000000000000000000..2b0d7b73bec11fc50594a0797bba5cbd7adcf325 --- /dev/null +++ b/train/tr13-mtf/smaller_models/tr13f-6b3-mtf-xp3capmixnostriplossseq.slurm @@ -0,0 +1,211 @@ +#!/bin/bash +#SBATCH --job-name=xp3capmixnostriplossseq +#SBATCH --partition=gpu_p5 +#SBATCH --constraint=a100 +#SBATCH --reservation=hug +#SBATCH --qos=qos_gpu-gc # up to 100h +#SBATCH --nodes=8 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! +#SBATCH --cpus-per-task=64 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:8 # number of gpus +#SBATCH --time 100:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --account=six@a100 + +set -x -e + +source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0 +echo "START TIME: $(date)" + +variant=xp3capmixnostriplossseq + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13f-6B3-ml-t0 +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant +REPO_PATH=$DATA_OUTPUT_PATH/tr13f-6B3-ml-t0-logs +TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant +LOGS_PATH=$REPO_PATH/logs/$variant +mkdir -p $LOGS_PATH +mkdir -p $TENSORBOARD_PATH + +MEGATRON_DEEPSPEED_REPO=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0/megdslossseqnew/Megatron-DeepSpeed +cd $MEGATRON_DEEPSPEED_REPO + +KILL_SWITCH_PATH=$MEGATRON_DEEPSPEED_REPO/kill-switch-tr13f-6B3-mtf + +TRAIN_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixnostrip_train.txt +VALID_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixnostrip_validation.txt +TOKENIZER_NAME_OR_PATH=bigscience/tokenizer + +# defining the right environment variables +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 + +# testing for potential faulty nodes +# srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"' + +# so processes know who to talk to +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6001 + +GPUS_PER_NODE=8 +NNODES=$SLURM_NNODES + +PP_SIZE=1 +TP_SIZE=1 + +# T0 paper: +# ...truncate input and target sequences to 1024 and 256 tokens... +# ...use a batch size of 1024 sequences ... 2^20 total input tokens per batch... +# We use 2048 total tokens and 512 batch size = 2**20 +MICRO_BATCH_SIZE=4 +GLOBAL_BATCH_SIZE=2048 + +NLAYERS=30 +NHIDDEN=4096 +NHEADS=32 +SEQ_LEN=2048 + +SAVE_INTERVAL=2 + +TRAIN_SAMPLES=6_348_800 + +# T0 paper: +# "...we use a learning rate of 1e-3..." 
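The EXIT_OPTS values map onto the SLURM --time limits: the in-script comment pairs 1190 with 20 h jobs and 5990 with the 100 h limit used here, so the trainer is asked to exit about ten minutes before the allocation ends, presumably to leave time for a final checkpoint. As arithmetic:

for WALL_HOURS in 20 100; do
  echo "${WALL_HOURS}h = $(( WALL_HOURS * 60 )) min; exit-duration-in-mins used: $(( WALL_HOURS * 60 - 10 ))"
done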
+# However, they use Adafactor, which adapts the LR +# For Adam we likely want a lower one +# FLAN: +# "...decay of 1e-4.."" + +# Uncomment for the first step +# --no-load-optim \ +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.95 \ + --adam-eps 1e-8 \ + --lr 2e-5 \ + --lr-decay-style constant \ + --lr-warmup-samples 0 \ + --clip-grad 1.0 \ + --weight-decay 1e-4 \ + --no-load-optim \ + --norm-target-loss \ + " +# for 20h 1190, for 100h 5990 +# --exit-duration-in-mins 1190 \ +EXIT_OPTS=" \ + --exit-duration-in-mins 5990 \ + " + +GPT_ARGS=" \ + --pp-partition-method 'type:transformer|embedding' \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --train-samples $TRAIN_SAMPLES \ + --tokenizer-type PretrainedFromHF \ + --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \ + --init-method-std 0.0048 \ + --embed-layernorm \ + --fp16 \ + --seed 42 \ + --position-embedding-type alibi \ + --checkpoint-activations \ + --abort-on-unmet-fused-kernel-constraints \ + --kill-switch-path $KILL_SWITCH_PATH \ + --pad-vocab-size-to 250880 \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +OUTPUT_ARGS=" \ + --log-interval 1 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 250 \ + --eval-iters 50 \ + --tensorboard-dir $TENSORBOARD_PATH \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=1 + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat < $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.run \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \ + --rdzv_backend c10d \ + --max_restarts 0 \ + --tee 3 \ + " + +export CMD=" \ + `pwd`/finetune_t0.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --train-weighted-split-paths-path $TRAIN_DATA_PATH \ + --valid-weighted-split-paths-path $VALID_DATA_PATH \ + --dataloader-type single \ + --data-impl mmap \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + +echo $CMD + +# do not remove or the training will hang and nodes will be lost w/o this workaround +export CUDA_LAUNCH_BLOCKING=1 + +# hide duplicated errors using this hack - will be properly fixed in pt-1.12 +export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json + +clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt + +echo "END TIME: $(date)" diff --git a/train/tr13-mtf/smaller_models/tr13f-6b3-mtf-xp3capmixv2lossseqbitfit.slurm 
b/train/tr13-mtf/smaller_models/tr13f-6b3-mtf-xp3capmixv2lossseqbitfit.slurm new file mode 100644 index 0000000000000000000000000000000000000000..a60d3e08c5218a847e22a07428e9d8721ac681ad --- /dev/null +++ b/train/tr13-mtf/smaller_models/tr13f-6b3-mtf-xp3capmixv2lossseqbitfit.slurm @@ -0,0 +1,211 @@ +#!/bin/bash +#SBATCH --job-name=xp3capmixv2lossseqbitfit +#SBATCH --partition=gpu_p5 +#SBATCH --constraint=a100 +#SBATCH --reservation=hug +#SBATCH --qos=qos_gpu-gc # up to 100h +#SBATCH --nodes=8 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! +#SBATCH --cpus-per-task=64 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:8 # number of gpus +#SBATCH --time 100:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --account=six@a100 + +set -x -e + +source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0 +echo "START TIME: $(date)" + +variant=xp3capmixv2lossseqbitfit + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13f-6B3-ml-t0 +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant +REPO_PATH=$DATA_OUTPUT_PATH/tr13f-6B3-ml-t0-logs +TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant +LOGS_PATH=$REPO_PATH/logs/$variant +mkdir -p $LOGS_PATH +mkdir -p $TENSORBOARD_PATH + +MEGATRON_DEEPSPEED_REPO=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0/megdslossseqbitfit/Megatron-DeepSpeed +cd $MEGATRON_DEEPSPEED_REPO + +KILL_SWITCH_PATH=$MEGATRON_DEEPSPEED_REPO/kill-switch-tr13f-6B3-mtf + +TRAIN_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixv2_train.txt +VALID_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixv2_validation.txt +TOKENIZER_NAME_OR_PATH=bigscience/tokenizer + +# defining the right environment variables +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 + +# testing for potential faulty nodes +# srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"' + +# so processes know who to talk to +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6001 + +GPUS_PER_NODE=8 +NNODES=$SLURM_NNODES + +PP_SIZE=1 +TP_SIZE=1 + +# T0 paper: +# ...truncate input and target sequences to 1024 and 256 tokens... +# ...use a batch size of 1024 sequences ... 2^20 total input tokens per batch... +# We use 2048 total tokens and 512 batch size = 2**20 +MICRO_BATCH_SIZE=4 +GLOBAL_BATCH_SIZE=2048 + +NLAYERS=30 +NHIDDEN=4096 +NHEADS=32 +SEQ_LEN=2048 + +SAVE_INTERVAL=2 + +TRAIN_SAMPLES=6_348_800 + +# T0 paper: +# "...we use a learning rate of 1e-3..." 
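On --pad-vocab-size-to 250880, shared by all of these runs: Megatron pads the embedding table so that it divides evenly across tensor-parallel ranks, and padding to a multiple of 128 keeps the vocabulary GEMMs well shaped (my gloss; the script itself only passes the flag). A quick divisibility check:

PAD_VOCAB=250880
for d in 1 2 4 8 128 512; do
  echo "250880 % $d = $(( PAD_VOCAB % d ))"   # 0 for every divisor listed here
done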
+# However, they use Adafactor, which adapts the LR +# For Adam we likely want a lower one +# FLAN: +# "...decay of 1e-4.."" + +# Uncomment for the first step +# --no-load-optim \ +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.95 \ + --adam-eps 1e-8 \ + --lr 4e-5 \ + --lr-decay-style constant \ + --lr-warmup-samples 0 \ + --clip-grad 1.0 \ + --weight-decay 1e-4 \ + --no-load-optim \ + " +# for 20h 1190, for 100h 5990 +# --exit-duration-in-mins 1190 \ +EXIT_OPTS=" \ + --exit-duration-in-mins 5990 \ + " + +GPT_ARGS=" \ + --pp-partition-method 'type:transformer|embedding' \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --train-samples $TRAIN_SAMPLES \ + --tokenizer-type PretrainedFromHF \ + --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \ + --init-method-std 0.0048 \ + --embed-layernorm \ + --fp16 \ + --bitfit \ + --seed 42 \ + --position-embedding-type alibi \ + --checkpoint-activations \ + --abort-on-unmet-fused-kernel-constraints \ + --kill-switch-path $KILL_SWITCH_PATH \ + --pad-vocab-size-to 250880 \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +OUTPUT_ARGS=" \ + --log-interval 1 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 250 \ + --eval-iters 50 \ + --tensorboard-dir $TENSORBOARD_PATH \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=1 + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat < $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.run \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \ + --rdzv_backend c10d \ + --max_restarts 0 \ + --tee 3 \ + " + +export CMD=" \ + `pwd`/finetune_t0.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --train-weighted-split-paths-path $TRAIN_DATA_PATH \ + --valid-weighted-split-paths-path $VALID_DATA_PATH \ + --dataloader-type single \ + --data-impl mmap \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + +echo $CMD + +# do not remove or the training will hang and nodes will be lost w/o this workaround +export CUDA_LAUNCH_BLOCKING=1 + +# hide duplicated errors using this hack - will be properly fixed in pt-1.12 +export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json + +clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt + +echo "END TIME: $(date)" diff --git a/train/tr13-mtf/smaller_models/tr13f-6b3-mtf-xp3zzlossseq.slurm b/train/tr13-mtf/smaller_models/tr13f-6b3-mtf-xp3zzlossseq.slurm new file mode 100644 
index 0000000000000000000000000000000000000000..b0b7399d284cfffdaa044f53c1341efcfb677632 --- /dev/null +++ b/train/tr13-mtf/smaller_models/tr13f-6b3-mtf-xp3zzlossseq.slurm @@ -0,0 +1,211 @@ +#!/bin/bash +#SBATCH --job-name=xp3zzlossseq +#SBATCH --partition=gpu_p5 +#SBATCH --constraint=a100 +#SBATCH --reservation=hug +#SBATCH --qos=qos_gpu-gc # up to 100h +#SBATCH --nodes=8 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! +#SBATCH --cpus-per-task=64 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:8 # number of gpus +#SBATCH --time 100:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --account=six@a100 + +set -x -e + +source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0 +echo "START TIME: $(date)" + +variant=xp3zzlossseq + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13f-6B3-ml-t0 +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant +REPO_PATH=$DATA_OUTPUT_PATH/tr13f-6B3-ml-t0-logs +TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant +LOGS_PATH=$REPO_PATH/logs/$variant +mkdir -p $LOGS_PATH +mkdir -p $TENSORBOARD_PATH + +MEGATRON_DEEPSPEED_REPO=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0/megdslossseqnew/Megatron-DeepSpeed +cd $MEGATRON_DEEPSPEED_REPO + +KILL_SWITCH_PATH=$MEGATRON_DEEPSPEED_REPO/kill-switch-tr13f-6B3-mtf + +TRAIN_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3zzlossseq_train.txt +VALID_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/xp3capmixnewcodelong_validation_pretr.txt +TOKENIZER_NAME_OR_PATH=bigscience/tokenizer + +# defining the right environment variables +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 + +# testing for potential faulty nodes +# srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"' + +# so processes know who to talk to +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6001 + +GPUS_PER_NODE=8 +NNODES=$SLURM_NNODES + +PP_SIZE=1 +TP_SIZE=1 + +# T0 paper: +# ...truncate input and target sequences to 1024 and 256 tokens... +# ...use a batch size of 1024 sequences ... 2^20 total input tokens per batch... +# We use 2048 total tokens and 512 batch size = 2**20 +MICRO_BATCH_SIZE=4 +GLOBAL_BATCH_SIZE=2048 + +NLAYERS=30 +NHIDDEN=4096 +NHEADS=32 +SEQ_LEN=2048 +# 250 +SAVE_INTERVAL=2 + +TRAIN_SAMPLES=6_348_800 + +# T0 paper: +# "...we use a learning rate of 1e-3..." 
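For the evaluation settings that follow (eval-interval 250, eval-iters 10), a rough estimate of what one validation pass costs, assuming each eval iteration processes a full global batch (an assumption on my part):

EVAL_ITERS=10; GLOBAL_BATCH_SIZE=2048; SEQ_LEN=2048
echo "sequences per validation pass: $(( EVAL_ITERS * GLOBAL_BATCH_SIZE ))"             # 20,480
echo "tokens per validation pass:    $(( EVAL_ITERS * GLOBAL_BATCH_SIZE * SEQ_LEN ))"   # ~41.9M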
+# However, they use Adafactor, which adapts the LR +# For Adam we likely want a lower one +# FLAN: +# "...decay of 1e-4.."" + +# Uncomment for the first step +# --no-load-optim \ +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.95 \ + --adam-eps 1e-8 \ + --lr 2e-5 \ + --lr-decay-style constant \ + --lr-warmup-samples 0 \ + --clip-grad 1.0 \ + --weight-decay 1e-4 \ + --no-load-optim \ + --norm-target-loss \ + " +# for 20h 1190, for 100h 5990 +# --exit-duration-in-mins 1190 \ +EXIT_OPTS=" \ + --exit-duration-in-mins 5990 \ + " + +GPT_ARGS=" \ + --pp-partition-method 'type:transformer|embedding' \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --train-samples $TRAIN_SAMPLES \ + --tokenizer-type PretrainedFromHF \ + --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \ + --init-method-std 0.0048 \ + --embed-layernorm \ + --fp16 \ + --seed 42 \ + --position-embedding-type alibi \ + --checkpoint-activations \ + --abort-on-unmet-fused-kernel-constraints \ + --kill-switch-path $KILL_SWITCH_PATH \ + --pad-vocab-size-to 250880 \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +OUTPUT_ARGS=" \ + --log-interval 1 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 250 \ + --eval-iters 10 \ + --tensorboard-dir $TENSORBOARD_PATH \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=1 + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat < $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.run \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \ + --rdzv_backend c10d \ + --max_restarts 0 \ + --tee 3 \ + " + +export CMD=" \ + `pwd`/finetune_t0.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --train-weighted-split-paths-path $TRAIN_DATA_PATH \ + --valid-weighted-split-paths-path $VALID_DATA_PATH \ + --dataloader-type single \ + --data-impl mmap \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + +echo $CMD + +# do not remove or the training will hang and nodes will be lost w/o this workaround +export CUDA_LAUNCH_BLOCKING=1 + +# hide duplicated errors using this hack - will be properly fixed in pt-1.12 +export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json + +clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt + +echo "END TIME: $(date)" diff --git a/train/tr13-mtf/tr13-176B-mtf-p31lossseq-val.slurm b/train/tr13-mtf/tr13-176B-mtf-p31lossseq-val.slurm new file mode 100644 index 
0000000000000000000000000000000000000000..779d1fbe28fda037af1bc29816da8a0c1e9a51fc --- /dev/null +++ b/train/tr13-mtf/tr13-176B-mtf-p31lossseq-val.slurm @@ -0,0 +1,225 @@ +#!/bin/bash +#SBATCH --job-name=val-tr13-176B-ml-p31lossseq +#SBATCH --partition=gpu_p5 +#SBATCH --constraint=a100 +#SBATCH --reservation=hug +#SBATCH --qos=qos_gpu-gc # up to 100h +#SBATCH --nodes=36 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! +#SBATCH --cpus-per-task=64 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:8 # number of gpus +#SBATCH --time 100:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --account=six@a100 + +set -x -e + +source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0 +echo "START TIME: $(date)" + +variant=p31lossseq + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13-176B-ml-t0 +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant +REPO_PATH=$DATA_OUTPUT_PATH/tr13-176B-ml-t0-logs +TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant +LOGS_PATH=$REPO_PATH/logs/$variant +mkdir -p $LOGS_PATH +mkdir -p $TENSORBOARD_PATH + +MEGATRON_DEEPSPEED_REPO=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0/megdslossseqnew/Megatron-DeepSpeed +cd $MEGATRON_DEEPSPEED_REPO + +KILL_SWITCH_PATH=$MEGATRON_DEEPSPEED_REPO/kill-switch-tr13-176B-mtf + +TRAIN_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/p31_train.txt +VALID_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/p31_validation_pretr.txt +TOKENIZER_NAME_OR_PATH=bigscience/tokenizer + +# defining the right environment variables +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 + +# testing for potential faulty nodes +# srun --jobid $SLURM_JOB_ID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"' + +# so processes know who to talk to +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6001 + +GPUS_PER_NODE=8 +NNODES=$SLURM_NNODES + +# TP=1/PP=72/MBS=1/Nodes=36 +PP_SIZE=72 +TP_SIZE=1 + +# T0 paper: +# ...truncate input and target sequences to 1024 and 256 tokens... +# ...use a batch size of 1024 sequences ... 2^20 total input tokens per batch... +# We use 2048 total tokens and 512 batch size = 2**20 +MICRO_BATCH_SIZE=1 +GLOBAL_BATCH_SIZE=2048 + +NHIDDEN=14336 +NLAYERS=70 +NHEADS=112 +SEQ_LEN=2048 + +# After CKPT is saved; stop training; change to desired SAVE_INTERNAL & remove no-load-optim & remove universal ckpt +SAVE_INTERVAL=5 + +TRAIN_SAMPLES=6_348_800 + +# T0 paper: +# "...we use a learning rate of 1e-3..." 
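A back-of-the-envelope check (mine, not in the script) that the 176B validation setup is self-consistent: 36 nodes x 8 GPUs = 288 ranks = PP 72 x TP 1 x DP 4; the 'type:transformer|embedding' partition method spreads the 70 transformer layers plus the two embedding stages over the 72 pipeline stages; and the hidden size and layer count land near 176B parameters (12*L*H^2 for attention plus MLP, plus the tied embedding; biases and layernorms ignored):

NNODES=36; GPUS_PER_NODE=8; PP_SIZE=72; TP_SIZE=1
NLAYERS=70; NHIDDEN=14336; VOCAB=250880
echo "data-parallel size: $(( NNODES * GPUS_PER_NODE / (PP_SIZE * TP_SIZE) ))"   # 4
echo "pipeline stages for transformer+embedding: $(( NLAYERS + 2 ))"             # 72
TRANSFORMER=$(( 12 * NLAYERS * NHIDDEN * NHIDDEN ))
EMBED=$(( VOCAB * NHIDDEN ))
echo "approx parameters: $(( (TRANSFORMER + EMBED) / 1000000000 ))B"             # ~176B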
+# However, they use Adafactor, which adapts the LR +# For Adam we likely want a lower one +# FLAN: +# "...decay of 1e-4.."" + +# Uncomment for the first step +# --no-load-optim \ +# --reset-progress \ +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.95 \ + --adam-eps 1e-8 \ + --lr 2e-5 \ + --lr-decay-style constant \ + --lr-warmup-samples 0 \ + --clip-grad 1.0 \ + --weight-decay 1e-4 \ + --no-load-optim \ + --norm-target-loss \ + " +# for 20h 1190, for 100h 5990 +# --exit-duration-in-mins 1190 \ +EXIT_OPTS=" \ + --exit-duration-in-mins 5990 \ + " + +GPT_ARGS=" \ + --pp-partition-method 'type:transformer|embedding' \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --train-samples $TRAIN_SAMPLES \ + --tokenizer-type PretrainedFromHF \ + --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \ + --init-method-std 0.0048 \ + --embed-layernorm \ + --sync-tp-duplicated-parameters \ + --bf16 \ + --seed 42 \ + --position-embedding-type alibi \ + --checkpoint-activations \ + --abort-on-unmet-fused-kernel-constraints \ + --kill-switch-path $KILL_SWITCH_PATH \ + --pad-vocab-size-to 250880 \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +OUTPUT_ARGS=" \ + --log-interval 1 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 250 \ + --eval-iters 10 \ + --eval-only True \ + --tensorboard-dir $TENSORBOARD_PATH \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=0 # important: bf16 must use z0! it implements its own zero stage 1 equivalent + +config_json="./ds_config.$SLURM_JOBID.json" + + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat < $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "bf16": { + "enabled": true + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.run \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \ + --rdzv_backend c10d \ + --max_restarts 0 \ + --tee 3 \ + " + +# --universal-checkpoint \ +export CMD=" \ + `pwd`/finetune_t0.py \ + --universal-checkpoint \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --train-weighted-split-paths-path $TRAIN_DATA_PATH \ + --valid-weighted-split-paths-path $VALID_DATA_PATH \ + --dataloader-type single \ + --data-impl mmap \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + +echo $CMD + +# do not remove or the training will hang and nodes will be lost w/o this workaround +export CUDA_LAUNCH_BLOCKING=1 + +# hide duplicated errors using this hack - will be properly fixed in pt-1.12 +export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json + +# force crashing on nccl issues like hanging broadcast +export NCCL_ASYNC_ERROR_HANDLING=1 + +# srun error handling: +# --wait=60: wait 60 sec after the first task terminates before terminating all remaining 
tasks +# --kill-on-bad-exit=1: terminate a step if any task exits with a non-zero exit code +SRUN_ARGS=" \ + --wait=60 \ + --kill-on-bad-exit=1 \ + " + +clear; srun $SRUN_ARGS --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt + +echo "END TIME: $(date)" diff --git a/train/tr14-mup/grid_search.sh b/train/tr14-mup/grid_search.sh new file mode 100644 index 0000000000000000000000000000000000000000..3808a42a0cdd23f1efa3f96252a8a534bd11f9b1 --- /dev/null +++ b/train/tr14-mup/grid_search.sh @@ -0,0 +1,16 @@ +for inpm in 10 1 0.1 0.01 0.001 +do + for outm in 10 1 0.1 0.01 0.001 + do + for atnm in 10 1 0.1 0.01 0.001 + do + for lr in 0.1 0.03 0.01 0.003 0.001 + do + for init in 1 0.3 0.1 0.03 0.01 + do + sbatch --job-name=tr14-39M-lr$lr-init$init-inpm$inpm-outm$outm-atnm$atnm-mup tr14-39M-grid-search-mup.slurm $lr $init $inpm $outm $atnm + done + done + done + done +done diff --git a/train/tr14-mup/lr_sweep.sh b/train/tr14-mup/lr_sweep.sh new file mode 100644 index 0000000000000000000000000000000000000000..08efad3325c280f709975efd451f8acb644bdc73 --- /dev/null +++ b/train/tr14-mup/lr_sweep.sh @@ -0,0 +1,7 @@ +for lr in 0.052 0.017 0.0052 0.0017; do + sbatch --job-name=tr14-39M-lr$lr-init0.1-inpm10-outm10-atnm10-mup tr14-39M-grid-search-mup.slurm $lr 0.1 10 10 10 +done + +for lr in 0.01 0.052 0.03 0.017 0.01 0.0052 0.003 0.0017 0.001; do + sbatch --job-name=tr14-2B7-lr$lr-init0.1-inpm10-outm10-atnm10-mup tr14-2B7-grid-search-mup.slurm $lr 0.1 10 10 10 +done diff --git a/train/tr14-mup/tr14-2B7-grid-search-mup.slurm b/train/tr14-mup/tr14-2B7-grid-search-mup.slurm new file mode 100644 index 0000000000000000000000000000000000000000..f4f1ed92ff6f656173167cb3075dbf3b3f84da79 --- /dev/null +++ b/train/tr14-mup/tr14-2B7-grid-search-mup.slurm @@ -0,0 +1,206 @@ +#!/bin/bash +#SBATCH --qos=qos_gpu-t3 +#SBATCH --nodes=2 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! 
+#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:8 # number of gpus +#SBATCH --time 04:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x.out # output file name +#SBATCH --partition=gpu_p5 +#SBATCH --account=ajs@a100 +#SBATCH -C a100 + +set -x -e + +#source $ajs_ALL_CCFRWORK/start-py38-pt110 +#source $ajs_ALL_CCFRWORK/start-py38-pt111 +source $six_ALL_CCFRWORK/code/tr11-176B-ml/bigscience/train/tr11-176B-ml/start-tr11-176B-ml + +echo "START TIME: $(date)" + +variant=main + +DATA_PATH=$ajs_ALL_CCFRSCRATCH/datasets/c4/gpt2tok_c4_text_document +DATA_OUTPUT_PATH=$ajs_ALL_CCFRSCRATCH/checkpoints/tr14-2B7-mup-lr$1-init$2-inpm$3-outm$4-atnm$5-mup +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant +REPO_PATH=$DATA_OUTPUT_PATH/tr14-2B7-mup-lr$1-init$2-inpm$3-outm$4-atnm$5-mup-logs +TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant +LOGS_PATH=$REPO_PATH/logs/$variant +mkdir -p $LOGS_PATH + +MEGATRON_DEEPSPEED_REPO=$ajs_ALL_CCFRWORK/code/Megatron-DeepSpeed +cd $MEGATRON_DEEPSPEED_REPO + +TOKENIZER_NAME_OR_PATH=t5-small + +# defining the right environment variables +export TRANSFORMERS_CACHE=$ajs_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$ajs_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$ajs_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$ajs_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 + +# testing for potential faulty nodes +# srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"' + +# so processes know who to talk to +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6000 + +GPUS_PER_NODE=8 +NNODES=$SLURM_NNODES + +PP_SIZE=1 +TP_SIZE=2 + +MICRO_BATCH_SIZE=1 +GLOBAL_BATCH_SIZE=512 + +NLAYERS=32 +NHIDDEN=2560 +NHEADS=32 +SEQ_LEN=2048 + +SAVE_INTERVAL=250 + +TRAIN_SAMPLES=25_000_000 # 51.2B tokens +LR_DECAY_SAMPLES=25_000_000 # Decay in the same amount +LR_WARMUP_SAMPLES=183_105 # 375M tokens + + +MUP_ARGS=" \ + --lr $1 \ + --min-lr `bc <<< "scale=3; $1/10"` \ + --init-method-std $2 \ + --mup \ + --mup-input-mult $3 \ + --mup-output-mult $4 \ + --mup-attn-mult $5 \ +" + + +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.95 \ + --adam-eps 1e-8 \ + --lr-decay-style cosine \ + --lr-decay-samples $LR_DECAY_SAMPLES \ + --lr-warmup-samples $LR_WARMUP_SAMPLES \ + --clip-grad 1.0 \ + --weight-decay 1e-1 \ + " +# for 20h 1190, for 100h 5990 +EXIT_OPTS=" \ + --exit-duration-in-mins 1190 \ + " + +GPT_ARGS=" \ + --pp-partition-method 'type:transformer' \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --rampup-batch-size 192 32 9_765_625 \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --train-samples $TRAIN_SAMPLES \ + --tokenizer-type PretrainedFromHF \ + --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \ + --embed-layernorm \ + --fp16 \ + --seed 42 \ + --position-embedding-type alibi \ + --checkpoint-activations \ + --abort-on-unmet-fused-kernel-constraints \ + --pad-vocab-size-to 51200 \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +# TODO: decide on efficient eval-interval + eval-iters + +OUTPUT_ARGS=" \ + --log-interval 1 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 1000 \ + --eval-iters 1 \ + --tensorboard-dir $TENSORBOARD_PATH \ + --tensorboard-queue-size 5 \ + 
--log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=1 + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat < $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.run \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \ + --rdzv_backend c10d \ + --max_restarts 0 \ + --tee 3 \ + " + +export CMD=" \ + `pwd`/pretrain_gpt.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + $MUP_ARGS \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --data-path $DATA_PATH \ + --data-impl mmap \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + +echo $CMD + +# do not remove or the training will hang and nodes will be lost w/o this workaround +export CUDA_LAUNCH_BLOCKING=1 + +# hide duplicated errors using this hack - will be properly fixed in pt-1.12 +export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json + +clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt + +echo "END TIME: $(date)" diff --git a/train/tr14-mup/tr14-2B7-mup-cluster.slurm b/train/tr14-mup/tr14-2B7-mup-cluster.slurm new file mode 100644 index 0000000000000000000000000000000000000000..046e099328adeca4a2c0c85153ad272cc71bc5f9 --- /dev/null +++ b/train/tr14-mup/tr14-2B7-mup-cluster.slurm @@ -0,0 +1,237 @@ +#!/bin/bash +#SBATCH --job-name=tr14-2B7-mup +#SBATCH --partition=production-cluster +#SBATCH --nodes=8 +#SBATCH --cpus-per-task=12 +#SBATCH --ntasks-per-node=1 +#SBATCH --gres=gpu:a100:8 +#SBATCH --hint=nomultithread +#SBATCH --time 100:00:00 +#SBATCH --output=/fsx/teven/mup/tr14-2B7-%j.out +#SBATCH --exclude=ip-26-0-159-215,ip-26-0-153-238 + +echo "START TIME: $(date)" + +mkdir -p $LOGS_PATH + +# >>> conda initialize >>> +# !! Contents within this block are managed by 'conda init' !! +__conda_setup="$('/admin/home/teven/miniconda3/bin/conda' 'shell.bash' 'hook' 2> /dev/null)" +if [ $? -eq 0 ]; then + eval "$__conda_setup" +else + if [ -f "/admin/home/teven/miniconda3/etc/profile.d/conda.sh" ]; then + . 
"/admin/home/teven/miniconda3/etc/profile.d/conda.sh" + else + export PATH="/admin/home/teven/miniconda3/bin:$PATH" + fi +fi +unset __conda_setup +# <<< conda initialize <<< + +# Proper env variables +conda activate tvn_dev +export PATH=/usr/local/cuda-11.4/bin:$PATH +export NCCL_PROTO=simple +export PATH=/opt/amazon/efa/bin:$PATH + +export FI_EFA_FORK_SAFE=1 +export FI_LOG_LEVEL=1 +export FI_EFA_USE_DEVICE_RDMA=1 # use for p4dn + +#export NCCL_ALGO=ring +#export NCCL_DEBUG=info +#export NCCL_DEBUG_SUBSYS=INIT,ENV,GRAPH,COLL + +export PYTHONFAULTHANDLER=1 + +export CUDA_LAUNCH_BLOCKING=0 +export OMPI_MCA_mtl_base_verbose=1 +export FI_EFA_ENABLE_SHM_TRANSFER=0 +export FI_PROVIDER=efa +export FI_EFA_TX_MIN_CREDITS=64 +export NCCL_TREE_THRESHOLD=0 +#export TORCH_CPP_LOG_LEVEL=INFO +#export TORCH_DISTRIBUTED_DEBUG=INFO + +export NCCL_ASYNC_ERROR_HANDLING=1 +#export NCCL_P2P_DISABLE=1 +#export NCCL_IBEXT_DISABLE=1 +#export NCCL_SOCKET_IFNAME="eth0,en,eth,em,bond" + +# testing for potential faulty nodes +srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"' + +# so processes know who to talk to +export MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1) +export MASTER_PORT=12802 + + +MEGATRON_DEEPSPEED_REPO=/fsx/teven/Megatron-DeepSpeed +cd $MEGATRON_DEEPSPEED_REPO + +TOKENIZER_NAME_OR_PATH=t5-small + +variant=main + +DATA_PATH=/fsx/data/gpt2tok_c4_text_document +DATA_OUTPUT_PATH=/fsx/mup_exps/checkpoints/tr14-2B7-lr$1-init0.1-inpm10-outm10-atnm10-mup +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant +REPO_PATH=$DATA_OUTPUT_PATH/tr14-2B7-test-lr$1-init0.1-inpm10-outm10-atnm10-mup +TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant +LOGS_PATH=$REPO_PATH/logs/$variant + +GPUS_PER_NODE=8 +NNODES=$SLURM_NNODES + +PP_SIZE=1 +TP_SIZE=2 + +MICRO_BATCH_SIZE=16 +GLOBAL_BATCH_SIZE=512 + +NLAYERS=32 +NHIDDEN=2560 +NHEADS=32 +SEQ_LEN=2048 + +SAVE_INTERVAL=250 + +TRAIN_SAMPLES=1_953_125 # 50B tokens +LR_DECAY_SAMPLES=1_953_125 # Decay in the same amount +LR_WARMUP_SAMPLES=183_105 # 375M tokens + + +MUP_ARGS=" \ + --lr $1 \ + --min-lr `bc <<< "scale=3; $1/10"` \ + --init-method-std 0.1 \ + --mup \ + --mup-input-mult 10 \ + --mup-output-mult 10 \ + --mup-attn-mult 10 \ +" + + +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.95 \ + --adam-eps 1e-8 \ + --lr-decay-style cosine \ + --lr-decay-samples $LR_DECAY_SAMPLES \ + --lr-warmup-samples $LR_WARMUP_SAMPLES \ + --clip-grad 1.0 \ + --weight-decay 1e-1 \ + " +# for 20h 1190, for 100h 5990 +EXIT_OPTS=" \ + --exit-duration-in-mins 1190 \ + " + +GPT_ARGS=" \ + --pp-partition-method 'type:transformer' \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --train-samples $TRAIN_SAMPLES \ + --tokenizer-type PretrainedFromHF \ + --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \ + --embed-layernorm \ + --fp16 \ + --seed 42 \ + --position-embedding-type alibi \ + --checkpoint-activations \ + --abort-on-unmet-fused-kernel-constraints \ + --pad-vocab-size-to 51200 \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +# TODO: decide on efficient eval-interval + eval-iters + +OUTPUT_ARGS=" \ + --log-interval 1 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 1000 \ + --eval-iters 1 \ + --tensorboard-dir $TENSORBOARD_PATH \ + --tensorboard-queue-size 5 \ + 
--log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=1 + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat < $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.run \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \ + --rdzv_backend c10d \ + --max_restarts 0 \ + --tee 3 \ + " + +export CMD=" \ + `pwd`/pretrain_gpt.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + $MUP_ARGS \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --data-path $DATA_PATH \ + --data-impl mmap \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + +echo $CMD + +# do not remove or the training will hang and nodes will be lost w/o this workaround +export CUDA_LAUNCH_BLOCKING=1 + +# hide duplicated errors using this hack - will be properly fixed in pt-1.12 +export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json + +clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt + +echo "END TIME: $(date)" diff --git a/train/tr14-mup/tr14-2B7-mup.slurm b/train/tr14-mup/tr14-2B7-mup.slurm new file mode 100644 index 0000000000000000000000000000000000000000..5b2f343011749ad37ec2c357fa8c881df20ddfb3 --- /dev/null +++ b/train/tr14-mup/tr14-2B7-mup.slurm @@ -0,0 +1,205 @@ +#!/bin/bash +#SBATCH --qos=qos_gpu-t3 +#SBATCH --nodes=8 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! 
+#SBATCH --cpus-per-task=16 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:8 # number of gpus +#SBATCH --time 05:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x.out # output file name +#SBATCH --partition=gpu_p5 +#SBATCH --account=ajs@a100 +#SBATCH -C a100 + +set -x -e + +#source $ajs_ALL_CCFRWORK/start-py38-pt110 +#source $ajs_ALL_CCFRWORK/start-py38-pt111 +source $six_ALL_CCFRWORK/code/tr11-176B-ml/bigscience/train/tr11-176B-ml/start-tr11-176B-ml + +echo "START TIME: $(date)" + +variant=main + +DATA_PATH=$ajs_ALL_CCFRSCRATCH/datasets/c4/gpt2tok_c4_text_document +DATA_OUTPUT_PATH=$ajs_ALL_CCFRSCRATCH/checkpoints/tr14-2B7-lr$1-init0.1-inpm10-outm10-atnm10-mup +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant +REPO_PATH=$DATA_OUTPUT_PATH/tr14-2B7-test-lr$1-init0.1-inpm10-outm10-atnm10-mup +TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant +LOGS_PATH=$REPO_PATH/logs/$variant +mkdir -p $LOGS_PATH + +MEGATRON_DEEPSPEED_REPO=$ajs_ALL_CCFRWORK/code/Megatron-DeepSpeed +cd $MEGATRON_DEEPSPEED_REPO + +TOKENIZER_NAME_OR_PATH=t5-small + +# defining the right environment variables +export TRANSFORMERS_CACHE=$ajs_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$ajs_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$ajs_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$ajs_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 + +# testing for potential faulty nodes +# srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"' + +# so processes know who to talk to +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6000 + +GPUS_PER_NODE=8 +NNODES=$SLURM_NNODES + +PP_SIZE=1 +TP_SIZE=2 + +MICRO_BATCH_SIZE=16 +GLOBAL_BATCH_SIZE=512 + +NLAYERS=32 +NHIDDEN=2560 +NHEADS=32 +SEQ_LEN=2048 + +SAVE_INTERVAL=250 + +TRAIN_SAMPLES=1_953_125 # 50B tokens +LR_DECAY_SAMPLES=1_953_125 # Decay in the same amount +LR_WARMUP_SAMPLES=183_105 # 375M tokens + + +MUP_ARGS=" \ + --lr $1 \ + --min-lr `bc <<< "scale=3; $1/10"` \ + --init-method-std 0.1 \ + --mup \ + --mup-input-mult 10 \ + --mup-output-mult 10 \ + --mup-attn-mult 10 \ +" + + +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.95 \ + --adam-eps 1e-8 \ + --lr-decay-style cosine \ + --lr-decay-samples $LR_DECAY_SAMPLES \ + --lr-warmup-samples $LR_WARMUP_SAMPLES \ + --clip-grad 1.0 \ + --weight-decay 1e-1 \ + " +# for 20h 1190, for 100h 5990 +EXIT_OPTS=" \ + --exit-duration-in-mins 1190 \ + " + +GPT_ARGS=" \ + --pp-partition-method 'type:transformer' \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --train-samples $TRAIN_SAMPLES \ + --tokenizer-type PretrainedFromHF \ + --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \ + --embed-layernorm \ + --fp16 \ + --seed 42 \ + --position-embedding-type alibi \ + --checkpoint-activations \ + --abort-on-unmet-fused-kernel-constraints \ + --pad-vocab-size-to 51200 \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +# TODO: decide on efficient eval-interval + eval-iters + +OUTPUT_ARGS=" \ + --log-interval 1 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 1000 \ + --eval-iters 1 \ + --tensorboard-dir $TENSORBOARD_PATH \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ 
+ --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=1 + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat < $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.run \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \ + --rdzv_backend c10d \ + --max_restarts 0 \ + --tee 3 \ + " + +export CMD=" \ + `pwd`/pretrain_gpt.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + $MUP_ARGS \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --data-path $DATA_PATH \ + --data-impl mmap \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + +echo $CMD + +# do not remove or the training will hang and nodes will be lost w/o this workaround +export CUDA_LAUNCH_BLOCKING=1 + +# hide duplicated errors using this hack - will be properly fixed in pt-1.12 +export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json + +clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt + +echo "END TIME: $(date)" diff --git a/train/tr14-mup/tr14-39M-grid-search-mup.slurm b/train/tr14-mup/tr14-39M-grid-search-mup.slurm new file mode 100644 index 0000000000000000000000000000000000000000..237c56a76f4550fae4dabd276478874959c4a1c3 --- /dev/null +++ b/train/tr14-mup/tr14-39M-grid-search-mup.slurm @@ -0,0 +1,204 @@ +#!/bin/bash +#SBATCH --qos=qos_gpu-t3 +#SBATCH --nodes=1 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! 
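# Annotation: like the 2B7 variant, this script takes five positional arguments
# ($1=lr, $2=init-method-std, $3=mup-input-mult, $4=mup-output-mult, $5=mup-attn-mult),
# which grid_search.sh fills in from its nested loops: 5 values per knob across 5 knobs,
# i.e. 5^5 = 3125 separate sbatch submissions of this file if the full grid is launched.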
+#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:8 # number of gpus +#SBATCH --time 04:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x.out # output file name +#SBATCH --partition=gpu_p5 +#SBATCH --account=ajs@a100 +#SBATCH -C a100 + +set -x -e + +#source $ajs_ALL_CCFRWORK/start-py38-pt110 +#source $ajs_ALL_CCFRWORK/start-py38-pt111 +source $six_ALL_CCFRWORK/code/tr11-176B-ml/bigscience/train/tr11-176B-ml/start-tr11-176B-ml + +echo "START TIME: $(date)" + +variant=main + +DATA_PATH=$ajs_ALL_CCFRSCRATCH/datasets/c4/gpt2tok_c4_text_document +DATA_OUTPUT_PATH=$ajs_ALL_CCFRSCRATCH/checkpoints/tr14-39M-lr$1-init$2-inpm$3-outm$4-atnm$5-mup +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant +REPO_PATH=$DATA_OUTPUT_PATH/tr14-39M-lr$1-init$2-inpm$3-outm$4-atnm$5-mup-logs +TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant +LOGS_PATH=$REPO_PATH/logs/$variant +mkdir -p $LOGS_PATH + +MEGATRON_DEEPSPEED_REPO=$ajs_ALL_CCFRWORK/code/Megatron-DeepSpeed +cd $MEGATRON_DEEPSPEED_REPO + +TOKENIZER_NAME_OR_PATH=gpt2 + +# defining the right environment variables +export TRANSFORMERS_CACHE=$ajs_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$ajs_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$ajs_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$ajs_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 + +# testing for potential faulty nodes +# srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"' + +# so processes know who to talk to +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6000 + +GPUS_PER_NODE=8 +NNODES=$SLURM_NNODES + +PP_SIZE=1 +TP_SIZE=1 + +MICRO_BATCH_SIZE=32 +GLOBAL_BATCH_SIZE=512 + +NLAYERS=32 +NHIDDEN=256 +NHEADS=8 +SEQ_LEN=2048 + +SAVE_INTERVAL=1000 + +TRAIN_SAMPLES=1_953_125 # 40B tokens +LR_DECAY_SAMPLES=1_953_125 # Decay in the same amount +LR_WARMUP_SAMPLES=183_105 # 375M tokens + + +MUP_ARGS=" \ + --lr $1 \ + --min-lr `bc <<< "scale=3; $1/10"` \ + --init-method-std $2 \ + --mup \ + --mup-input-mult $3 \ + --mup-output-mult $4 \ + --mup-attn-mult $5 \ +" + + +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.95 \ + --adam-eps 1e-8 \ + --lr-decay-style cosine \ + --lr-decay-samples $LR_DECAY_SAMPLES \ + --lr-warmup-samples $LR_WARMUP_SAMPLES \ + --clip-grad 1.0 \ + --weight-decay 1e-1 \ + " +# for 20h 1190, for 100h 5990 +EXIT_OPTS=" \ + --exit-duration-in-mins 1190 \ + " + +GPT_ARGS=" \ + --pp-partition-method 'type:transformer' \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --train-samples $TRAIN_SAMPLES \ + --tokenizer-type PretrainedFromHF \ + --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \ + --embed-layernorm \ + --fp16 \ + --seed 42 \ + --position-embedding-type alibi \ + --checkpoint-activations \ + --abort-on-unmet-fused-kernel-constraints \ + --pad-vocab-size-to 51200 \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +# TODO: decide on efficient eval-interval + eval-iters + +OUTPUT_ARGS=" \ + --log-interval 10 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 1000 \ + --eval-iters 100 \ + --tensorboard-dir $TENSORBOARD_PATH \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + 
--log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=1 + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat < $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.run \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \ + --rdzv_backend c10d \ + --max_restarts 0 \ + --tee 3 \ + " + +export CMD=" \ + `pwd`/pretrain_gpt.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + $MUP_ARGS \ + --save $CHECKPOINT_PATH \ + --data-path $DATA_PATH \ + --data-impl mmap \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + +echo $CMD + +# do not remove or the training will hang and nodes will be lost w/o this workaround +export CUDA_LAUNCH_BLOCKING=1 + +# hide duplicated errors using this hack - will be properly fixed in pt-1.12 +export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json + +clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt + +echo "END TIME: $(date)" diff --git a/train/tr5-1B3-multilingual/README.md b/train/tr5-1B3-multilingual/README.md new file mode 100644 index 0000000000000000000000000000000000000000..20d6db2604c0eae9c42dd661140769477e4c4873 --- /dev/null +++ b/train/tr5-1B3-multilingual/README.md @@ -0,0 +1 @@ +`generate_dataset_probabilities.py` is deprecated; the new way to generate probabilities is at `/data/sampling_probs/calc_iterator_prob.py`. 
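
Whichever script generates it, the probabilities file is consumed as-is by the training jobs: `oscar/tr5a-bnb-1B3-multilingual-mt5tok.slurm` simply `cat`s it into a shell variable, so it has to stay a single line of alternating `probability path` pairs, where each path is a Megatron dataset prefix (the `.bin`/`.idx` basename); the later tr5b/c/d runs read analogous `train/valid/test_data_string.0.3.txt` files for the weighted-split arguments. A minimal sketch of how the file is consumed (the weights and paths below are made up for illustration):

```bash
# hypothetical values; the real file lives under the run's sampling_probs/ directory
DATA_PATH=$(cat dataset_probabilities.0.3.txt)
# e.g. DATA_PATH="0.12 /path/to/mc4/en_text_document 0.07 /path/to/mc4/fr_text_document ..."
# the whole string is then forwarded verbatim to Megatron-DeepSpeed's pretrain_gpt.py:
#   --data-path $DATA_PATH
```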
diff --git a/train/tr5-1B3-multilingual/generate_dataset_probabilities.py b/train/tr5-1B3-multilingual/generate_dataset_probabilities.py new file mode 100644 index 0000000000000000000000000000000000000000..fec83c9422a5122f7feacbc46a7c73a9a7a0584d --- /dev/null +++ b/train/tr5-1B3-multilingual/generate_dataset_probabilities.py @@ -0,0 +1,41 @@ +import json +import os +from pathlib import Path + +def removeprefix(string, prefix): + if string.startswith(prefix): + string = string[len(prefix):] + return string + +def removesuffix(string, suffix): + if string.endswith(suffix): + string = string[:-len(suffix)] + return string + +def convert_path(original_path, data_path): + prefix_to_remove = "dumped/mc4_processed_data/" + suffix_to_remove = ".bin" + + return data_path / removesuffix(removeprefix(original_path, prefix_to_remove), suffix_to_remove) + +def main(): + """Write to """ + data_path = Path(os.environ["six_ALL_CCFRSCRATCH"]) / "datasets-custom" / "mc4" / "mc4_preprocessing" + output_path = Path(os.environ["six_ALL_CCFRSCRATCH"]) / "checkpoints" / "tr5-1B3-multilingual" / "dataset_probabilities.txt" + + probabilies_path = data_path / "sample_iterator_probs" / "iterator_selection_prob.0.3.train.json" + + with open(probabilies_path, "r") as fprob: + probabilities = json.load(fprob) + + # Format probabilities dictionary to store path in key and probability as value + probabilities = { + convert_path(key, data_path): value[0] for key, value in probabilities.items() + } + + with open(output_path, "w") as fout: + fout.write(" ".join([f"{prob} {path}" for path, prob in probabilities.items()])) + pass + +if __name__ == "__main__": + main() diff --git a/train/tr5-1B3-multilingual/oscar/tr5a-bnb-1B3-multilingual-mt5tok.slurm b/train/tr5-1B3-multilingual/oscar/tr5a-bnb-1B3-multilingual-mt5tok.slurm new file mode 100644 index 0000000000000000000000000000000000000000..be7509abe512a87a6d3006dce884e71cf01affcc --- /dev/null +++ b/train/tr5-1B3-multilingual/oscar/tr5a-bnb-1B3-multilingual-mt5tok.slurm @@ -0,0 +1,179 @@ +#!/bin/bash +#SBATCH --job-name=tr5a-bnb-1B3-multilingual-mt5tok.slurm +#SBATCH --qos=qos_gpu-t3 +#SBATCH --partition=gpu_p13 +#SBATCH --nodes=16 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! 
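# Annotation: this is the bitsandbytes variant; OPTIMIZER_ARGS below passes --use-bnb-optimizer,
# and the matching ds_config sets "zero_allow_untested_optimizer": true, presumably because
# the 8-bit BNB Adam is not one of the optimizers DeepSpeed ships and vets itself.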
+#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --error=%x-%j.out # error file name (same to watch just one file) +#SBATCH --account=six@v100 + +set -x -e + + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/synched_exps/tr5a-bnb-1B3-multilingual-mt5tok +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints +REPO_PATH=$DATA_OUTPUT_PATH/tr5a-bnb-1B3-multilingual-mt5tok-logs +TENSORBOARD_PATH=$REPO_PATH/tensorboard +CODECARBON_PATH=$REPO_PATH/codecarbon +LOGS_PATH=$REPO_PATH/logs +MEGATRON_DEEPSPEED_REPO=$SCRATCH/Megatron-DeepSpeed + +DATA_PATH=`cat $DATA_OUTPUT_PATH/sampling_probs/dataset_probabilities.0.3.txt` + +# defining the right environment variables +source $six_ALL_CCFRWORK/start-prod +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 +cd $MEGATRON_DEEPSPEED_REPO + +# so processes know who to talk to +MASTER_ADDR=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1` +MASTER_PORT=6000 + +GPUS_PER_NODE=4 +NNODES=16 +PP_SIZE=4 # NLAYERS must be a multiple of PP_SIZE here +TP_SIZE=4 # always fixed to the size of a single node +DP_SIZE=$((NNODES*GPUS_PER_NODE/(PP_SIZE*TP_SIZE))) # will get derived automatically by trainer + +MICRO_BATCH_SIZE=8 +GLOBAL_BATCH_SIZE=512 +TRAIN_ITER=73_242_187 + +NLAYERS=24 +NHIDDEN=2048 +NHEADS=16 +FFN_HIDDEN_SIZE=8192 +SEQ_LEN=2048 + +SAVE_INTERVAL=1500 + +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.999 \ + --adam-eps 1e-8 \ + --lr 2e-4 \ + --min-lr 1e-5 \ + --lr-decay-style cosine \ + --lr-decay-samples $TRAIN_ITER \ + --lr-warmup-samples 183_105 \ + --clip-grad 1.0 \ + --weight-decay 1e-1 \ + --use-bnb-optimizer \ + " + +EXIT_OPTS=" \ + --exit-duration-in-mins 1190 \ + " + +GPT_ARGS=" \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --ffn-hidden-size $FFN_HIDDEN_SIZE \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --rampup-batch-size 32 32 2_000_000 \ + --train-samples $TRAIN_ITER \ + --tokenizer-type PretrainedFromHF \ + --tokenizer-name-or-path google/mt5-base \ + --loss-scale 12 \ + --clip-grad 1.0 \ + --fp16 \ + --checkpoint-activations \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +OUTPUT_ARGS=" \ + --log-interval 200 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 10000 \ + --eval-iters 100 \ + --tensorboard-dir $TENSORBOARD_PATH \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=1 + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat < $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "zero_allow_untested_optimizer": true, + "steps_per_print": 2000, + 
"wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.launch \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --master_addr $MASTER_ADDR \ + --master_port $MASTER_PORT \ + " + +export CMD=" \ + `pwd`/pretrain_gpt.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --data-path $DATA_PATH \ + --data-impl mmap \ + --split 949,50,1 \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + + +# # clear old checkpoint as it'd mismatch while we sort things out +# rm -rf $SAVE_CHECKPOINT_PATH + + +echo $CMD + +# We create the folder where the logs and codecarbon will be stored. +mkdir -p $LOGS_PATH +# to debug - add echo (it exits and prints what it would have launched) +srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' 2>&1 | tee -a $LOGS_PATH/main_log.txt diff --git a/train/tr5-1B3-multilingual/oscar/tr5b-1B3-multilingual-alpha.slurm b/train/tr5-1B3-multilingual/oscar/tr5b-1B3-multilingual-alpha.slurm new file mode 100644 index 0000000000000000000000000000000000000000..1845b2df64fe45612c5b0af027ccd0084816bff1 --- /dev/null +++ b/train/tr5-1B3-multilingual/oscar/tr5b-1B3-multilingual-alpha.slurm @@ -0,0 +1,185 @@ +#!/bin/bash +#SBATCH --job-name=tr5b-1B3-multilingual-alpha +#SBATCH --constraint=v100-16g +#SBATCH --nodes=64 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! +#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --account=six@v100 + +set -x -e + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/synched_exps/tr5b-1B3-multilingual-alpha +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints +REPO_PATH=$DATA_OUTPUT_PATH/tr5b-1B3-multilingual-alpha-logs +TENSORBOARD_PATH=$REPO_PATH/tensorboard +CODECARBON_PATH=$REPO_PATH/codecarbon +LOGS_PATH=$REPO_PATH/logs +MEGATRON_DEEPSPEED_REPO=$ALL_CCFRWORK/code/Megatron-DeepSpeed + +TRAIN_DATA_PATH=`cat $DATA_OUTPUT_PATH/sampling_probs/train_data_string.0.3.txt` +VALID_DATA_PATH=`cat $DATA_OUTPUT_PATH/sampling_probs/valid_data_string.0.3.txt` +TEST_DATA_PATH=`cat $DATA_OUTPUT_PATH/sampling_probs/test_data_string.0.3.txt` + +# defining the right environment variables +source $six_ALL_CCFRWORK/start-prod +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 +cd $MEGATRON_DEEPSPEED_REPO + +# testing for potential faulty nodes +srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"' + +# so processes know who to talk to +MASTER_ADDR=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1` +MASTER_PORT=6000 + +GPUS_PER_NODE=4 +NNODES=64 + +# 1. going to start with PP=8/MBS=1 which is 20% slower than PP=2, but allowing 4x more resources due to bs increments of 32 in `--rampup-batch-size 32 32` - PP=8 gives us a replica of 8 gpus, so we can do 256 gpus: math: 256/8=32 32/32=1 +# 2. 
once BS=512 is reached can switch back to PP=2 and MBS=4: math: 256/2=128 and 512/128=4 to regain the highest TFLOPs +# + +PP_SIZE=8 +TP_SIZE=1 +#DP_SIZE=$((NNODES*GPUS_PER_NODE/(PP_SIZE*TP_SIZE))) # will get derived automatically by trainer + +MICRO_BATCH_SIZE=1 +GLOBAL_BATCH_SIZE=512 +TRAIN_SAMPLES=195_312_500 # 400B tokens + +NLAYERS=24 +NHIDDEN=2048 +NHEADS=16 +FFN_HIDDEN_SIZE=8192 +SEQ_LEN=2048 + +SAVE_INTERVAL=1500 + +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.999 \ + --adam-eps 1e-8 \ + --lr 2e-4 \ + --min-lr 1e-5 \ + --lr-decay-style cosine \ + --lr-decay-samples 169_270_83 \ + --lr-warmup-samples 216_320 \ + --clip-grad 1.0 \ + --weight-decay 1e-1 \ + " + +EXIT_OPTS=" \ + --exit-duration-in-mins 1190 \ + " + +GPT_ARGS=" \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --ffn-hidden-size $FFN_HIDDEN_SIZE \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --rampup-batch-size 32 32 2_000_000 \ + --train-samples $TRAIN_SAMPLES \ + --tokenizer-type PretrainedFromHF \ + --tokenizer-name-or-path bigscience/oscar_13_languages_alpha_weight \ + --loss-scale 12 \ + --clip-grad 1.0 \ + --fp16 \ + --checkpoint-activations \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +OUTPUT_ARGS=" \ + --log-interval 200 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 10000 \ + --eval-iters 100 \ + --tensorboard-dir $TENSORBOARD_PATH \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=1 + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat < $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.launch \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --master_addr $MASTER_ADDR \ + --master_port $MASTER_PORT \ + " + +export CMD=" \ + `pwd`/pretrain_gpt.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --train-weighted-split-paths $TRAIN_DATA_PATH \ + --valid-weighted-split-paths $VALID_DATA_PATH \ + --test-weighted-split-paths $TEST_DATA_PATH \ + --data-impl mmap \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + + +# # clear old checkpoint as it'd mismatch while we sort things out +# rm -rf $SAVE_CHECKPOINT_PATH + + +echo $CMD + +# We create the folder where the logs and codecarbon will be stored. 
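# Note on the srun call below: \$SLURM_PROCID is escaped on purpose. The submitting shell
# expands $LAUNCHER and $CMD once, but each node's `bash -c` expands SLURM_PROCID at runtime,
# so every node passes its own rank as --node_rank. Also, `tee -a` appends, so
# main_log.txt accumulates output across resubmissions of this job.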
+mkdir -p $LOGS_PATH +# to debug - add echo (it exits and prints what it would have launched) +srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt diff --git a/train/tr5-1B3-multilingual/oscar/tr5c-1B3-multilingual-alpha-alibi.slurm b/train/tr5-1B3-multilingual/oscar/tr5c-1B3-multilingual-alpha-alibi.slurm new file mode 100644 index 0000000000000000000000000000000000000000..875fd6300b1054e366d77a65d4b3e52a0174cf1e --- /dev/null +++ b/train/tr5-1B3-multilingual/oscar/tr5c-1B3-multilingual-alpha-alibi.slurm @@ -0,0 +1,186 @@ +#!/bin/bash +#SBATCH --job-name=tr5c-1B3-multilingual-alpha-alibi +#SBATCH --constraint=v100-16g +#SBATCH --nodes=64 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! +#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --account=six@v100 + +set -x -e + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/synched_exps/tr5c-1B3-multilingual-alpha-alibi +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints +REPO_PATH=$DATA_OUTPUT_PATH/tr5c-1B3-multilingual-alpha-alibi-logs +TENSORBOARD_PATH=$REPO_PATH/tensorboard +CODECARBON_PATH=$REPO_PATH/codecarbon +LOGS_PATH=$REPO_PATH/logs +MEGATRON_DEEPSPEED_REPO=$ALL_CCFRWORK/code/Megatron-DeepSpeed + +TRAIN_DATA_PATH=`cat $DATA_OUTPUT_PATH/sampling_probs/train_data_string.0.3.txt` +VALID_DATA_PATH=`cat $DATA_OUTPUT_PATH/sampling_probs/valid_data_string.0.3.txt` +TEST_DATA_PATH=`cat $DATA_OUTPUT_PATH/sampling_probs/test_data_string.0.3.txt` + +# defining the right environment variables +source $six_ALL_CCFRWORK/start-prod +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 +cd $MEGATRON_DEEPSPEED_REPO + +# testing for potential faulty nodes +srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"' + +# so processes know who to talk to +MASTER_ADDR=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1` +MASTER_PORT=6000 + +GPUS_PER_NODE=4 +NNODES=64 + +# 1. going to start with PP=8/MBS=1 which is 20% slower than PP=2, but allowing 4x more resources due to bs increments of 32 in `--rampup-batch-size 32 32` - PP=8 gives us a replica of 8 gpus, so we can do 256 gpus: math: 256/8=32 32/32=1 +# 2. 
once BS=512 is reached can switch back to PP=2 and MBS=4: math: 256/2=128 and 512/128=4 to regain the highest TFLOPs +# + +PP_SIZE=8 +TP_SIZE=1 +#DP_SIZE=$((NNODES*GPUS_PER_NODE/(PP_SIZE*TP_SIZE))) # will get derived automatically by trainer + +MICRO_BATCH_SIZE=1 +GLOBAL_BATCH_SIZE=512 +TRAIN_SAMPLES=195_312_500 # 400B tokens + +NLAYERS=24 +NHIDDEN=2048 +NHEADS=16 +FFN_HIDDEN_SIZE=8192 +SEQ_LEN=2048 + +SAVE_INTERVAL=1500 + +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.999 \ + --adam-eps 1e-8 \ + --lr 2e-4 \ + --min-lr 1e-5 \ + --lr-decay-style cosine \ + --lr-decay-samples $TRAIN_SAMPLES \ + --lr-warmup-samples 216_320 \ + --clip-grad 1.0 \ + --weight-decay 1e-1 \ + " + +EXIT_OPTS=" \ + --exit-duration-in-mins 1190 \ + " + +GPT_ARGS=" \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --ffn-hidden-size $FFN_HIDDEN_SIZE \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --rampup-batch-size 32 32 2_000_000 \ + --train-samples $TRAIN_SAMPLES \ + --tokenizer-type PretrainedFromHF \ + --tokenizer-name-or-path bigscience/oscar_13_languages_alpha_weight \ + --loss-scale 12 \ + --clip-grad 1.0 \ + --fp16 \ + --checkpoint-activations \ + --position-embedding-type alibi \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +OUTPUT_ARGS=" \ + --log-interval 200 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 10000 \ + --eval-iters 100 \ + --tensorboard-dir $TENSORBOARD_PATH \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=1 + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat < $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.launch \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --master_addr $MASTER_ADDR \ + --master_port $MASTER_PORT \ + " + +export CMD=" \ + `pwd`/pretrain_gpt.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --train-weighted-split-paths $TRAIN_DATA_PATH \ + --valid-weighted-split-paths $VALID_DATA_PATH \ + --test-weighted-split-paths $TEST_DATA_PATH \ + --data-impl mmap \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + + +# # clear old checkpoint as it'd mismatch while we sort things out +# rm -rf $SAVE_CHECKPOINT_PATH + + +echo $CMD + +# We create the folder where the logs and codecarbon will be stored. 
+mkdir -p $LOGS_PATH +# to debug - add echo (it exits and prints what it would have launched) +srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt diff --git a/train/tr5-1B3-multilingual/oscar/tr5d-1B3-multilingual-equal-alibi.slurm b/train/tr5-1B3-multilingual/oscar/tr5d-1B3-multilingual-equal-alibi.slurm new file mode 100644 index 0000000000000000000000000000000000000000..b557770cadc46439fb578e275f930d0187105e63 --- /dev/null +++ b/train/tr5-1B3-multilingual/oscar/tr5d-1B3-multilingual-equal-alibi.slurm @@ -0,0 +1,186 @@ +#!/bin/bash +#SBATCH --job-name=tr5d-1B3-multilingual-equal-alibi +#SBATCH --constraint=v100-16g +#SBATCH --nodes=64 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! +#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --account=six@v100 + +set -x -e + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/synched_exps/tr5d-1B3-multilingual-equal-alibi +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints +REPO_PATH=$DATA_OUTPUT_PATH/tr5d-1B3-multilingual-equal-alibi-logs +TENSORBOARD_PATH=$REPO_PATH/tensorboard +CODECARBON_PATH=$REPO_PATH/codecarbon +LOGS_PATH=$REPO_PATH/logs +MEGATRON_DEEPSPEED_REPO=$ALL_CCFRWORK/code/Megatron-DeepSpeed + +TRAIN_DATA_PATH=`cat $DATA_OUTPUT_PATH/sampling_probs/train_data_string.0.3.txt` +VALID_DATA_PATH=`cat $DATA_OUTPUT_PATH/sampling_probs/valid_data_string.0.3.txt` +TEST_DATA_PATH=`cat $DATA_OUTPUT_PATH/sampling_probs/test_data_string.0.3.txt` + +# defining the right environment variables +source $six_ALL_CCFRWORK/start-prod +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 +cd $MEGATRON_DEEPSPEED_REPO + +# testing for potential faulty nodes +srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"' + +# so processes know who to talk to +MASTER_ADDR=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1` +MASTER_PORT=6000 + +GPUS_PER_NODE=4 +NNODES=64 + +# 1. going to start with PP=8/MBS=1 which is 20% slower than PP=2, but allowing 4x more resources due to bs increments of 32 in `--rampup-batch-size 32 32` - PP=8 gives us a replica of 8 gpus, so we can do 256 gpus: math: 256/8=32 32/32=1 +# 2. 
once BS=512 is reached can switch back to PP=2 and MBS=4: math: 256/2=128 and 512/128=4 to regain the highest TFLOPs +# + +PP_SIZE=8 +TP_SIZE=1 +#DP_SIZE=$((NNODES*GPUS_PER_NODE/(PP_SIZE*TP_SIZE))) # will get derived automatically by trainer + +MICRO_BATCH_SIZE=1 +GLOBAL_BATCH_SIZE=512 +TRAIN_SAMPLES=195_312_500 # 400B tokens + +NLAYERS=24 +NHIDDEN=2048 +NHEADS=16 +FFN_HIDDEN_SIZE=8192 +SEQ_LEN=2048 + +SAVE_INTERVAL=1500 + +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.999 \ + --adam-eps 1e-8 \ + --lr 2e-4 \ + --min-lr 1e-5 \ + --lr-decay-style cosine \ + --lr-decay-samples $TRAIN_SAMPLES \ + --lr-warmup-samples 216_320 \ + --clip-grad 1.0 \ + --weight-decay 1e-1 \ + " + +EXIT_OPTS=" \ + --exit-duration-in-mins 1190 \ + " + +GPT_ARGS=" \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --ffn-hidden-size $FFN_HIDDEN_SIZE \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --rampup-batch-size 32 32 2_000_000 \ + --train-samples $TRAIN_SAMPLES \ + --tokenizer-type PretrainedFromHF \ + --tokenizer-name-or-path bigscience/oscar_13_languages_equal_weight \ + --loss-scale 12 \ + --clip-grad 1.0 \ + --fp16 \ + --checkpoint-activations \ + --position-embedding-type alibi \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +OUTPUT_ARGS=" \ + --log-interval 200 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 10000 \ + --eval-iters 100 \ + --tensorboard-dir $TENSORBOARD_PATH \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=1 + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat < $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.launch \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --master_addr $MASTER_ADDR \ + --master_port $MASTER_PORT \ + " + +export CMD=" \ + `pwd`/pretrain_gpt.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --train-weighted-split-paths $TRAIN_DATA_PATH \ + --valid-weighted-split-paths $VALID_DATA_PATH \ + --test-weighted-split-paths $TEST_DATA_PATH \ + --data-impl mmap \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + + +# # clear old checkpoint as it'd mismatch while we sort things out +# rm -rf $SAVE_CHECKPOINT_PATH + + +echo $CMD + +# We create the folder where the logs and codecarbon will be stored. 
+mkdir -p $LOGS_PATH +# to debug - add echo (it exits and prints what it would have launched) +srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt diff --git a/train/tr6-1B3-prefix-lm/tr6-1B3-modeling-prefix-lm-hub-sync-logs.slurm b/train/tr6-1B3-prefix-lm/tr6-1B3-modeling-prefix-lm-hub-sync-logs.slurm new file mode 100644 index 0000000000000000000000000000000000000000..8d6cb250f9b50b948c75b95198375be4f7b24d8d --- /dev/null +++ b/train/tr6-1B3-prefix-lm/tr6-1B3-modeling-prefix-lm-hub-sync-logs.slurm @@ -0,0 +1,22 @@ +#!/bin/bash +#SBATCH --job-name=tr6-1B3-hub-sync-logs # job name +#SBATCH --ntasks=1 # number of MP tasks +#SBATCH --nodes=1 # number of nodes +#SBATCH --cpus-per-task=1 # number of cores per task +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --time=20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --partition=prepost + +# +echo "START TIME: $(date)" + +module load git-lfs + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr6-1B3-prefix-lm +LOGS_PATH=$DATA_OUTPUT_PATH/logs +BIG_SCIENCE_REPO_PATH=$DATA_OUTPUT_PATH/code/bigscience + +$BIG_SCIENCE_REPO_PATH/tools/hub-sync.py --repo-path $LOGS_PATH --patterns '*.out' -d + +echo "END TIME: $(date)" diff --git a/train/tr6-1B3-prefix-lm/tr6-1B3-modeling-prefix-lm-hub-sync-tensorboard.slurm b/train/tr6-1B3-prefix-lm/tr6-1B3-modeling-prefix-lm-hub-sync-tensorboard.slurm new file mode 100644 index 0000000000000000000000000000000000000000..18bfb7c2321dba0a385880d0ad30f1b4400b6818 --- /dev/null +++ b/train/tr6-1B3-prefix-lm/tr6-1B3-modeling-prefix-lm-hub-sync-tensorboard.slurm @@ -0,0 +1,21 @@ +#!/bin/bash +#SBATCH --job-name=tr6-1B3-hub-sync-tensorboard # job name +#SBATCH --ntasks=1 # number of MP tasks +#SBATCH --nodes=1 # number of nodes +#SBATCH --cpus-per-task=1 # number of cores per task +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --time=20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --partition=prepost + +echo "START TIME: $(date)" + +module load git-lfs + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr6-1B3-prefix-lm +TENSORBOARD_PATH=$DATA_OUTPUT_PATH/tensorboard +BIG_SCIENCE_REPO_PATH=$DATA_OUTPUT_PATH/code/bigscience + +$BIG_SCIENCE_REPO_PATH/tools/hub-sync.py --repo-path $TENSORBOARD_PATH --patterns '*tfevents*' -d + +echo "END TIME: $(date)" diff --git a/train/tr6-1B3-prefix-lm/tr6-1B3-modeling-prefix-lm-unbiased-loss.slurm b/train/tr6-1B3-prefix-lm/tr6-1B3-modeling-prefix-lm-unbiased-loss.slurm new file mode 100644 index 0000000000000000000000000000000000000000..05bd1cf7ade26a509a84b2066942871e6a6de4e3 --- /dev/null +++ b/train/tr6-1B3-prefix-lm/tr6-1B3-modeling-prefix-lm-unbiased-loss.slurm @@ -0,0 +1,189 @@ +#!/bin/bash +#SBATCH --job-name=prefix-lm-1B3.slurm +#SBATCH --qos=qos_gpu-t3 +#SBATCH --nodes=16 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! 
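# Annotation: ROUND (set below) switches between a short smoke test (ROUND=1: EXIT_INTERVAL=100,
# SAVE_INTERVAL=10) and the full run (ROUND=2: SAVE_INTERVAL=1500). Note that the trailing
# `else` branch only echoes "invalid ROUND" and keeps going with SAVE_INTERVAL unset;
# an untested, stricter variant (not part of the original script) would be:
#   else echo "invalid ROUND: $ROUND"; exit 1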
+#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=logs/%x-%j.out # output file name +#SBATCH --account=six@v100 + +# This is compared with tr3d-1B3-more-warmup.slurm + +set -x -e + +ROUND=2 +TESTING=0 + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr6-1B3-prefix-lm-unbiased-loss +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints +REPO_PATH=$DATA_OUTPUT_PATH/tr6-1B3-prefix-lm-unbiased-loss-logs +TENSORBOARD_PATH=$REPO_PATH/tensorboard +CODECARBON_PATH=$REPO_PATH/codecarbon +LOGS_PATH=$REPO_PATH/logs +MEGATRON_DEEPSPEED_REPO=$DATA_OUTPUT_PATH/code/Megatron-DeepSpeed + +VOCAB_FILE=$DATA_OUTPUT_PATH/data/gpt2-vocab.json +MERGE_FILE=$DATA_OUTPUT_PATH/data/gpt2-merges.txt +DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/oscar-en/meg-gpt2_text_document + +source $six_ALL_CCFRWORK/start-prod +conda activate thomas_prefix_lm # Debug deepspeed temporarily + +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 +cd $MEGATRON_DEEPSPEED_REPO + +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6000 + +# adjust depending on the number of the nodes + +# XXX: edit me +GPUS_PER_NODE=4 +NNODES=16 +PP_SIZE=4 # NLAYERS must be a multiple of PP_SIZE here +TP_SIZE=4 # always fixed to the size of a single node +DP_SIZE=$((NNODES*GPUS_PER_NODE/(PP_SIZE*TP_SIZE))) # will get derived automatically by trainer + +MICRO_BATCH_SIZE=8 +GLOBAL_BATCH_SIZE=512 +TRAIN_ITER=73_242_187 + +NLAYERS=24 +NHIDDEN=2048 +NHEADS=16 +FFN_HIDDEN_SIZE=8192 +SEQ_LEN=2048 + +if [[ ${ROUND} == 1 ]]; then EXIT_INTERVAL=100 SAVE_INTERVAL=10 +elif [[ ${ROUND} == 2 ]]; then SAVE_INTERVAL=1500 +else echo "invalid ROUND: $ROUND" +fi + +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.999 \ + --adam-eps 1e-8 \ + --lr 2e-4 \ + --min-lr 1e-5 \ + --lr-decay-style cosine \ + --lr-decay-samples 73_242_187 \ + --lr-warmup-samples 183_105 \ + --clip-grad 1.0 \ + --weight-decay 1e-1 \ + " + +EXIT_OPTS=" \ + --exit-duration-in-mins 1190 \ + " + +GPT_ARGS=" \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --ffn-hidden-size $FFN_HIDDEN_SIZE \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --rampup-batch-size 32 32 2_000_000 \ + --train-samples $TRAIN_ITER \ + --vocab-file $VOCAB_FILE \ + --merge-file $MERGE_FILE \ + --loss-scale 12 \ + --clip-grad 1.0 \ + --fp16 \ + --checkpoint-activations \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +OUTPUT_ARGS=" \ + --log-interval 200 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 1000 \ + --eval-iters 100 \ + --tensorboard-dir $TENSORBOARD_PATH \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=1 + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat < $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": 
$ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.launch \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --master_addr $MASTER_ADDR \ + --master_port $MASTER_PORT \ + " + +export CMD=" \ + `pwd`/pretrain_prefix_lm.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --data-path $DATA_PATH \ + --data-impl mmap \ + --split 949,50,1 \ + --distributed-backend nccl \ + --loss-on-targets-only \ + $DEEPSPEED_ARGS \ + " + + +# # clear old checkpoint as it'd mismatch while we sort things out +# rm -rf $SAVE_CHECKPOINT_PATH + + +echo $CMD + +# We create the folder where the logs and codecarbon will be stored. +mkdir -p $LOGS_PATH +# to debug - add echo (it exits and prints what it would have launched) +srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' 2>&1 | tee -a $LOGS_PATH/main_log.txt + diff --git a/train/tr6-1B3-prefix-lm/tr6-1B3-modeling-prefix-lm.slurm b/train/tr6-1B3-prefix-lm/tr6-1B3-modeling-prefix-lm.slurm new file mode 100644 index 0000000000000000000000000000000000000000..230efbcf921cc0f0658826427287bf22517fc721 --- /dev/null +++ b/train/tr6-1B3-prefix-lm/tr6-1B3-modeling-prefix-lm.slurm @@ -0,0 +1,181 @@ +#!/bin/bash +#SBATCH --job-name=prefix-lm-1B3.slurm +#SBATCH --qos=qos_gpu-t3 +#SBATCH --nodes=16 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! 
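# Reminder on `--rampup-batch-size 32 32 2_000_000` in GPT_ARGS below: as far as the
# Megatron-DeepSpeed argument goes, this starts the global batch size at 32 and grows it
# in increments of 32 up to GLOBAL_BATCH_SIZE=512 over the first 2M training samples.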
+#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=logs/%x-%j.out # output file name +#SBATCH --account=six@v100 + +# This is compared with tr3d-1B3-more-warmup.slurm + +set -x -e + +ROUND=2 +TESTING=0 + +OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr6-1B3-prefix-lm +MEGATRON_DEEPSPEED_REPO=$OUTPUT_PATH/code/Megatron-DeepSpeed + +VOCAB_FILE=$OUTPUT_PATH/data/gpt2-vocab.json +MERGE_FILE=$OUTPUT_PATH/data/gpt2-merges.txt +DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/oscar-en/meg-gpt2_text_document + +source $six_ALL_CCFRWORK/start-prod +conda activate thomas_prefix_lm # Debug deepspeed temporarily + +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 +cd $MEGATRON_DEEPSPEED_REPO + +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6000 + +# adjust depending on the number of the nodes + +# XXX: edit me +GPUS_PER_NODE=4 +NNODES=16 +PP_SIZE=4 # NLAYERS must be a multiple of PP_SIZE here +TP_SIZE=4 # always fixed to the size of a single node +DP_SIZE=$((NNODES*GPUS_PER_NODE/(PP_SIZE*TP_SIZE))) # will get derived automatically by trainer + +MICRO_BATCH_SIZE=8 +GLOBAL_BATCH_SIZE=512 +TRAIN_ITER=73_242_187 + +NLAYERS=24 +NHIDDEN=2048 +NHEADS=16 +FFN_HIDDEN_SIZE=8192 +SEQ_LEN=2048 + +if [[ ${ROUND} == 1 ]]; then EXIT_INTERVAL=100 SAVE_INTERVAL=10 +elif [[ ${ROUND} == 2 ]]; then SAVE_INTERVAL=1500 +else echo "invalid ROUND: $ROUND" +fi + +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.999 \ + --adam-eps 1e-8 \ + --lr 2e-4 \ + --min-lr 1e-5 \ + --lr-decay-style cosine \ + --lr-decay-samples 73_242_187 \ + --lr-warmup-samples 183_105 \ + --clip-grad 1.0 \ + --weight-decay 1e-1 \ + " + +EXIT_OPTS=" \ + --exit-duration-in-mins 1190 \ + " + +GPT_ARGS=" \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --ffn-hidden-size $FFN_HIDDEN_SIZE \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --rampup-batch-size 32 32 2_000_000 \ + --train-samples $TRAIN_ITER \ + --vocab-file $VOCAB_FILE \ + --merge-file $MERGE_FILE \ + --loss-scale 12 \ + --clip-grad 1.0 \ + --fp16 \ + --checkpoint-activations \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +OUTPUT_ARGS=" \ + --log-interval 200 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 1000 \ + --eval-iters 100 \ + --tensorboard-dir $OUTPUT_PATH/tensorboard \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=1 + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat < $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + 
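# Worked example of the GAS derivation mentioned above, with this script's own values:
# 16 nodes x 4 GPUs = 64 GPUs; TP=4 and PP=4 leave DP = 64/(4*4) = 4 replicas, so
# set_train_batch_size() should resolve gradient_accumulation_steps = GBS/(MBS*DP)
# = 512/(8*4) = 16 once the batch-size ramp-up reaches the full 512.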
+DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.launch \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --master_addr $MASTER_ADDR \ + --master_port $MASTER_PORT \ + " + +export CMD=" \ + `pwd`/pretrain_prefix_lm.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $OUTPUT_PATH/checkpoints \ + --load $OUTPUT_PATH/checkpoints \ + --data-path $DATA_PATH \ + --data-impl mmap \ + --split 949,50,1 \ + --distributed-backend nccl \ + --loss-on-targets-only \ + $DEEPSPEED_ARGS \ + " + + +# # clear old checkpoint as it'd mismatch while we sort things out +# rm -rf $SAVE_CHECKPOINT_PATH + + +echo $CMD + +# to debug - add echo (it exits and prints what it would have launched) +srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' 2>&1 | tee $OUTPUT_PATH/logs/tr6-1B3-prefix-lm.$SLURM_JOBID.out diff --git a/train/tr6-1B3-prefix-lm/tr6b-350M-modeling-prefix-lm-unbiased-loss.slurm b/train/tr6-1B3-prefix-lm/tr6b-350M-modeling-prefix-lm-unbiased-loss.slurm new file mode 100644 index 0000000000000000000000000000000000000000..e92f8cb09ce2305e4fbe81cc95a1b6363207f0ea --- /dev/null +++ b/train/tr6-1B3-prefix-lm/tr6b-350M-modeling-prefix-lm-unbiased-loss.slurm @@ -0,0 +1,183 @@ +#!/bin/bash +#SBATCH --job-name=prefix-lm-350M.slurm +#SBATCH --qos=qos_gpu-t3 +#SBATCH --nodes=4 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! +#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=logs/%x-%j.out # output file name +#SBATCH --account=six@v100 + +# New experiment in order to check intuition behind previous failed experiment: +# What we're exploring is the notion that the loss was originally biased to longer context (smaller prediction span) +# Check the PR for more details: https://github.com/bigscience-workshop/Megatron-DeepSpeed/pull/167 + +set -x -e + +ROUND=2 +TESTING=0 + +OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr6b-350M-prefix-lm-unbiased-loss +MEGATRON_DEEPSPEED_REPO=$OUTPUT_PATH/code/Megatron-DeepSpeed + +VOCAB_FILE=$OUTPUT_PATH/data/gpt2-vocab.json +MERGE_FILE=$OUTPUT_PATH/data/gpt2-merges.txt +DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/oscar-en/meg-gpt2_text_document + +source $six_ALL_CCFRWORK/start-prod +conda activate thomas_prefix_lm # Debug deepspeed temporarily + +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 +cd $MEGATRON_DEEPSPEED_REPO + +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6000 + +# adjust depending on the number of the nodes + +# XXX: edit me +GPUS_PER_NODE=4 +NNODES=4 +PP_SIZE=1 # 2 # NLAYERS must be a multiple of PP_SIZE here +TP_SIZE=4 # always fixed to the size of a single node +DP_SIZE=$((NNODES*GPUS_PER_NODE/(PP_SIZE*TP_SIZE))) # will get derived automatically by trainer + +MICRO_BATCH_SIZE=8 +GLOBAL_BATCH_SIZE=512 +TRAIN_ITER=73_242_187 + +NLAYERS=24 +NHIDDEN=1024 +NHEADS=16 +FFN_HIDDEN_SIZE=4096 +SEQ_LEN=2048 + +if [[ ${ROUND} == 1 ]]; then 
EXIT_INTERVAL=100 SAVE_INTERVAL=10 +elif [[ ${ROUND} == 2 ]]; then SAVE_INTERVAL=1500 +else echo "invalid ROUND: $ROUND" +fi + +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.999 \ + --adam-eps 1e-8 \ + --lr 3e-4 \ + --min-lr 1e-5 \ + --lr-decay-style cosine \ + --lr-decay-samples 73_242_187 \ + --lr-warmup-samples 183_105 \ + --clip-grad 1.0 \ + --weight-decay 1e-1 \ + " + +EXIT_OPTS=" \ + --exit-duration-in-mins 1190 \ + " + +GPT_ARGS=" \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --ffn-hidden-size $FFN_HIDDEN_SIZE \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --rampup-batch-size 32 32 2_000_000 \ + --train-samples $TRAIN_ITER \ + --vocab-file $VOCAB_FILE \ + --merge-file $MERGE_FILE \ + --loss-scale 12 \ + --clip-grad 1.0 \ + --fp16 \ + --checkpoint-activations \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +OUTPUT_ARGS=" \ + --log-interval 200 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 1000 \ + --eval-iters 100 \ + --tensorboard-dir $OUTPUT_PATH/tensorboard \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=1 + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat < $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.launch \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --master_addr $MASTER_ADDR \ + --master_port $MASTER_PORT \ + " + +export CMD=" \ + `pwd`/pretrain_prefix_lm.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $OUTPUT_PATH/checkpoints \ + --load $OUTPUT_PATH/checkpoints \ + --data-path $DATA_PATH \ + --data-impl mmap \ + --split 949,50,1 \ + --distributed-backend nccl \ + --loss-on-targets-only \ + $DEEPSPEED_ARGS \ + " + + +# # clear old checkpoint as it'd mismatch while we sort things out +# rm -rf $SAVE_CHECKPOINT_PATH + + +echo $CMD + +# to debug - add echo (it exits and prints what it would have launched) +srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' 2>&1 | tee $OUTPUT_PATH/logs/tr6b-350M-prefix-lm.$SLURM_JOBID.out diff --git a/train/tr6-1B3-prefix-lm/tr6b-350M-modeling-prefix-lm.slurm b/train/tr6-1B3-prefix-lm/tr6b-350M-modeling-prefix-lm.slurm new file mode 100644 index 0000000000000000000000000000000000000000..bce473a78c2d9d8991859b2a7c473275b6d7e52d --- /dev/null +++ b/train/tr6-1B3-prefix-lm/tr6b-350M-modeling-prefix-lm.slurm @@ -0,0 +1,179 @@ +#!/bin/bash +#SBATCH --job-name=prefix-lm-350M.slurm +#SBATCH --qos=qos_gpu-t3 +#SBATCH --nodes=4 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! 
+#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=logs/%x-%j.out # output file name +#SBATCH --account=six@v100 + +set -x -e + +ROUND=2 +TESTING=0 + +OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr6b-350M-prefix-lm +MEGATRON_DEEPSPEED_REPO=$OUTPUT_PATH/code/Megatron-DeepSpeed + +VOCAB_FILE=$OUTPUT_PATH/data/gpt2-vocab.json +MERGE_FILE=$OUTPUT_PATH/data/gpt2-merges.txt +DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/oscar-en/meg-gpt2_text_document + +source $six_ALL_CCFRWORK/start-prod +conda activate thomas_prefix_lm # Debug deepspeed temporarily + +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 +cd $MEGATRON_DEEPSPEED_REPO + +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6000 + +# adjust depending on the number of the nodes + +# XXX: edit me +GPUS_PER_NODE=4 +NNODES=4 +PP_SIZE=1 # 2 # NLAYERS must be a multiple of PP_SIZE here +TP_SIZE=4 # always fixed to the size of a single node +DP_SIZE=$((NNODES*GPUS_PER_NODE/(PP_SIZE*TP_SIZE))) # will get derived automatically by trainer + +MICRO_BATCH_SIZE=8 +GLOBAL_BATCH_SIZE=512 +TRAIN_ITER=73_242_187 + +NLAYERS=24 +NHIDDEN=1024 +NHEADS=16 +FFN_HIDDEN_SIZE=4096 +SEQ_LEN=2048 + +if [[ ${ROUND} == 1 ]]; then EXIT_INTERVAL=100 SAVE_INTERVAL=10 +elif [[ ${ROUND} == 2 ]]; then SAVE_INTERVAL=1500 +else echo "invalid ROUND: $ROUND" +fi + +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.999 \ + --adam-eps 1e-8 \ + --lr 3e-4 \ + --min-lr 1e-5 \ + --lr-decay-style cosine \ + --lr-decay-samples 73_242_187 \ + --lr-warmup-samples 183_105 \ + --clip-grad 1.0 \ + --weight-decay 1e-1 \ + " + +EXIT_OPTS=" \ + --exit-duration-in-mins 1190 \ + " + +GPT_ARGS=" \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --ffn-hidden-size $FFN_HIDDEN_SIZE \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --rampup-batch-size 32 32 2_000_000 \ + --train-samples $TRAIN_ITER \ + --vocab-file $VOCAB_FILE \ + --merge-file $MERGE_FILE \ + --loss-scale 12 \ + --clip-grad 1.0 \ + --fp16 \ + --checkpoint-activations \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +OUTPUT_ARGS=" \ + --log-interval 200 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 1000 \ + --eval-iters 100 \ + --tensorboard-dir $OUTPUT_PATH/tensorboard \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=1 + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat < $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + 
--deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.launch \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --master_addr $MASTER_ADDR \ + --master_port $MASTER_PORT \ + " + +export CMD=" \ + `pwd`/pretrain_prefix_lm.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $OUTPUT_PATH/checkpoints \ + --load $OUTPUT_PATH/checkpoints \ + --data-path $DATA_PATH \ + --data-impl mmap \ + --split 949,50,1 \ + --distributed-backend nccl \ + --loss-on-targets-only \ + $DEEPSPEED_ARGS \ + " + + +# # clear old checkpoint as it'd mismatch while we sort things out +# rm -rf $SAVE_CHECKPOINT_PATH + + +echo $CMD + +# to debug - add echo (it exits and prints what it would have launched) +srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' 2>&1 | tee $OUTPUT_PATH/logs/tr6b-350M-prefix-lm.$SLURM_JOBID.out diff --git a/train/tr6-1B3-prefix-lm/tr6c-350M-reset-attention.slurm b/train/tr6-1B3-prefix-lm/tr6c-350M-reset-attention.slurm new file mode 100644 index 0000000000000000000000000000000000000000..411b14aaa1c9e700bfb6af6afa20f446739aab72 --- /dev/null +++ b/train/tr6-1B3-prefix-lm/tr6c-350M-reset-attention.slurm @@ -0,0 +1,180 @@ +#!/bin/bash +#SBATCH --job-name=prefix-lm-350M.slurm +#SBATCH --qos=qos_gpu-t3 +#SBATCH --nodes=4 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! +#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=logs/%x-%j.out # output file name +#SBATCH --account=six@v100 + +set -x -e + +ROUND=2 +TESTING=0 + +OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr6c-350M-prefix-lm-reset-attention-mask +MEGATRON_DEEPSPEED_REPO=$OUTPUT_PATH/code/Megatron-DeepSpeed + +VOCAB_FILE=$OUTPUT_PATH/data/gpt2-vocab.json +MERGE_FILE=$OUTPUT_PATH/data/gpt2-merges.txt +DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/oscar-en/meg-gpt2_text_document + +source $six_ALL_CCFRWORK/start-prod +conda activate thomas_prefix_lm # Debug deepspeed temporarily + +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 +cd $MEGATRON_DEEPSPEED_REPO + +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6000 + +# adjust depending on the number of the nodes + +# XXX: edit me +GPUS_PER_NODE=4 +NNODES=4 +PP_SIZE=1 # 2 # NLAYERS must be a multiple of PP_SIZE here +TP_SIZE=4 # always fixed to the size of a single node +DP_SIZE=$((NNODES*GPUS_PER_NODE/(PP_SIZE*TP_SIZE))) # will get derived automatically by trainer + +MICRO_BATCH_SIZE=8 +GLOBAL_BATCH_SIZE=512 +TRAIN_ITER=73_242_187 + +NLAYERS=24 +NHIDDEN=1024 +NHEADS=16 +FFN_HIDDEN_SIZE=4096 +SEQ_LEN=2048 + +if [[ ${ROUND} == 1 ]]; then EXIT_INTERVAL=100 SAVE_INTERVAL=10 +elif [[ ${ROUND} == 2 ]]; then SAVE_INTERVAL=1500 +else echo "invalid ROUND: $ROUND" +fi + +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.999 \ + --adam-eps 1e-8 \ + --lr 3e-4 \ + --min-lr 1e-5 \ + --lr-decay-style cosine \ + --lr-decay-samples 73_242_187 \ + --lr-warmup-samples 183_105 \ + --clip-grad 1.0 \ + --weight-decay 
1e-1 \ + " + +EXIT_OPTS=" \ + --exit-duration-in-mins 1190 \ + " + +GPT_ARGS=" \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --ffn-hidden-size $FFN_HIDDEN_SIZE \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --rampup-batch-size 32 32 2_000_000 \ + --train-samples $TRAIN_ITER \ + --vocab-file $VOCAB_FILE \ + --merge-file $MERGE_FILE \ + --loss-scale 12 \ + --clip-grad 1.0 \ + --fp16 \ + --checkpoint-activations \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +OUTPUT_ARGS=" \ + --log-interval 200 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 1000 \ + --eval-iters 100 \ + --tensorboard-dir $OUTPUT_PATH/tensorboard \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=1 + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat < $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.launch \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --master_addr $MASTER_ADDR \ + --master_port $MASTER_PORT \ + " + +export CMD=" \ + `pwd`/pretrain_prefix_lm.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $OUTPUT_PATH/checkpoints \ + --load $OUTPUT_PATH/checkpoints \ + --data-path $DATA_PATH \ + --data-impl mmap \ + --split 949,50,1 \ + --distributed-backend nccl \ + --loss-on-targets-only \ + --reset-attention-mask \ + $DEEPSPEED_ARGS \ + " + + +# # clear old checkpoint as it'd mismatch while we sort things out +# rm -rf $SAVE_CHECKPOINT_PATH + + +echo $CMD + +# to debug - add echo (it exits and prints what it would have launched) +srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' 2>&1 | tee $OUTPUT_PATH/logs/tr6c-350M-reset_attention_mask.$SLURM_JOBID.out diff --git a/train/tr6-1B3-prefix-lm/tr6d-350M-pile.slurm b/train/tr6-1B3-prefix-lm/tr6d-350M-pile.slurm new file mode 100644 index 0000000000000000000000000000000000000000..d21619b6ee08d5cbdf2b8fba3ca8d001e71d7295 --- /dev/null +++ b/train/tr6-1B3-prefix-lm/tr6d-350M-pile.slurm @@ -0,0 +1,187 @@ +#!/bin/bash +#SBATCH --job-name=prefix-lm-350M.slurm +#SBATCH --qos=qos_gpu-t3 +#SBATCH --nodes=4 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! 
+#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=logs/%x-%j.out # output file name +#SBATCH --account=six@v100 + +set -x -e + +ROUND=2 +TESTING=0 + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr6d-350M-prefix-lm-pile +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints +REPO_PATH=$DATA_OUTPUT_PATH/tr6d-350M-prefix-lm-pile-logs +TENSORBOARD_PATH=$REPO_PATH/tensorboard +CODECARBON_PATH=$REPO_PATH/codecarbon +LOGS_PATH=$REPO_PATH/logs +# You need to git clone the Megatron-DeepSpeed +MEGATRON_DEEPSPEED_REPO=$DATA_OUTPUT_PATH/code/Megatron-DeepSpeed + +VOCAB_FILE=$DATA_OUTPUT_PATH/data/gpt2-vocab.json +MERGE_FILE=$DATA_OUTPUT_PATH/data/gpt2-merges.txt +DATA_PATH=$six_ALL_CCFRWORK/datasets/pile/pile_text_document + +source $six_ALL_CCFRWORK/start-prod +conda activate thomas_prefix_lm # Debug deepspeed temporarily + +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 +cd $MEGATRON_DEEPSPEED_REPO + +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6000 + +# adjust depending on the number of the nodes + +# XXX: edit me +GPUS_PER_NODE=4 +NNODES=4 +PP_SIZE=1 # 2 # NLAYERS must be a multiple of PP_SIZE here +TP_SIZE=1 # always fixed to the size of a single node +DP_SIZE=$((NNODES*GPUS_PER_NODE/(PP_SIZE*TP_SIZE))) # will get derived automatically by trainer + +MICRO_BATCH_SIZE=2 +GLOBAL_BATCH_SIZE=512 +TRAIN_ITER=73_242_187 + +NLAYERS=24 +NHIDDEN=1024 +NHEADS=16 +FFN_HIDDEN_SIZE=4096 +SEQ_LEN=2048 + +if [[ ${ROUND} == 1 ]]; then EXIT_INTERVAL=100 SAVE_INTERVAL=10 +elif [[ ${ROUND} == 2 ]]; then SAVE_INTERVAL=1500 +else echo "invalid ROUND: $ROUND" +fi + +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.999 \ + --adam-eps 1e-8 \ + --lr 3e-4 \ + --min-lr 1e-5 \ + --lr-decay-style cosine \ + --lr-decay-samples 73_242_187 \ + --lr-warmup-samples 183_105 \ + --clip-grad 1.0 \ + --weight-decay 1e-1 \ + " + +EXIT_OPTS=" \ + --exit-duration-in-mins 1190 \ + " + +GPT_ARGS=" \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --ffn-hidden-size $FFN_HIDDEN_SIZE \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --rampup-batch-size 32 32 2_000_000 \ + --train-samples $TRAIN_ITER \ + --vocab-file $VOCAB_FILE \ + --merge-file $MERGE_FILE \ + --loss-scale 12 \ + --clip-grad 1.0 \ + --fp16 \ + --checkpoint-activations \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +OUTPUT_ARGS=" \ + --log-interval 200 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 1000 \ + --eval-iters 100 \ + --tensorboard-dir $TENSORBOARD_PATH \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=1 + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat < $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + 
"enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.launch \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --master_addr $MASTER_ADDR \ + --master_port $MASTER_PORT \ + " + +export CMD=" \ + `pwd`/pretrain_prefix_lm.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --data-path $DATA_PATH \ + --data-impl mmap \ + --split 949,50,1 \ + --distributed-backend nccl \ + --loss-on-targets-only \ + $DEEPSPEED_ARGS \ + " + + +# # clear old checkpoint as it'd mismatch while we sort things out +# rm -rf $SAVE_CHECKPOINT_PATH + + +echo $CMD + +# We create the folder where the logs and codecarbon will be stored. +mkdir -p $LOGS_PATH +# to debug - add echo (it exits and prints what it would have launched) +srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' 2>&1 | tee -a $LOGS_PATH/main_log.txt diff --git a/train/tr6-1B3-prefix-lm/tr6e-1B3-pile.slurm b/train/tr6-1B3-prefix-lm/tr6e-1B3-pile.slurm new file mode 100644 index 0000000000000000000000000000000000000000..3b8a6b949248db27567f304d947659159fc7059e --- /dev/null +++ b/train/tr6-1B3-prefix-lm/tr6e-1B3-pile.slurm @@ -0,0 +1,189 @@ +#!/bin/bash +#SBATCH --job-name=prefix-lm-1B3.slurm +#SBATCH --qos=qos_gpu-t3 +#SBATCH --nodes=16 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! 
+#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=logs/%x-%j.out # output file name +#SBATCH --account=six@v100 + +# This is compared with tr3d-1B3-more-warmup.slurm + +set -x -e + +ROUND=2 +TESTING=0 + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr6e-1B3-pile +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints +REPO_PATH=$DATA_OUTPUT_PATH/tr6e-1B3-pile-logs +TENSORBOARD_PATH=$REPO_PATH/tensorboard +CODECARBON_PATH=$REPO_PATH/codecarbon +LOGS_PATH=$REPO_PATH/logs +MEGATRON_DEEPSPEED_REPO=$DATA_OUTPUT_PATH/code/Megatron-DeepSpeed + +VOCAB_FILE=$DATA_OUTPUT_PATH/data/gpt2-vocab.json +MERGE_FILE=$DATA_OUTPUT_PATH/data/gpt2-merges.txt +DATA_PATH=$six_ALL_CCFRWORK/datasets/pile/pile_text_document + +source $six_ALL_CCFRWORK/start-prod +conda activate thomas_prefix_lm # Debug deepspeed temporarily + +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 +cd $MEGATRON_DEEPSPEED_REPO + +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6000 + +# adjust depending on the number of the nodes + +# XXX: edit me +GPUS_PER_NODE=4 +NNODES=16 +PP_SIZE=2 # NLAYERS must be a multiple of PP_SIZE here +TP_SIZE=1 # always fixed to the size of a single node +DP_SIZE=$((NNODES*GPUS_PER_NODE/(PP_SIZE*TP_SIZE))) # will get derived automatically by trainer + +MICRO_BATCH_SIZE=1 +GLOBAL_BATCH_SIZE=512 +TRAIN_ITER=146_484_375 + +NLAYERS=24 +NHIDDEN=2048 +NHEADS=16 +FFN_HIDDEN_SIZE=8192 +SEQ_LEN=2048 + +if [[ ${ROUND} == 1 ]]; then EXIT_INTERVAL=100 SAVE_INTERVAL=10 +elif [[ ${ROUND} == 2 ]]; then SAVE_INTERVAL=1500 +else echo "invalid ROUND: $ROUND" +fi + +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.999 \ + --adam-eps 1e-8 \ + --lr 2e-4 \ + --min-lr 1e-5 \ + --lr-decay-style cosine \ + --lr-decay-samples 146_484_375 \ + --lr-warmup-samples 183_105 \ + --clip-grad 1.0 \ + --weight-decay 1e-1 \ + " + +EXIT_OPTS=" \ + --exit-duration-in-mins 1190 \ + " + +GPT_ARGS=" \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --ffn-hidden-size $FFN_HIDDEN_SIZE \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --rampup-batch-size 32 32 2_000_000 \ + --train-samples $TRAIN_ITER \ + --vocab-file $VOCAB_FILE \ + --merge-file $MERGE_FILE \ + --loss-scale 12 \ + --clip-grad 1.0 \ + --fp16 \ + --checkpoint-activations \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +OUTPUT_ARGS=" \ + --log-interval 200 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 1000 \ + --eval-iters 100 \ + --tensorboard-dir $TENSORBOARD_PATH \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=1 + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat < $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, 
+ "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.launch \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --master_addr $MASTER_ADDR \ + --master_port $MASTER_PORT \ + " + +export CMD=" \ + `pwd`/pretrain_prefix_lm.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --data-path $DATA_PATH \ + --data-impl mmap \ + --split 949,50,1 \ + --distributed-backend nccl \ + --loss-on-targets-only \ + $DEEPSPEED_ARGS \ + " + + +# # clear old checkpoint as it'd mismatch while we sort things out +# rm -rf $SAVE_CHECKPOINT_PATH + + +echo $CMD + +# We create the folder where the logs and codecarbon will be stored. +mkdir -p $LOGS_PATH +# to debug - add echo (it exits and prints what it would have launched) +srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' 2>&1 | tee -a $LOGS_PATH/main_log.txt + diff --git a/train/tr6-1B3-prefix-lm/tr6f-1B3-oscar-no-loss-on-targets-only.slurm b/train/tr6-1B3-prefix-lm/tr6f-1B3-oscar-no-loss-on-targets-only.slurm new file mode 100644 index 0000000000000000000000000000000000000000..497be4d7b874b66a7677a206d424aba3e6da3730 --- /dev/null +++ b/train/tr6-1B3-prefix-lm/tr6f-1B3-oscar-no-loss-on-targets-only.slurm @@ -0,0 +1,186 @@ +#!/bin/bash +#SBATCH --job-name=prefix-lm-1B3.slurm +#SBATCH --qos=qos_gpu-t3 +#SBATCH --nodes=16 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! 
+#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=logs/%x-%j.out # output file name +#SBATCH --account=six@v100 + +set -x -e + +ROUND=2 +TESTING=0 + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr6f-1B3-oscar-no-loss-on-targets-only +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints +REPO_PATH=$DATA_OUTPUT_PATH/tr6f-1B3-oscar-no-loss-on-targets-only-logs +TENSORBOARD_PATH=$REPO_PATH/tensorboard +CODECARBON_PATH=$REPO_PATH/codecarbon +LOGS_PATH=$REPO_PATH/logs +MEGATRON_DEEPSPEED_REPO=$DATA_OUTPUT_PATH/code/Megatron-DeepSpeed + +VOCAB_FILE=$DATA_OUTPUT_PATH/data/gpt2-vocab.json +MERGE_FILE=$DATA_OUTPUT_PATH/data/gpt2-merges.txt +DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/oscar-en/meg-gpt2_text_document + +source $six_ALL_CCFRWORK/start-prod +conda activate thomas_prefix_lm # Debug deepspeed temporarily + +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 +cd $MEGATRON_DEEPSPEED_REPO + +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6000 + +# adjust depending on the number of the nodes + +# XXX: edit me +GPUS_PER_NODE=4 +NNODES=16 +PP_SIZE=2 # NLAYERS must be a multiple of PP_SIZE here +TP_SIZE=1 # always fixed to the size of a single node +DP_SIZE=$((NNODES*GPUS_PER_NODE/(PP_SIZE*TP_SIZE))) # will get derived automatically by trainer + +MICRO_BATCH_SIZE=1 +GLOBAL_BATCH_SIZE=512 +TRAIN_ITER=73_242_187 + +NLAYERS=24 +NHIDDEN=2048 +NHEADS=16 +FFN_HIDDEN_SIZE=8192 +SEQ_LEN=2048 + +if [[ ${ROUND} == 1 ]]; then EXIT_INTERVAL=100 SAVE_INTERVAL=10 +elif [[ ${ROUND} == 2 ]]; then SAVE_INTERVAL=1500 +else echo "invalid ROUND: $ROUND" +fi + +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.999 \ + --adam-eps 1e-8 \ + --lr 2e-4 \ + --min-lr 1e-5 \ + --lr-decay-style cosine \ + --lr-decay-samples 73_242_187 \ + --lr-warmup-samples 183_105 \ + --clip-grad 1.0 \ + --weight-decay 1e-1 \ + " + +EXIT_OPTS=" \ + --exit-duration-in-mins 1190 \ + " + +GPT_ARGS=" \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --ffn-hidden-size $FFN_HIDDEN_SIZE \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --rampup-batch-size 32 32 2_000_000 \ + --train-samples $TRAIN_ITER \ + --vocab-file $VOCAB_FILE \ + --merge-file $MERGE_FILE \ + --loss-scale 12 \ + --clip-grad 1.0 \ + --fp16 \ + --checkpoint-activations \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +OUTPUT_ARGS=" \ + --log-interval 200 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 1000 \ + --eval-iters 100 \ + --tensorboard-dir $TENSORBOARD_PATH \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=1 + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat < $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + 
"enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.launch \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --master_addr $MASTER_ADDR \ + --master_port $MASTER_PORT \ + " + +export CMD=" \ + `pwd`/pretrain_prefix_lm.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --data-path $DATA_PATH \ + --data-impl mmap \ + --split 949,50,1 \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + + +# # clear old checkpoint as it'd mismatch while we sort things out +# rm -rf $SAVE_CHECKPOINT_PATH + + +echo $CMD + +# We create the folder where the logs and codecarbon will be stored. +mkdir -p $LOGS_PATH +# to debug - add echo (it exits and prints what it would have launched) +srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' 2>&1 | tee -a $LOGS_PATH/main_log.txt + diff --git a/train/tr7-alibi/tr7b-check-with-train-script.slurm b/train/tr7-alibi/tr7b-check-with-train-script.slurm new file mode 100644 index 0000000000000000000000000000000000000000..93268fd5ac42351f877e597ebf488aefe1b82e90 --- /dev/null +++ b/train/tr7-alibi/tr7b-check-with-train-script.slurm @@ -0,0 +1,177 @@ +#!/bin/bash +#SBATCH --job-name=350M-alibi-check-eval-1-seq.slurm +#SBATCH --nodes=4 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! +#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=/gpfsdswork/projects/rech/six/uue59kq/logs/%x-%j.out # output file name +#SBATCH --error=/gpfsdswork/projects/rech/six/uue59kq/logs/%x-%j.err # error file name +#SBATCH --account=six@v100 + +set -x -e + + +ROUND=2 +TESTING=0 + +OUTPUT_PATH=$six_ALL_CCFRSCRATCH/synched_exps/tr7b-350M-alibi +MEGATRON_DEEPSPEED_REPO=$SCRATCH/repos/Megatron-DeepSpeed + +VOCAB_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-vocab.json +MERGE_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-merges.txt +DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/oscar-en/meg-gpt2_text_document + +source $six_ALL_CCFRWORK/start-prod +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 +cd $MEGATRON_DEEPSPEED_REPO + +MASTER_ADDR=`perl -le '$_=$ENV{"SLURM_JOB_NODELIST"}; s/,.*//; s/-.*//; s/\[//; print'` +MASTER_PORT=6000 + +# adjust depending on the number of the nodes + +# XXX: edit me +GPUS_PER_NODE=4 +NNODES=4 +PP_SIZE=2 # NLAYERS must be a multiple of PP_SIZE here +TP_SIZE=4 # always fixed to the size of a single node +DP_SIZE=$((NNODES*GPUS_PER_NODE/(PP_SIZE*TP_SIZE))) # will get derived automatically by trainer + +MICRO_BATCH_SIZE=8 +GLOBAL_BATCH_SIZE=512 +TRAIN_ITER=73_242_187 + +NLAYERS=24 +NHIDDEN=1024 +NHEADS=16 +FFN_HIDDEN_SIZE=4096 +SEQ_LEN=2048 + +if [[ ${ROUND} == 1 ]]; then EXIT_INTERVAL=100 SAVE_INTERVAL=10 +elif [[ ${ROUND} == 2 ]]; then 
SAVE_INTERVAL=1500 +else echo "invalid ROUND: $ROUND" +fi + +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.999 \ + --adam-eps 1e-8 \ + --lr 3e-4 \ + --min-lr 1e-5 \ + --lr-decay-style cosine \ + --lr-decay-samples 73_242_187 \ + --lr-warmup-samples 183_105 \ + --clip-grad 1.0 \ + --weight-decay 1e-1 \ + " + +EXIT_OPTS=" \ + --exit-duration-in-mins 1190 \ + " + +GPT_ARGS=" \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --ffn-hidden-size $FFN_HIDDEN_SIZE \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --rampup-batch-size 32 32 2_000_000 \ + --train-samples $TRAIN_ITER \ + --vocab-file $VOCAB_FILE \ + --merge-file $MERGE_FILE \ + --loss-scale 12 \ + --clip-grad 1.0 \ + --fp16 \ + --checkpoint-activations \ + --position-embedding-type alibi \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +OUTPUT_ARGS=" \ + --log-interval 200 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 1000 \ + --eval-iters 100 \ + --tensorboard-dir $OUTPUT_PATH/tensorboard \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=1 + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat < $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.launch \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --master_addr $MASTER_ADDR \ + --master_port $MASTER_PORT \ + " + +export CMD=" \ + `pwd`/pretrain_gpt.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $OUTPUT_PATH/checkpoints \ + --load $OUTPUT_PATH/checkpoints \ + --data-path $DATA_PATH \ + --data-impl mmap \ + --split 949,50,1 \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + +# # clear old checkpoint as it'd mismatch while we sort things out +# rm -rf $SAVE_CHECKPOINT_PATH + + +echo $CMD + +# to debug - add echo (it exits and prints what it would have launched) +srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' 2>&1 | tee $OUTPUT_PATH/logs/tr7b-350M-alibi.$SLURM_JOBID.out diff --git a/train/tr7-alibi/tr7c-1B3-modeling-alibi.slurm b/train/tr7-alibi/tr7c-1B3-modeling-alibi.slurm new file mode 100644 index 0000000000000000000000000000000000000000..fa7569d0040d5a6dff3f3184b6a81ea8d2639219 --- /dev/null +++ b/train/tr7-alibi/tr7c-1B3-modeling-alibi.slurm @@ -0,0 +1,183 @@ +#!/bin/bash +#SBATCH --job-name=tr7c-1B3-alibi.slurm +#SBATCH --qos=qos_gpu-t3 +#SBATCH --nodes=16 +#SBATCH --constraint=v100-16g +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! 
+#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=/gpfsdswork/projects/rech/six/uue59kq/logs/%x-%j.out # output file name +#SBATCH --error=/gpfsdswork/projects/rech/six/uue59kq/logs/%x-%j.err # error file name +#SBATCH --account=six@v100 + +set -x -e + + +# TODO: modify these for your training setup, just Ctrl-F replace +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/synched_exps/tr7c-1B3-alibi +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints +REPO_PATH=$DATA_OUTPUT_PATH/tr7c-1B3-alibi-logs +TENSORBOARD_PATH=$REPO_PATH/tensorboard +CODECARBON_PATH=$REPO_PATH/codecarbon +LOGS_PATH=$REPO_PATH/logs +MEGATRON_DEEPSPEED_REPO=$SCRATCH/repos/Megatron-DeepSpeed + +# TODO: you may change the dataset, some examples are at tr3-1B3-baseline (tr3 = c4 + t5-tokenizer, tr3m = the Pile) +VOCAB_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-vocab.json +MERGE_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-merges.txt +DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/oscar-en/meg-gpt2_text_document + +# defining the right environment variables +source $six_ALL_CCFRWORK/start-prod +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 +cd $MEGATRON_DEEPSPEED_REPO + +# so processes know who to talk to +MASTER_ADDR=`perl -le '$_=$ENV{"SLURM_JOB_NODELIST"}; s/,.*//; s/-.*//; s/\[//; print'` +MASTER_PORT=6000 + +# TODO: this is our base config for 1B3, edit PP/TP/batch size/model config if smaller or bigger +GPUS_PER_NODE=4 +NNODES=16 +PP_SIZE=4 # NLAYERS must be a multiple of PP_SIZE here +TP_SIZE=4 # always fixed to the size of a single node +DP_SIZE=$((NNODES*GPUS_PER_NODE/(PP_SIZE*TP_SIZE))) # will get derived automatically by trainer + +MICRO_BATCH_SIZE=8 +GLOBAL_BATCH_SIZE=512 +TRAIN_ITER=73_242_187 + +NLAYERS=24 +NHIDDEN=2048 +NHEADS=16 +FFN_HIDDEN_SIZE=8192 +SEQ_LEN=2048 + +SAVE_INTERVAL=1500 + +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.999 \ + --adam-eps 1e-8 \ + --lr 2e-4 \ + --min-lr 1e-5 \ + --lr-decay-style cosine \ + --lr-decay-samples 73_242_187 \ + --lr-warmup-samples 183_105 \ + --clip-grad 1.0 \ + --weight-decay 1e-1 \ + " + +EXIT_OPTS=" \ + --exit-duration-in-mins 1190 \ + " + +GPT_ARGS=" \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --ffn-hidden-size $FFN_HIDDEN_SIZE \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --rampup-batch-size 32 32 2_000_000 \ + --train-samples $TRAIN_ITER \ + --vocab-file $VOCAB_FILE \ + --merge-file $MERGE_FILE \ + --loss-scale 12 \ + --clip-grad 1.0 \ + --fp16 \ + --no-query-key-layer-scaling \ + --checkpoint-activations \ + --position-embedding-type alibi \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +OUTPUT_ARGS=" \ + --log-interval 200 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 1000 \ + --eval-iters 100 \ + --tensorboard-dir $TENSORBOARD_PATH \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " +# TODO: Add --codecarbon-dir $CODECARBON_PATH \ if you want to use codecarbon, not adding it for now to make the current +# 
series of experiments consistent, especially speed-wise. Adding it once Tr6 and Tr7 are done + +ZERO_STAGE=1 + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat < $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.launch \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --master_addr $MASTER_ADDR \ + --master_port $MASTER_PORT \ + " + +export CMD=" \ + `pwd`/pretrain_gpt.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --data-path $DATA_PATH \ + --data-impl mmap \ + --split 949,50,1 \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + +# # clear old checkpoint as it'd mismatch while we sort things out +# rm -rf $SAVE_CHECKPOINT_PATH + + +echo $CMD + +# to debug - add echo (it exits and prints what it would have launched) +srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' 2>&1 | tee -a $LOGS_PATH/tr7c-1B3-alibi.$SLURM_JOBID.out diff --git a/train/tr8b-104B/README.md b/train/tr8b-104B/README.md new file mode 100644 index 0000000000000000000000000000000000000000..80f5d82ae07f4361a8d97a6d5173088784b5100b --- /dev/null +++ b/train/tr8b-104B/README.md @@ -0,0 +1,37 @@ +# Variations to experiments with 104B + + +## Curriculum Learning + +- [slurm script](https://github.com/bigscience-workshop/bigscience/blob/master/train/tr8b-104B/tr8b-104B-cl.slurm) +- [TB](https://huggingface.co/bigscience/tr8b-104B-logs/tensorboard) +- [chronicles](https://github.com/bigscience-workshop/bigscience/blob/master/train/tr8b-104B/chronicles.md) + + +## BitsNBytes (BNB) + +- [slurm script](https://github.com/bigscience-workshop/bigscience/blob/master/train/tr8b-104B/tr8b-104B-bnb.slurm) +- [TB](https://huggingface.co/bigscience/tr8b-104B-logs/tensorboard) +- [chronicles](https://github.com/bigscience-workshop/bigscience/blob/master/train/tr8b-104B/chronicles.md) + + +https://github.com/facebookresearch/bitsandbytes + +Needs: + +``` +pip install bitsandbytes-cuda111 +``` +and adding: +``` +--use-bnb-optimizer \ +``` + +since the optimizer uses almost 3/4 less memory, was able to reconfigure the setup to give faster training (about 10% speed-up +4 TFLOPs leading to 44 TFLOPs). For tune-up experiments see [this doc](https://github.com/bigscience-workshop/bigscience/blob/master/experiments/tr8-104B.md#bnb) + +Notes from Tim: + +> It seems that 8-bit Adam is better than 32-bit Adam for larger learning rates, but that difference decreases once the learning rate is decayed to zero. I have not seen any instabilities, and no instabilities have been reported to me, but that might be an indicator that 8-bit Adam might have some issues when learning rates are small at the end of training. 
+> Another view is that this might also just indicate the updates are so small that they are dominated by the noise from quantization and no instability is present. It is so difficult to say because so few models were run at a very large scale, and even though MoEs are large, the likely behave very differently than large dense transformers + +> I think it could be worth a try to try a higher lr. This could counteract potential degradation at the end of training when the lr is too small diff --git a/train/tr8b-104B/chronicles.md b/train/tr8b-104B/chronicles.md new file mode 100644 index 0000000000000000000000000000000000000000..9d2fae16253b477180fdc321168f55a257de10b7 --- /dev/null +++ b/train/tr8b-104B/chronicles.md @@ -0,0 +1,596 @@ +# Chronicles + +Same as tr8-104B but using new additions such as: + +1. Curriculum Learning (CL) https://arxiv.org/abs/2108.06084 +2. BitsNBytes (BNB) https://github.com/facebookresearch/bitsandbytes + +https://huggingface.co/bigscience/tr8b-104B-logs/tensorboard + +## CL Experiment 1 + +Trying to figure out good baseline settings for CL + +[baseline script]( +https://github.com/bigscience-workshop/bigscience/blob/82fe642fb1eedd0361bac6899b79769e2c842c9f/train/tr8b-104B/tr8b-104B-cl.slurm) + +Stopped training at iter 500: + +![tr8b-104B-cl-exp-01.png](images/tr8b-104B-cl-exp-01.png) + + +## CL Experiment 2 + +finetuned exp 1 for more optimal performance + +> Conglong Li + +Here are my recommendation for next time: GPT-3 uses 375M token for LR warmup. Assuming the average seqlen is about 100 during LR warmup for CL (a very rough estimation), then we should set LR_WARMUP_SAMPLES= 375e6/100 = 3_750_000, this leads to 375e6/100/2048 = 1.8K warmup steps which sounds good to me + +For peak LR, yeah 1e-4 might be a good next candidate, together with LR_WARMUP_SAMPLES=3_750_000 + +GPT-3 175B uses 6e-5 for batch size 1.6K, so 1e-4 for batch size 2K seems to be an appropriate/moderate increase. + +Also change eval from every 1k to 150, since we can't tell from lm loss what's going on - we need the eval loss as it is reported for the full SEQLEN (whereas train lm loss just for the current CL SEQLEN instead). +150 since that's the current period between switching seqlen. + +``` +perl -pi -e 's|--lr 6e-5|--lr 1e-4|' *slurm +perl -pi -e 's|LR_WARMUP_SAMPLES=216_320|LR_WARMUP_SAMPLES=3_750_000|' *slurm +perl -pi -e 's|--eval-interval 1000|--eval-interval 150|' *slurm +``` + +[script](https://github.com/bigscience-workshop/bigscience/blob/d5fc4b22d7e88e87b4b9ec610b6c522b9a8c7a8d/train/tr8b-104B/tr8b-104B-cl.slurm) + +we paused here and decided to change a few things to better match other experiments and thus be able to compare CL's impact against the baseline. So the next experiment started from scratch. + +## CL Experiment 3 + +Same as exp-2, but + +``` + --lr 6e-5 \ + --embed-layernorm \ +``` + +that is activating Embed LayerNorm that we found to be superior to all other experiments so far, and lowering `lr` to the same as the emb-norm experiments so that it's easier to compare the performance and quality. + +``` +perl -pi -e 's|--lr 1e-4|--lr 6e-5|' *cl*slurm +perl -pi -e 's|(--checkpoint-activations \\)|$1\n --embed-layernorm \\|' *cl*slurm +``` + +[script](https://github.com/bigscience-workshop/bigscience/blob/5bc0d43cb782291b48c98cfba2d55ce0188f9961/train/tr8b-104B/tr8b-104B-cl.slurm) + + +Additionally, at iteration 15k the training was duplicated and one set of gpus continued running on V100 32GB and another set was run on A100 80GB gpus. 
Everything was identical - the seed, the topology. The 2 runs proved to be dramatically different from each other, though both eventually diverged as all other experiments in this series, we couldn't break the 18k iteration barrier and didn't have time to continue experimenting as we had to move to final model training. + +So the graphs show both V100 and A100 tracks. + +![tr8b-104B-cl-exp-03.png](images/tr8b-104B-cl-exp-03.png) + + +## BNB Experiment 1 + +[script](https://github.com/bigscience-workshop/bigscience/blob/7a1481355a1abe097a9fb2c9021c292cb9971da3/train/tr8b-104B/tr8b-104B-bnb.slurm) + +![tr8b-104B-bnb-exp-01.png](images/tr8b-104B-bnb-exp-01.png) + +Tim: + +what I have seen before with linear quantization, is that a smaller Adam eps is needed for stability. I never see this to be required for 8-bit Adam with dynamic quantization, but in the beginning of training the optimizer is a bit more unstable + +For linear I found that stability started from `adam-eps=1e-6` + +I think 1e-5 degraded performance quite a bit, so I would try 1e-6 and 1e-7 + +I am not sure how the initialization is done. It could be that the initial initialization for the embedding layer is overwritten and that may cause instabilities + +I also see that you are using weight decay. I have not run many experiments with that and so unsure how the behavior is. For weight decay the AdamW formulation is used + +When I tried 8-bit adam with fully sharded parallelism by just replacing the optimizer it did not work for me and I actually had a similar behavior as you see. Short decrease in loss and then stagnation. I think this could be related to the quantization statistics which are not properly synchronized across shards. But this is just a hunch. I think this could be tested by running a small model (maybe something like 500M params) and see if 8-bit Adam works there. If it does not work, it might be related to the quantization statistics + +So with xavier the max value for the embedding layer is 0.0106 and the 99% percentile value for N(0, 0.006) is 0.18 which is much larger. So it could just be the initialization + +I think 0.006 is still very high for the embedding layer. So that might be the issue, but could also the other things mentioned. I would leave the init value for the other layers if that worked for you + +Stas: + +I will add an experiment to leave the default init for the embed layer, and keep our 0.006 for the rest. + +## BNB Experiment 2 + +So trying lower Adam eps: + +``` +--adam-eps 1e-6 \ +``` + +``` +perl -pi -e 's|--adam-eps 1e-8|--adam-eps 1e-6|' *bnb*.slurm +``` + +this made no difference, got an identical loss as exp 1 + + +## BNB Experiment 3 + +Rollback to Exp 01, restore `--adam-eps` + +``` +perl -pi -e 's|--adam-eps 1e-6|--adam-eps 1e-8|' *bnb*.slurm +``` + +Try to turn optimizer sharding off - turn off ZeRO-1 - perhaps it doesn't work well with the 8-bit optimizer. + +``` +perl -pi -e 's|ZERO_STAGE=1|ZERO_STAGE=0|' *bnb*.slurm +``` + +Not sure if the setup won't OOM now. Got 31.7GB memory - it's borderline OOM. + +no change, same trajectory + +ZeRO-1's optim state sharding should be totally transparent, since it unshards the states before the optimizer gets to see them. But it was good to validate that in an experiment. + +## BNB Experiment 4 + +Rollback to Exp 01, + +Let's do a quick test with `--init-method-std 0.02` - we know it's not good for most of the model, but let's see if it impacts for the better the really early issue with BNB. 
If it does make things better then we can do the different init for different layers, so changing: + +``` +perl -pi -e 's|--init-method-std 0.006|--init-method-std 0.02|' *bnb*.slurm +``` + +Something is wrong there, as it very quickly stopped improving and got stuck at loss 8 + +![tr8b-104B-bnb-exp-04.png](images/tr8b-104B-bnb-exp-04.png) + + + +## BNB Experiment 5 + +Discovered `StableEmbedding` wasn't integrated correctly in the original BNB PR, as it wasn't doing the right thing for split word embedding under TP>1, so fixing it in [PR182](https://github.com/bigscience-workshop/Megatron-DeepSpeed/pull/182). + +Rollback to Exp 01, no config change this time around. + + +We did emb-norm and bnb experiments (BNB Exp 5) in parallel and both tracked the same lm loss trajectory, here is the combination: + +![tr8b-104B-emb-norm-exp-01-bnb-05.png](images/tr8b-104B-emb-norm-exp-01-bnb-05.png) + +BNB started diverging just before. So we can tell BNB is more susceptible to instabilities. + +Here is a zoomed in version: + +![tr8b-104B-emb-norm-exp-01-bnb-05-zoom-in.png](images/tr8b-104B-emb-norm-exp-01-bnb-05-zoom-in.png) + + +## BNB Experiment 6 + +Tim suggested that dropping to a lower LR faster and having the min-lr lower helped a lot in his experiments, so let's try that: + + + +``` + iteration 6502/ 159576 | consumed samples: 216960 | consumed tokens: 444334080 | elapsed time per iteration (ms): 31074.1 | learning rate: 5.997E-05 | global batch size: 80 | lm loss: 3.876781E+00 | loss scale: 4096.0 | grad norm: 3246.595 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | + iteration 6503/ 159576 | consumed samples: 217040 | consumed tokens: 444497920 | elapsed time per iteration (ms): 31065.8 | learning rate: 6.000E-05 | global batch size: 80 | lm loss: 4.023108E+00 | loss scale: 4096.0 | grad norm: 3670.127 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | + iteration 6504/ 159576 | consumed samples: 217120 | consumed tokens: 444661760 | elapsed time per iteration (ms): 31073.4 | learning rate: 6.000E-05 | global batch size: 80 | lm loss: 4.030526E+00 | loss scale: 4096.0 | grad norm: 2954.856 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | + + ... + + + iteration 8600/ 159576 | consumed samples: 464560 | consumed tokens: 951418880 | elapsed time per iteration (ms): 66451.1 | learning rate: 6.000E-05 | global batch size: 160 | lm loss: 3.407058E+00 | loss scale: 8192.0 | grad norm: 3035.816 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | + # the log seems to still report 6.000E-05 for a while but TB starts decaying already here - probably a rounding to 3 points issue 6564 is the last one of 6e-5 on TB +``` + +but I don't have those older checkpoints, the only one I have is `global_step6000` so will use it. + +will try: 1% from lr: `--min_lr 6e-7` so it should decay 10x faster. 
+ +And reducing `--lr-decay-samples` from `126_953_125` to `12_695_312` + +But one can't change the lr config once the training started, so getting: + +``` +AssertionError: AnnealingLR: class input value 1e-06 and checkpointvalue 1e-05 for minimum learning rate do not match +``` + +But discovered a new option which seems to allow an override: + +``` + --override-lr-scheduler \ +``` + +So going to change the plan and switch to a recent `global_step12000` checkpoint instead, some time before the divergence: + +``` + iteration 12000/ 159576 | consumed samples: 1519744 | consumed tokens: 3112435712 | elapsed time per iteration (ms): 207967.5 | learning rate: 5.999E-05 | global batch size: 528 | lm loss: 2.985657E+00 | loss scale: 262144.0 | grad norm: 79683.827 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | +``` + +so the next step with: + +``` + --min-lr 6e-7 \ + --lr-decay-samples 12_695_312 \ + --override-lr-scheduler \ +``` + +dropped the learning rate to `5.842E-05` from `5.999E-05` + +``` + iteration 12001/ 159576 | consumed samples: 1520272 | consumed tokens: 3113517056 | elapsed time per iteration (ms): 279206.6 | learning rate: 5.842E-05 | global batch size: 528 | lm loss: 3.029124E+00 | loss scale: 262144.0 | grad norm: 60983.653 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | +``` + +so let's see if that makes a difference. + + +Clearly LR didn't get low fast enough and it diverged too, but even sooner than bnb-exp-5! + +![tr8b-104B-bnb-exp-06.png](images/tr8b-104B-bnb-exp-06.png) + + + +## Embed-Norm Experiment 1 + + +Since we discovered BNB did so well, we decided to try just adding Embedding LayerNorm to the normal training. So did an experiment that is the same as Exp12 but with `--embed-layernorm` enabled. + +[tr8b-104B-emb-norm-64n.slurm](/.tr8b-104B-emb-norm-64n.slurm) + + +It worked really well till 14k and then diverged + +![tr8b-104B-emb-norm-exp-01.png](images/tr8b-104B-emb-norm-exp-01.png) + + + +## Embed-Norm Experiment 2 + +Let's try to first restart with some data skipping to see if data was the issue: + +1. Rollback to 13250 and skip data till 14k: + +``` + iteration 13251/ 159576 | consumed samples: 2333840 | consumed tokens: 4779704320 | elapsed time per iteration (ms): 220631.9 | learning rate: + 5.996E-05 | global batch size: 800 | lm loss: 2.924229E+00 | loss scale: 524288.0 | grad norm: 122160.116 | num zeros: 0.0 | number of skipped iteratio +ns: 0 | number of nan iterations: 0 | +... + iteration 14000/ 159576 | consumed samples: 3014320 | consumed tokens: 6173327360 | elapsed time per iteration (ms): 255453.3 | learning rate: 5.994E-05 | global batch size: 1024 | lm loss: 2.898812E+00 | loss scale: 4096.0 | grad norm: 2553.971 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | +``` + +so will repeat Exp 1 with `--skip-train-iteration-range 13251-14000` + +Worked well for a while and then started flatting out around 17k and then went on a roller coaster around 18k. + +![tr8b-104B-emb-norm-exp-02.png](images/tr8b-104B-emb-norm-exp-02.png) + + +## Embed-Norm Experiment 3 + +Repeat the same as last time + +1. 
Rollback to 16651 and skip data till 18500: + +so will repeat Exp 2 with `--skip-train-iteration-range 13251-14000 16652-18500` + +Actually got the same problem as exp 2 but arriving even sooner: + + +![tr8b-104B-emb-norm-exp-03.png](images/tr8b-104B-emb-norm-exp-03.png) + + + +## Embed-Norm Experiment 4 + +Repeat the same as last time but let's try another data range. Seeing how the rollercoaster started around 18k, let's go for 19500. + +1. Rollback to 16651 and skip data till 19500: + +so will repeat Exp 2 with `--skip-train-iteration-range 13251-14000 16651-19500` + +It didn't help. It exhibited the same behavior as Exp 2 and 3. + +![tr8b-104B-emb-norm-exp-04.png](images/tr8b-104B-emb-norm-exp-04.png) + +Next, will try to reset the optimizer. + + +## Embed-Norm Experiment 5 + +(this experiment was doomed to fail since later we discovered that `--no-load-optim` was being silently ignored with `--deepspeed` - i.e. wasn't ported, so skip to exp 6, which repeats this one identically except after fixing Meg-DS to respect`--no-load-optim` with `--deepspeed`) + +After 100 iterations on lr=0 after reset (which wasn't reset as discovered later), resume started with a spike, started recovering from it but started diverging again. It's still interesting to observe so saving the outcome: + +![tr8b-104B-emb-norm-exp-05.png](images/tr8b-104B-emb-norm-exp-05.png) + +So Exp-6 is what exp-5 was meant to be after making `--no-load-optim` work. + + + +## Embed-Norm Experiment 6 + +So 2 data skipping attempts didn't help. Let's try resetting the optimizer states next. + +**Half-way optimizer reset method**: + +- reset optimizer - don't load the previous states from the checkpoint with the help of `--no-load-optim` +- since we can't do lr warm up half-way through the training we will cheat and simply run the optimizer w/o updates to the weights by setting `lr=0` - now let it train for this number of iterations to emulate warm up (1/(1-0.95)) * 5 = 100 (beta2 = 0.95) +- then resume normal training, after restoring the setup to normal + +Note: +- Make sure the `step` counter for bias correction are reset when starting the optimizer from scratch - checked that apex's `FusedAdam` does that already. + + +Steps: + +1. Rollback tb/checkpoint to 16800 (last stable low loss point) +2. Calculate how to get the framework to run for 100 extra iterations and stop + +``` + iteration 16800/ 159576 | consumed samples: 7594208 | consumed tokens: 15552937984 | elapsed time per iteration (ms): 384505.8 | learning rate: 5.955E-05 | global batch size: 2048 | lm loss: 2.682074E+00 | loss scale: 524288.0 | grad norm: 180376.315 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | samples per second: 0.005 | TFLOPs: 35.24 | + iteration 16801/ 159576 | consumed samples: 7596256 | consumed tokens: 15557132288 | elapsed time per iteration (ms): 400291.6 | learning rate: 5.955E-05 | global batch size: 2048 | lm loss: 2.657616E+00 | loss scale: 524288.0 | grad norm: 226760.401 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | samples per second: 0.005 | TFLOPs: 33.85 | +``` + +Each iteration is 2048 samples at this point and thus we want to run for an additional 204800 samples, and thus we know we want to stop at 7799008 (7594208+204800). 7594208 was consumed samples counter at iteration 16800. i.e. 
the new setting is `--train-samples 7799008`
+
+For the optimizer reset run we need to add:
+```
+    --no-load-optim \
+    --override-lr-scheduler \
+```
+and change:
+```
+    --lr 0 \
+    --min-lr 0 \
+    --train-samples 7799008 \
+```
+
+Automating the change:
+```
+git checkout tr8b-104B-emb-norm-64n.slurm
+perl -pi -e 's|(--checkpoint-activations \\)|$1\n    --no-load-optim \\|' tr8b-104B-emb-norm-64n.slurm
+perl -pi -e 's|(--checkpoint-activations \\)|$1\n    --override-lr-scheduler \\|' tr8b-104B-emb-norm-64n.slurm
+perl -pi -e 's|--lr 6e-5|--lr 0|' tr8b-104B-emb-norm-64n.slurm
+perl -pi -e 's|--min-lr 6e-6|--min-lr 0|' tr8b-104B-emb-norm-64n.slurm
+perl -pi -e 's|--train-samples 300_000_000|--train-samples 7_799_008|' tr8b-104B-emb-norm-64n.slurm
+```
+
+
+1. Run the optimizer reset job once as set up above
+
+2. The next job should run with the slurm script mostly restored to the original, since by then the optimizer will have been warmed up
+
+Once (1) has started running, back up the modified script and restore the original:
+```
+cp tr8b-104B-emb-norm-64n.slurm tr8b-104B-emb-norm-64n.slurm.reset-optim
+git checkout tr8b-104B-emb-norm-64n.slurm
+```
+But the checkpoint from step (1) will now have the wrong lr info, so we again need to tell Megatron to ignore it and use the normal lr setup from the command line:
+
+```
+perl -pi -e 's|(--checkpoint-activations \\)|$1\n    --override-lr-scheduler \\|' tr8b-104B-emb-norm-64n.slurm
+```
+
+3. Once (2) has started running and all looks good, we can then fully restore the script to normal to remove `--override-lr-scheduler`:
+
+```
+git checkout tr8b-104B-emb-norm-64n.slurm
+```
+
+So at step 2, when resuming from the warmed-up reset optimizer, there was a huge spike, then a mostly complete recovery, followed by a divergence.
+
+Exp 5 and 6 have strangely near-identical trajectories, even though exp 5 didn't actually reset the optimizer, whereas exp 6 did.
+
+![tr8b-104B-emb-norm-exp-06.png](images/tr8b-104B-emb-norm-exp-06.png)
+
+
+## Embed-Norm Experiment 7
+
+We continue with the optimizer reset experiment.
+
+There was a bug in deepspeed where the optimizer's `group['step']` wasn't being saved/restored, so on every resume the bias correction was significant, even though it should only matter in the first 1k iterations or so (or right after an optimizer reset).
+
+So let's try again with the fixed deepspeed and do a longer warmup.
+
+Let's do 300 steps at lr=0 to give the reset optimizer a longer warmup.
+
+Steps:
+
+1. Rollback tb/checkpoint to 16800 (like exp 6)
+2. Calculate how to get the framework to run for 300 iterations and stop
+
+Repeating the calculation from Exp 6, we know we want to stop at 8208608 samples (7594208+204800*3).
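+
+Since this consumed-samples arithmetic is easy to get wrong when the batch size changes between experiments, here is a tiny helper (hypothetical, not part of the repo's scripts) that reproduces the stop targets used in Exp 6 and Exp 7:
+
+```
+# hypothetical helper: compute the --train-samples value at which to stop
+# after running `warmup_iters` extra iterations from a given checkpoint
+def warmup_stop_target(consumed_samples, global_batch_size, warmup_iters):
+    return consumed_samples + global_batch_size * warmup_iters
+
+# Exp 6: 100 iterations at GBS=2048 from iteration 16800 (7_594_208 samples consumed)
+assert warmup_stop_target(7_594_208, 2048, 100) == 7_799_008
+# Exp 7: 300 iterations from the same checkpoint
+assert warmup_stop_target(7_594_208, 2048, 300) == 8_208_608
+```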
+ +For the optimizer reset run we need to add: +``` + --no-load-optim \ + --override-lr-scheduler \ +``` +and change: +``` + --lr 0 \ + --min-lr 0 \ + --train-samples 8_208_608 \ +``` + +Automating the change: +``` +git checkout tr8b-104B-emb-norm-64n.slurm +perl -pi -e 's|(--checkpoint-activations \\)|$1\n --no-load-optim \\|' tr8b-104B-emb-norm-64n.slurm +perl -pi -e 's|(--checkpoint-activations \\)|$1\n --override-lr-scheduler \\|' tr8b-104B-emb-norm-64n.slurm +perl -pi -e 's|--lr 6e-5|--lr 0|' tr8b-104B-emb-norm-64n.slurm +perl -pi -e 's|--min-lr 6e-6|--min-lr 0|' tr8b-104B-emb-norm-64n.slurm +perl -pi -e 's|--train-samples 300_000_000|--train-samples 8_208_608|' tr8b-104B-emb-norm-64n.slurm +``` + +Once this 1st job is running, the next 2nd job is just to override the lr-scheduler: + +``` +cp tr8b-104B-emb-norm-64n.slurm tr8b-104B-emb-norm-64n.slurm.reset-optim-try-3 +git checkout tr8b-104B-emb-norm-64n.slurm +perl -pi -e 's|(--checkpoint-activations \\)|$1\n --override-lr-scheduler \\|' tr8b-104B-emb-norm-64n.slurm +``` + +And once the 2nd job starts, and it looks good we can then resume normally from the checkpoint for the subsequent job: + +``` +git checkout tr8b-104B-emb-norm-64n.slurm +``` + +Actually due to SLURM and not being able to be awake 24/7, I managed to get the first step to run for iterations 16801-17059 - so 259 warmup iterations instead of planned 300, so this is close enough for the sake of the experiment. And thus the resume with the warmed up optimizer happened at iteration 17060. + +After warm up, just like with the 100-iteration long optimizer reset/warmup there was a spike but much smaller this time - only 2.7 to 4.4 and then quickly recovering to 2.7: +``` + iteration 17060/ 159576 | consumed samples: 8126688 | consumed tokens: 16643457024 | elapsed time per iteration (ms): 573125.8 | learning rate: 5.948E-05 | global batch size: 2048 | lm loss: 2.665295E+00 | loss scale: 1048576.0 | grad norm: 453082.517 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | samples per second: 0.004 | TFLOPs: 23.64 | + iteration 17061/ 159576 | consumed samples: 8128736 | consumed tokens: 16647651328 | elapsed time per iteration (ms): 423042.3 | learning rate: 5.948E-05 | global batch size: 2048 | lm loss: 2.667121E+00 | loss scale: 1048576.0 | grad norm: 464441.887 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | samples per second: 0.005 | TFLOPs: 32.03 | +[2022-01-19 13:09:13,305] [INFO] [stage_1_and_2.py:1644:step] [deepscale] OVERFLOW! Rank 0 Skipping step. Attempted loss scale: 1048576.0, reducing to 1048576.0 + iteration 17062/ 159576 | consumed samples: 8130784 | consumed tokens: 16651845632 | elapsed time per iteration (ms): 422832.0 | learning rate: 5.948E-05 | global batch size: 2048 | lm loss: 4.374269E+00 | loss scale: 1048576.0 | grad norm: 464441.887 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | samples per second: 0.005 | TFLOPs: 32.04 | +[2022-01-19 13:16:17,683] [INFO] [stage_1_and_2.py:1644:step] [deepscale] OVERFLOW! Rank 0 Skipping step. 
Attempted loss scale: 1048576.0, reducing to 524288.0 + iteration 17063/ 159576 | consumed samples: 8132832 | consumed tokens: 16656039936 | elapsed time per iteration (ms): 424377.8 | learning rate: 5.948E-05 | global batch size: 2048 | lm loss: 4.365822E+00 | loss scale: 524288.0 | grad norm: 464441.887 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | samples per second: 0.005 | TFLOPs: 31.93 | +[2022-01-19 13:23:19,412] [INFO] [stage_1_and_2.py:1644:step] [deepscale] OVERFLOW! Rank 0 Skipping step. Attempted loss scale: 524288.0, reducing to 262144.0 + iteration 17064/ 159576 | consumed samples: 8134880 | consumed tokens: 16660234240 | elapsed time per iteration (ms): 421729.4 | learning rate: 5.948E-05 | global batch size: 2048 | lm loss: 4.391485E+00 | loss scale: 262144.0 | grad norm: 464441.887 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | samples per second: 0.005 | TFLOPs: 32.13 | +[2022-01-19 13:30:23,180] [INFO] [stage_1_and_2.py:1644:step] [deepscale] OVERFLOW! Rank 0 Skipping step. Attempted loss scale: 262144.0, reducing to 131072.0 + iteration 17065/ 159576 | consumed samples: 8136928 | consumed tokens: 16664428544 | elapsed time per iteration (ms): 423768.1 | learning rate: 5.948E-05 | global batch size: 2048 | lm loss: 4.404639E+00 | loss scale: 131072.0 | grad norm: 464441.887 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | samples per second: 0.005 | TFLOPs: 31.97 | + iteration 17066/ 159576 | consumed samples: 8138976 | consumed tokens: 16668622848 | elapsed time per iteration (ms): 421634.1 | learning rate: 5.948E-05 | global batch size: 2048 | lm loss: 4.398458E+00 | loss scale: 131072.0 | grad norm: 879946.622 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | samples per second: 0.005 | TFLOPs: 32.13 | +[2022-01-19 13:44:28,157] [INFO] [stage_1_and_2.py:1644:step] [deepscale] OVERFLOW! Rank 0 Skipping step. Attempted loss scale: 131072.0, reducing to 65536.0 + iteration 17067/ 159576 | consumed samples: 8141024 | consumed tokens: 16672817152 | elapsed time per iteration (ms): 423342.2 | learning rate: 5.948E-05 | global batch size: 2048 | lm loss: 4.096038E+00 | loss scale: 65536.0 | grad norm: 879946.622 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | samples per second: 0.005 | TFLOPs: 32.00 | +[2022-01-19 13:51:30,908] [INFO] [stage_1_and_2.py:1644:step] [deepscale] OVERFLOW! Rank 0 Skipping step. Attempted loss scale: 65536.0, reducing to 32768.0 + iteration 17068/ 159576 | consumed samples: 8143072 | consumed tokens: 16677011456 | elapsed time per iteration (ms): 422752.4 | learning rate: 5.948E-05 | global batch size: 2048 | lm loss: 4.137348E+00 | loss scale: 32768.0 | grad norm: 879946.622 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | samples per second: 0.005 | TFLOPs: 32.05 | +``` + +This attempt failed too - the model had a huge spike and diverged into NaN loss. + +![tr8b-104B-emb-norm-exp-07.png](images/tr8b-104B-emb-norm-exp-07.png) + +So at this moment we are stopping this experiment as we must focus on the 200B setup and we are going to use bf16 there and hope that things will be much better with that dtype. 
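+
+A side note on the warmup lengths used in Exp 6 and 7: the `(1/(1-0.95)) * 5 = 100` heuristic comes from the fact that with `beta2=0.95` the second-moment moving average has an effective window of about `1/(1-beta2) = 20` steps. Here is a tiny standalone sketch (plain Python, not from the training code) of how quickly a freshly reset Adam state becomes dominated by real gradients during the lr=0 warmup:
+
+```
+# sketch: fraction of Adam's second-moment moving average contributed by real
+# gradients after `steps` updates from a zero-initialized state (beta2=0.95 as in these runs)
+beta2 = 0.95
+for steps in (20, 100, 300):
+    coverage = 1 - beta2 ** steps
+    print(f"{steps:4d} steps -> {coverage:.4f}")
+# prints ~0.6415 after 20 steps, ~0.9941 after 100 and ~1.0000 after 300,
+# so after the 100-300 lr=0 iterations the zero init has essentially washed out
+```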
+
+
+## On the importance of saving and restoring the optimizer step counter
+
+Since SLURM limits us to 20h runs, we have to save the optimizer states and everything else at the end of each run and then resume from the checkpoint on the next run. What happens if some things fail to be restored on resume?
+
+While investigating the optimizer reset I found a bug in Deepspeed (both on its own and as part of Meg-DS): it doesn't save/restore `group['step']`, and so `bias_correction` is not computed according to the algorithm.
+
+In pytorch's `Adam`, `step` is part of each param's `state_dict` and so it gets saved and restored with the `state_dict`:
+https://github.com/pytorch/pytorch/blob/b7bda236d18815052378c88081f64935427d7716/torch/optim/adamw.py#L81-L85
+
+In apex's `FusedAdam`, which is what we use now, `step` is not part of each param's state, but is wisely maintained as a single global copy per param group:
+https://github.com/NVIDIA/apex/blob/b88c507edb0d067d5570f7a8efe03a90664a3d16/apex/optimizers/fused_adam.py#L111-L114
+
+The problem is that because it isn't part of the param's state, it doesn't get saved and restored.
+
+So on every resume one starts with `step=1` and not `step=iteration`. I verified that empirically by dumping `optimizer.optimizer.param_groups[0]['step']`.
+
+It's pretty clear that Deepspeed itself isn't aware of this state variable:
+https://github.com/microsoft/DeepSpeed/blob/3293cf72a0abd5cf77a831996bd054bc908476a6/deepspeed/runtime/zero/stage_1_and_2.py#L1958-L1980
+
+Which leads us to:
+```
+bias_correction1 = 1 - beta1 ** state['step']
+bias_correction2 = 1 - beta2 ** state['step']
+```
+So on each resume we end up with a much smaller bias_correction than it should normally be, since `beta2**1 >> beta2**150000` (i.e. `0.95 >> 0`), which makes `1 - beta2**step` far from 1.
+Normally `bias_correction` reaches `1` within about 1k iterations, but here it doesn't: on every resume the first iteration sees a `bias_correction` of `[0.1, 0.05]` (for beta1/beta2 respectively).
+
+This bug has been fixed in Deepspeed here: https://github.com/microsoft/DeepSpeed/pull/1525 and the fix should be available in DeepSpeed v0.5.10+.
+
+Follow-up notes from Tim Dettmers:
+
+> I wonder if this is a confounding factor for the "decrease learning rate from checkpoint" experiments. The bias corrections are there to have the adam states move more quickly away from zero at the start of training. However, if you reload an optimizer and the gradient has similar magnitude as the adam state (this would be expected in most cases) then the learning rate is increased by 2.2x - 1.15x for the first 5 iterations and it stabilizes around less extreme values after 10 iterations. Not sure if that initial high learning rate could move the weights into a space of instability
+
+
+## 1.3B optim-reset experiment
+
+Let's train a very small model and see how the optimizer reset goes at various stages.
+
+1. Train from scratch for 5M samples w/ beta2=0.95 to match our 104B experiment
+
+This will train until iteration 19686
+
+train/tr3-1B3-baseline/tr3m-1B3-emb-norm-pile-optim-reset.slurm
+
+lm loss: 2.134260E+00
+
+
+2.
optimizer reset and 100 iterations warmup (beta2=0.95) + +100 iterations @ bs=512 51_200 samples: 5_000_000+51_200=5_051_200 + +``` +git checkout tr3m-1B3-emb-norm-pile-optim-reset.slurm +perl -pi -e 's|(--checkpoint-activations \\)|$1\n --no-load-optim \\|' tr3m-1B3-emb-norm-pile-optim-reset.slurm +perl -pi -e 's|(--checkpoint-activations \\)|$1\n --override-lr-scheduler \\|' tr3m-1B3-emb-norm-pile-optim-reset.slurm +perl -pi -e 's|--lr 2e-4|--lr 0|' tr3m-1B3-emb-norm-pile-optim-reset.slurm +perl -pi -e 's|--min-lr 1e-5|--min-lr 0|' tr3m-1B3-emb-norm-pile-optim-reset.slurm +perl -pi -e 's|TRAIN_ITER=5_000_000|TRAIN_ITER=5_051_200|' tr3m-1B3-emb-norm-pile-optim-reset.slurm +``` + +This will run iterations 19687 - 19786 + +3. resume after reset - let's do 1000 iterations +512_000 samples: 5_563_200 + + +``` +git checkout tr3m-1B3-emb-norm-pile-optim-reset.slurm +perl -pi -e 's|(--checkpoint-activations \\)|$1\n --override-lr-scheduler \\|' tr3m-1B3-emb-norm-pile-optim-reset.slurm +perl -pi -e 's|TRAIN_ITER=5_000_000|TRAIN_ITER=5_563_200|' tr3m-1B3-emb-norm-pile-optim-reset.slurm +``` + +This will run iterations 19786 - 20786 + +4. restore to normal once the above completes + + +``` +git checkout tr3m-1B3-emb-norm-pile-optim-reset.slurm +``` + + +5. Let's train some longer - to 20M samples and repeat the rest at a different point in the training curve + +``` +git checkout tr3m-1B3-emb-norm-pile-optim-reset.slurm +perl -pi -e 's|TRAIN_ITER=5_000_000|TRAIN_ITER=20_000_000|' tr3m-1B3-emb-norm-pile-optim-reset.slurm +``` + +6. repeat reset and warm up + +and 100 iterations warmup (beta2=0.95) + +100 iterations @ bs=512 51_200 samples: 20_000_000+51_200=20_051_200 + + +``` +git checkout tr3m-1B3-emb-norm-pile-optim-reset.slurm +perl -pi -e 's|(--checkpoint-activations \\)|$1\n --no-load-optim \\|' tr3m-1B3-emb-norm-pile-optim-reset.slurm +perl -pi -e 's|(--checkpoint-activations \\)|$1\n --override-lr-scheduler \\|' tr3m-1B3-emb-norm-pile-optim-reset.slurm +perl -pi -e 's|--lr 2e-4|--lr 0|' tr3m-1B3-emb-norm-pile-optim-reset.slurm +perl -pi -e 's|--min-lr 1e-5|--min-lr 0|' tr3m-1B3-emb-norm-pile-optim-reset.slurm +perl -pi -e 's|TRAIN_ITER=5_000_000|TRAIN_ITER=20_051_200|' tr3m-1B3-emb-norm-pile-optim-reset.slurm +``` + +This will run iterations 48984 - 49084 + +7. resume after reset - let's do 1000 iterations +512_000 samples: 20_563_200 + + +``` +git checkout tr3m-1B3-emb-norm-pile-optim-reset.slurm +perl -pi -e 's|(--checkpoint-activations \\)|$1\n --override-lr-scheduler \\|' tr3m-1B3-emb-norm-pile-optim-reset.slurm +perl -pi -e 's|TRAIN_ITER=5_000_000|TRAIN_ITER=20_563_200|' tr3m-1B3-emb-norm-pile-optim-reset.slurm +``` + +This seems to have worked just fine in both cases, the model just continued training normally, w/o any spikes or lost progress. + + +## Conclusion + +It took several months of experiments with the 104B "training wheels" model during which we had 3 difficult instability encounters. We managed to overcome the first two (1) fixing std init (2) adding embed norm and stopped the training at the 3rd instability since we didn't have time to continue experimenting as we had to move to final model training. + +Unfortunately the CL experiments were inconclusive since we didn't progress far enough to be able to compare its relative performance to the baseline. To remind CL progressively grows its seqlen from 32 to 2048, so until the seqlen is close to 2048 it's not easy to compare it to the baseline which always has seqlen of 2048. 
diff --git a/train/tr8b-104B/start-tr8b-104B b/train/tr8b-104B/start-tr8b-104B new file mode 100644 index 0000000000000000000000000000000000000000..5b3c72070300818f94814423515c2e392935e962 --- /dev/null +++ b/train/tr8b-104B/start-tr8b-104B @@ -0,0 +1,59 @@ +# This is a python production script for JZ / xxx training +# +# Activate with: +# +# source ./start-tr8b-104B +# +# + +# # if this session isn't run via a login shell, which is the case when running a +# # command which is not shell via ssh, the bash function `module` will be missing. +# # so work around it by emulating part of the login shell that loads modules environment +# if [ -z $(type -t module) ] +# then +# . /etc/profile.d/z_modules.sh +# fi +module purge +module load cuda/11.4.3 +#module load pytorch-gpu/py3/1.8.1 +module load nvtop git git-lfs github-cli mc + +# git prompt +export GIT_PROMPT_ONLY_IN_REPO=0; +export GIT_PROMPT_THEME="JZPRod" +source $six_ALL_CCFRWORK/envs/.bash-git-prompt/gitprompt.sh + +# We are using common disk spaces for datasets, caches, and experiment dumps: +# +#- Code, cache and datasets -> `$six_ALL_CCFRWORK/cache_dir` and ``$six_ALL_CCFRWORK/datasets` +#- Experiment dumps -> `$six_ALL_CCFRWORK/experiments` + +# specific caches + +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics + +export DATASETS_CUSTOM=$six_ALL_CCFRWORK/datasets-custom + +### CONDA ### + +# >>> conda initialize >>> +# !! Contents within this block are managed by 'conda init' !! +__conda_setup="$('/gpfslocalsup/pub/anaconda-py3/2020.02/bin/conda' 'shell.bash' 'hook' 2> /dev/null)" +if [ $? -eq 0 ]; then + eval "$__conda_setup" +else + if [ -f "/gpfslocalsup/pub/anaconda-py3/2020.02/etc/profile.d/conda.sh" ]; then + . "/gpfslocalsup/pub/anaconda-py3/2020.02/etc/profile.d/conda.sh" + else + export PATH="/gpfslocalsup/pub/anaconda-py3/2020.02/bin:$PATH" + fi +fi +unset __conda_setup +# <<< conda initialize <<< + +export CONDA_ENVS_PATH=$six_ALL_CCFRWORK/conda +conda activate base +conda activate cutting-edge diff --git a/train/tr8b-104B/tr8b-104B-bnb.slurm b/train/tr8b-104B/tr8b-104B-bnb.slurm new file mode 100644 index 0000000000000000000000000000000000000000..d14f6629d991f4d845287e97ce91f2e7a7db1349 --- /dev/null +++ b/train/tr8b-104B/tr8b-104B-bnb.slurm @@ -0,0 +1,172 @@ +#!/bin/bash +#SBATCH --job-name=tr8b-104B-bnb +#SBATCH --constraint=v100-32g +#SBATCH --nodes=128 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! 
+#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --account=six@v100 + +set -x -e + +source $six_ALL_CCFRWORK/code/tr8b-104B/bigscience/train/tr8b-104B/start-tr8b-104B + +echo "START TIME: $(date)" + +VARIANT=bnb + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr8b-104B/ +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$VARIANT +REPO_PATH=$DATA_OUTPUT_PATH/tr8b-104B-logs/ +TENSORBOARD_PATH=$REPO_PATH/tensorboard/$VARIANT +LOGS_PATH=$REPO_PATH/logs/$VARIANT +mkdir -p $LOGS_PATH + +MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRWORK/code/tr8b-104B/Megatron-DeepSpeed-bnb + +VOCAB_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-vocab.json +MERGE_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-merges.txt +DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/oscar-en/meg-gpt2_text_document + +cd $MEGATRON_DEEPSPEED_REPO + +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6000 + +GPUS_PER_NODE=4 +NNODES=128 # switch to 128 +TP_SIZE=4 # always fixed to the size of a single node +PP_SIZE=32 # NLAYERS must be a multiple of PP_SIZE here +#DP_SIZE=$NNODES*$GPUS_PER_NODE/($PP_SIZE*$TP_SIZE) # will get derived automatically by trainer + +# GLOBAL_BATCH_SIZE has to be divisible by MICRO_BATCH_SIZE*DP_size +# GLOBAL_BATCH_SIZE=$(($MICRO_BATCH_SIZE*$GAS*$DP_SIZE)) - GAS is auto-derived by deepspeed +MICRO_BATCH_SIZE=1 +GLOBAL_BATCH_SIZE=2048 + +NLAYERS=64 +NHIDDEN=11600 +NHEADS=80 +SEQ_LEN=2048 +VOCAB_SIZE=50257 + +SAVE_INTERVAL=300 + +OPTIMIZER_ARGS=" \ + --use-bnb-optimizer \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.95 \ + --adam-eps 1e-8 \ + --lr 6e-5 \ + --min-lr 6e-6 \ + --lr-decay-style cosine \ + --lr-decay-samples 126_953_125 \ + --lr-warmup-samples 216_320 \ + --clip-grad 1.0 \ + --weight-decay 1e-1 \ + " + +EXIT_OPTS=" \ + --exit-duration-in-mins 1190 \ + " + +GPT_ARGS=" \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --rampup-batch-size 16 16 6_000_000 \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --train-samples 300_000_000 \ + --vocab-file $VOCAB_FILE \ + --merge-file $MERGE_FILE \ + --loss-scale 12 \ + --init-method-std 0.006 \ + --fp16 \ + --checkpoint-activations \ + --seed 43 \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +OUTPUT_ARGS=" \ + --log-interval 1 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 1000 \ + --eval-iters 5 \ + --tensorboard-dir $TENSORBOARD_PATH \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=1 + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat < $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "zero_allow_untested_optimizer": true, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + 
--zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.launch \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --master_addr $MASTER_ADDR \ + --master_port $MASTER_PORT \ + " + +export CMD=" \ + `pwd`/pretrain_gpt.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --data-path $DATA_PATH \ + --data-impl mmap \ + --split 949,50,1 \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + +echo $CMD + +# to debug - add echo (it exits and prints what it would have launched) +clear; srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' 2>&1 | tee -a $LOGS_PATH/main_log.txt + +echo "END TIME: $(date)" diff --git a/train/tr8b-104B/tr8b-104B-cl-a100-16n.slurm b/train/tr8b-104B/tr8b-104B-cl-a100-16n.slurm new file mode 100644 index 0000000000000000000000000000000000000000..0b41cc509098e72f8bab70b8a79f825c7e604684 --- /dev/null +++ b/train/tr8b-104B/tr8b-104B-cl-a100-16n.slurm @@ -0,0 +1,194 @@ +#!/bin/bash +#SBATCH --job-name=tr8b-104B-cl-a100 +#SBATCH --partition=gpu_p5 +#SBATCH --nodes=16 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! +#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:8 # number of gpus +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --account=six@v100 + +set -x -e + +source $six_ALL_CCFRWORK/code/tr8b-104B/bigscience/train/tr8b-104B/start-tr8b-104B + +echo "START TIME: $(date)" + +VARIANT=cl-a100 + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr8b-104B +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$VARIANT +REPO_PATH=$DATA_OUTPUT_PATH/tr8b-104B-logs/ +TENSORBOARD_PATH=$REPO_PATH/tensorboard/$VARIANT +LOGS_PATH=$REPO_PATH/logs/$VARIANT +mkdir -p $LOGS_PATH + +MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRWORK/code/tr8b-104B/Megatron-DeepSpeed-tr8b-104B + +VOCAB_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-vocab.json +MERGE_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-merges.txt +DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/oscar-en/meg-gpt2_text_document + +cd $MEGATRON_DEEPSPEED_REPO + +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6000 + +GPUS_PER_NODE=8 +NNODES=16 +TP_SIZE=4 # always fixed to the size of a single node +PP_SIZE=32 # NLAYERS must be a multiple of PP_SIZE here +#DP_SIZE=$NNODES*$GPUS_PER_NODE/($PP_SIZE*$TP_SIZE) # will get derived automatically by trainer + +# GLOBAL_BATCH_SIZE has to be divisible by MICRO_BATCH_SIZE*DP_size +# GLOBAL_BATCH_SIZE=$(($MICRO_BATCH_SIZE*$GAS*$DP_SIZE)) - GAS is auto-derived by deepspeed +MICRO_BATCH_SIZE=1 +GLOBAL_BATCH_SIZE=2048 + +NLAYERS=64 +NHIDDEN=11600 +NHEADS=80 +SEQ_LEN=2048 +VOCAB_SIZE=50257 + +SAVE_INTERVAL=50 + +LR_WARMUP_SAMPLES=3_750_000 +LR_DECAY_SAMPLES=126_953_125 +LR_DECAY_TOKENS=$(perl -e "print $LR_DECAY_SAMPLES*$SEQ_LEN") + +TRAIN_SAMPLES=600_000_000 +TRAIN_TOKENS=300_000_000_000 + +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.95 \ + --adam-eps 1e-8 \ + --lr 6e-5 \ + --min-lr 6e-6 \ + --lr-warmup-samples $LR_WARMUP_SAMPLES \ + --lr-decay-tokens $LR_DECAY_TOKENS \ + --lr-decay-style cosine \ + --clip-grad 1.0 \ + --weight-decay 1e-1 \ + " + +EXIT_OPTS=" \ + --exit-duration-in-mins 1185 \ + " + +# --rampup-batch-size 16 16 6_000_000 \ + +GPT_ARGS=" 
\ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --train-samples $TRAIN_SAMPLES \ + --train-tokens $TRAIN_TOKENS \ + --vocab-file $VOCAB_FILE \ + --merge-file $MERGE_FILE \ + --loss-scale 12 \ + --init-method-std 0.006 \ + --fp16 \ + --checkpoint-activations \ + --embed-layernorm \ + --seed 43 \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +OUTPUT_ARGS=" \ + --log-interval 1 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 150 \ + --eval-iters 5 \ + --tensorboard-dir $TENSORBOARD_PATH \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=1 + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat < $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "elastic_checkpoint": true, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "curriculum_learning": { + "enabled": true, + "curriculum_type": "seqlen", + "min_difficulty": 64, + "max_difficulty": $SEQ_LEN, + "schedule_type": "fixed_linear", + "schedule_config": { + "total_curriculum_step": 36000, + "difficulty_step": 8 + } + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.launch \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --master_addr $MASTER_ADDR \ + --master_port $MASTER_PORT \ + " + +export CMD=" \ + `pwd`/pretrain_gpt.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --data-path $DATA_PATH \ + --data-impl mmap \ + --split 949,50,1 \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + +echo $CMD + +# to debug - add echo (it exits and prints what it would have launched) +clear; srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' 2>&1 | tee -a $LOGS_PATH/main_log.txt + +echo "END TIME: $(date)" + +# diff --git a/train/tr8b-104B/tr8b-104B-cl.slurm b/train/tr8b-104B/tr8b-104B-cl.slurm new file mode 100644 index 0000000000000000000000000000000000000000..b2538c1a353b147249d6d1cf2cb1dc6bdde2a690 --- /dev/null +++ b/train/tr8b-104B/tr8b-104B-cl.slurm @@ -0,0 +1,193 @@ +#!/bin/bash +#SBATCH --job-name=tr8b-104B +#SBATCH --constraint=v100-32g +#SBATCH --nodes=128 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! 
+#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --account=six@v100 + +set -x -e + +source $six_ALL_CCFRWORK/code/tr8b-104B/bigscience/train/tr8b-104B/start-tr8b-104B + +echo "START TIME: $(date)" + +VARIANT=cl + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr8b-104B +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$VARIANT +REPO_PATH=$DATA_OUTPUT_PATH/tr8b-104B-logs/ +TENSORBOARD_PATH=$REPO_PATH/tensorboard/$VARIANT +LOGS_PATH=$REPO_PATH/logs/$VARIANT +mkdir -p $LOGS_PATH + +MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRWORK/code/tr8b-104B/Megatron-DeepSpeed-tr8b-104B + +VOCAB_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-vocab.json +MERGE_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-merges.txt +DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/oscar-en/meg-gpt2_text_document + +cd $MEGATRON_DEEPSPEED_REPO + +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6000 + +GPUS_PER_NODE=4 +NNODES=128 # switch to 128 +TP_SIZE=4 # always fixed to the size of a single node +PP_SIZE=32 # NLAYERS must be a multiple of PP_SIZE here +#DP_SIZE=$NNODES*$GPUS_PER_NODE/($PP_SIZE*$TP_SIZE) # will get derived automatically by trainer + +# GLOBAL_BATCH_SIZE has to be divisible by MICRO_BATCH_SIZE*DP_size +# GLOBAL_BATCH_SIZE=$(($MICRO_BATCH_SIZE*$GAS*$DP_SIZE)) - GAS is auto-derived by deepspeed +MICRO_BATCH_SIZE=1 +GLOBAL_BATCH_SIZE=2048 + +NLAYERS=64 +NHIDDEN=11600 +NHEADS=80 +SEQ_LEN=2048 +VOCAB_SIZE=50257 + +SAVE_INTERVAL=300 + +LR_WARMUP_SAMPLES=3_750_000 +LR_DECAY_SAMPLES=126_953_125 +LR_DECAY_TOKENS=$(perl -e "print $LR_DECAY_SAMPLES*$SEQ_LEN") + +TRAIN_SAMPLES=600_000_000 +TRAIN_TOKENS=300_000_000_000 + +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.95 \ + --adam-eps 1e-8 \ + --lr 6e-5 \ + --min-lr 6e-6 \ + --lr-warmup-samples $LR_WARMUP_SAMPLES \ + --lr-decay-tokens $LR_DECAY_TOKENS \ + --lr-decay-style cosine \ + --clip-grad 1.0 \ + --weight-decay 1e-1 \ + " + +EXIT_OPTS=" \ + --exit-duration-in-mins 1190 \ + " + +# --rampup-batch-size 16 16 6_000_000 \ + +GPT_ARGS=" \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --train-samples $TRAIN_SAMPLES \ + --train-tokens $TRAIN_TOKENS \ + --vocab-file $VOCAB_FILE \ + --merge-file $MERGE_FILE \ + --loss-scale 12 \ + --init-method-std 0.006 \ + --fp16 \ + --checkpoint-activations \ + --embed-layernorm \ + --seed 43 \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +OUTPUT_ARGS=" \ + --log-interval 1 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 150 \ + --eval-iters 5 \ + --tensorboard-dir $TENSORBOARD_PATH \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=1 + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat < $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + 
"min_loss_scale": 1, + "initial_scale_power": 12 + }, + "curriculum_learning": { + "enabled": true, + "curriculum_type": "seqlen", + "min_difficulty": 64, + "max_difficulty": $SEQ_LEN, + "schedule_type": "fixed_linear", + "schedule_config": { + "total_curriculum_step": 36000, + "difficulty_step": 8 + } + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.launch \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --master_addr $MASTER_ADDR \ + --master_port $MASTER_PORT \ + " + +export CMD=" \ + `pwd`/pretrain_gpt.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --data-path $DATA_PATH \ + --data-impl mmap \ + --split 949,50,1 \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + +echo $CMD + +# to debug - add echo (it exits and prints what it would have launched) +clear; srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' 2>&1 | tee -a $LOGS_PATH/main_log.txt + +echo "END TIME: $(date)" + +# diff --git a/train/tr8b-104B/tr8b-104B-emb-norm-64n.slurm b/train/tr8b-104B/tr8b-104B-emb-norm-64n.slurm new file mode 100644 index 0000000000000000000000000000000000000000..c1ee996d1ceb298fc48743fa582a9de57b315171 --- /dev/null +++ b/train/tr8b-104B/tr8b-104B-emb-norm-64n.slurm @@ -0,0 +1,175 @@ +#!/bin/bash +#SBATCH --job-name=tr8b-104B-emb-norm +#SBATCH --constraint=v100-32g +#SBATCH --nodes=64 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! +#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --account=six@v100 + +set -x -e + +source $six_ALL_CCFRWORK/code/tr8b-104B/bigscience/train/tr8b-104B/start-tr8b-104B + +echo "START TIME: $(date)" + +# sleep 100 + +VARIANT=emb-norm + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr8b-104B/ +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$VARIANT +REPO_PATH=$DATA_OUTPUT_PATH/tr8b-104B-logs/ +TENSORBOARD_PATH=$REPO_PATH/tensorboard/$VARIANT +LOGS_PATH=$REPO_PATH/logs/$VARIANT +mkdir -p $LOGS_PATH + +MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRWORK/code/tr8b-104B/Megatron-DeepSpeed-emb-norm + +VOCAB_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-vocab.json +MERGE_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-merges.txt +DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/oscar-en/meg-gpt2_text_document + +cd $MEGATRON_DEEPSPEED_REPO + +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6000 + +GPUS_PER_NODE=4 +NNODES=64 # switch to 128 +TP_SIZE=4 # always fixed to the size of a single node +PP_SIZE=32 # NLAYERS must be a multiple of PP_SIZE here +#DP_SIZE=$NNODES*$GPUS_PER_NODE/($PP_SIZE*$TP_SIZE) # will get derived automatically by trainer + +# GLOBAL_BATCH_SIZE has to be divisible by MICRO_BATCH_SIZE*DP_size +# GLOBAL_BATCH_SIZE=$(($MICRO_BATCH_SIZE*$GAS*$DP_SIZE)) - GAS is auto-derived by deepspeed +MICRO_BATCH_SIZE=1 +GLOBAL_BATCH_SIZE=2048 + +NLAYERS=64 +NHIDDEN=11600 +NHEADS=80 +SEQ_LEN=2048 +VOCAB_SIZE=50257 + +SAVE_INTERVAL=300 + +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.95 \ + 
--adam-eps 1e-8 \ + --lr 6e-5 \ + --min-lr 6e-6 \ + --lr-decay-style cosine \ + --lr-decay-samples 126_953_125 \ + --lr-warmup-samples 216_320 \ + --clip-grad 1.0 \ + --weight-decay 1e-1 \ + " + +EXIT_OPTS=" \ + --exit-duration-in-mins 1185 \ + " + +GPT_ARGS=" \ + --skip-train-iteration-range 13251-14000 16651-19500 \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --rampup-batch-size 16 16 6_000_000 \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --train-samples 300_000_000 \ + --vocab-file $VOCAB_FILE \ + --merge-file $MERGE_FILE \ + --loss-scale 12 \ + --init-method-std 0.006 \ + --fp16 \ + --checkpoint-activations \ + --embed-layernorm \ + --seed 43 \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +OUTPUT_ARGS=" \ + --log-interval 1 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 1000 \ + --eval-iters 5 \ + --tensorboard-dir $TENSORBOARD_PATH \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=1 + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat < $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "zero_allow_untested_optimizer": true, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.launch \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --master_addr $MASTER_ADDR \ + --master_port $MASTER_PORT \ + " + +export CMD=" \ + `pwd`/pretrain_gpt.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --data-path $DATA_PATH \ + --data-impl mmap \ + --split 949,50,1 \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + +echo $CMD + +# to debug - add echo (it exits and prints what it would have launched) +clear; srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' 2>&1 | tee -a $LOGS_PATH/main_log.txt + +echo "END TIME: $(date)"