diff --git a/.log.txt.swp b/.log.txt.swp
new file mode 100644
index 0000000000000000000000000000000000000000..d84060c5af9c433ecfd4cba6fc86df26e7582dcc
Binary files /dev/null and b/.log.txt.swp differ
diff --git a/0 b/0
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/20 b/20
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/6 b/6
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/ckpts/checkpoints_zero_stage_2/global_step120/debug.err b/ckpts/checkpoints_zero_stage_2/global_step120/debug.err
new file mode 100644
index 0000000000000000000000000000000000000000..7c5fb93e2f15872aeb1748d98bf0f6d07ceef887
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step120/debug.err
@@ -0,0 +1,65 @@
+/usr/local/lib/python3.10/dist-packages/transformers/utils/generic.py:481: UserWarning: torch.utils._pytree._register_pytree_node is deprecated. Please use torch.utils._pytree.register_pytree_node instead.
+  _torch_pytree._register_pytree_node(
+/usr/local/lib/python3.10/dist-packages/transformers/utils/generic.py:338: UserWarning: torch.utils._pytree._register_pytree_node is deprecated. Please use torch.utils._pytree.register_pytree_node instead.
+  _torch_pytree._register_pytree_node(
+The following values were not passed to `accelerate launch` and had defaults used instead:
+	`--num_processes` was set to a value of `0`
+	`--num_machines` was set to a value of `1`
+	`--mixed_precision` was set to a value of `'no'`
+	`--dynamo_backend` was set to a value of `'no'`
+To avoid this warning pass in values for each of the problematic parameters or run `accelerate config`.
+/usr/local/lib/python3.10/dist-packages/transformers/utils/generic.py:481: UserWarning: torch.utils._pytree._register_pytree_node is deprecated. Please use torch.utils._pytree.register_pytree_node instead.
+  _torch_pytree._register_pytree_node(
+/usr/local/lib/python3.10/dist-packages/transformers/utils/generic.py:338: UserWarning: torch.utils._pytree._register_pytree_node is deprecated. Please use torch.utils._pytree.register_pytree_node instead.
+  _torch_pytree._register_pytree_node(
+/usr/local/lib/python3.10/dist-packages/transformers/utils/generic.py:338: UserWarning: torch.utils._pytree._register_pytree_node is deprecated. Please use torch.utils._pytree.register_pytree_node instead.
+  _torch_pytree._register_pytree_node(
+/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/__init__.py:46: UserWarning: apex not installed, gpu_migration will not swap api for this package.
+  warnings.warn(
+2024-05-22:14:51:48,518 INFO [__main__.py:251] Verbosity set to INFO
+2024-05-22:14:51:57,504 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'hellaswag', 'mrpc', 'openbookqa', 'sst2', 'winogrande']
+2024-05-22:14:51:57,504 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234
+2024-05-22:14:51:57,505 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/mnt/weka/peacock/idc/cronscript/ckpts//hf_ckpt//global_step120'}
+[W socket.cpp:464] [c10d] The server socket cannot be initialized on [::]:12345 (errno: 97 - Address family not supported by protocol).
+[W socket.cpp:697] [c10d] The client socket cannot be initialized to connect to [localhost]:12345 (errno: 97 - Address family not supported by protocol).
+[W socket.cpp:697] [c10d] The client socket cannot be initialized to connect to [localhost]:12345 (errno: 97 - Address family not supported by protocol).
+[W socket.cpp:697] [c10d] The client socket cannot be initialized to connect to [localhost]:12345 (errno: 97 - Address family not supported by protocol).
+2024-05-22:14:51:59,800 INFO [huggingface.py:164] Using device 'cuda'
+Traceback (most recent call last):
+  File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main
+    return _run_code(code, main_globals, None,
+  File "/usr/lib/python3.10/runpy.py", line 86, in _run_code
+    exec(code, run_globals)
+  File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in <module>
+    cli_evaluate()
+  File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate
+    results = evaluator.simple_evaluate(
+  File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper
+    return fn(*args, **kwargs)
+  File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate
+    lm = lm_eval.api.registry.get_model(model).create_from_arg_string(
+  File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string
+    return cls(**args, **args2)
+  File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__
+    self._get_config(
+  File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config
+    self._config = transformers.AutoConfig.from_pretrained(
+  File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 934, in from_pretrained
+    config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs)
+  File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 632, in get_config_dict
+    config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)
+  File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 689, in _get_config_dict
+    resolved_config_file = cached_file(
+  File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 370, in cached_file
+    raise EnvironmentError(
+OSError: /mnt/weka/peacock/idc/cronscript/ckpts//hf_ckpt//global_step120 does not appear to have a file named config.json. Checkout 'https://huggingface.co//mnt/weka/peacock/idc/cronscript/ckpts//hf_ckpt//global_step120/tree/main' for available files.
+Traceback (most recent call last):
+  File "/usr/local/bin/accelerate", line 8, in <module>
+    sys.exit(main())
+  File "/usr/local/lib/python3.10/dist-packages/accelerate/commands/accelerate_cli.py", line 46, in main
+    args.func(args)
+  File "/usr/local/lib/python3.10/dist-packages/accelerate/commands/launch.py", line 1082, in launch_command
+    simple_launcher(args)
+  File "/usr/local/lib/python3.10/dist-packages/accelerate/commands/launch.py", line 688, in simple_launcher
+    raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)
+subprocess.CalledProcessError: Command '['/usr/bin/python3', '-m', 'lm_eval', '--model', 'hf', '--model_args', 'pretrained=/mnt/weka/peacock/idc/cronscript/ckpts//hf_ckpt//global_step120', '--tasks', 'hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc', '--batch_size', 'auto']' returned non-zero exit status 1.
diff --git a/ckpts/checkpoints_zero_stage_2/global_step120/debug.out b/ckpts/checkpoints_zero_stage_2/global_step120/debug.out
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..1a4e5626074987ab25a976957fb88c851ccb52a4
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e03eec5cdd561b7923d895648bb55777e32047dd16c045cd4e2231b332b763c
+size 910989488
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/bf16_zero_pp_rank_0_mp_rank_01_optim_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/bf16_zero_pp_rank_0_mp_rank_01_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..72570d5688cc54231630148018d2fc7925b0e818
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/bf16_zero_pp_rank_0_mp_rank_01_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e699c85dd36c60844f1bca7b18b4f6677ccba2910634a94f50892cc9bc7f2f16
+size 910989488
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/bf16_zero_pp_rank_0_mp_rank_03_optim_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/bf16_zero_pp_rank_0_mp_rank_03_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..d12be608b0969e4cbe49976cb3aa199b313de4ea
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/bf16_zero_pp_rank_0_mp_rank_03_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6fbb23d8da6093b1d5b6d704908c284245278dec3367203ba4309632aa574e3e
+size 910989488
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/bf16_zero_pp_rank_0_mp_rank_04_optim_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/bf16_zero_pp_rank_0_mp_rank_04_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..02aebdc0b4ef30292e727a99ed97930cb560e0f7
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/bf16_zero_pp_rank_0_mp_rank_04_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:51bdaf44e5b5891f9b56464bd6123cf535eb7d8d9c2165f5ff529a28f1985349
+size 911002480
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/bf16_zero_pp_rank_0_mp_rank_05_optim_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/bf16_zero_pp_rank_0_mp_rank_05_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..992e346ed7ca187e9ecbcdc496a77d9720f89690
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/bf16_zero_pp_rank_0_mp_rank_05_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fcca01f8760f4c9b41a8a7958445c5d6fc5fc95e4eaac50d1c0348cadde05143
+size 911002480
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/bf16_zero_pp_rank_0_mp_rank_06_optim_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/bf16_zero_pp_rank_0_mp_rank_06_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..b075468da1d3c966f05e38c9cb55c3c0a1648946
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/bf16_zero_pp_rank_0_mp_rank_06_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:508413cf9b9dab4df41f5af2f3e2224c711b798be01c1642a8397dddbe58f3d7
+size 911002480
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/bf16_zero_pp_rank_0_mp_rank_07_optim_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/bf16_zero_pp_rank_0_mp_rank_07_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..46bdf37f07b102da7016c9f8a187e8364e40f63b
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/bf16_zero_pp_rank_0_mp_rank_07_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e68172f4ea31351b9822a494bff20f6ae833910787c1b99c4072d0c184921d0c
+size 911002480
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..57e0f6a72a0d0878db861b790091fab323aa8fad
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:afe9629390d83f64b0d2cd9f6d8bbbf1a92a1b44bff468d278fe118338be50a4
+size 910990192
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/bf16_zero_pp_rank_1_mp_rank_01_optim_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/bf16_zero_pp_rank_1_mp_rank_01_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..9c6e3f639795e694bb1481433ba6f46aa24f0b47
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/bf16_zero_pp_rank_1_mp_rank_01_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2829a5c461fe6a0a3a5c13c0fb106e6d7c3eafa0f097f36346ac45586b4c0357
+size 910990192
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/bf16_zero_pp_rank_1_mp_rank_06_optim_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/bf16_zero_pp_rank_1_mp_rank_06_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..1e09309f9580ca5c1126910e3de32275dbcc569b
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/bf16_zero_pp_rank_1_mp_rank_06_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3aa7730861c43f1b8fe5d6b20d270e072f39ff03f32680501d520905e87285be
+size 911001904
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/bf16_zero_pp_rank_1_mp_rank_07_optim_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/bf16_zero_pp_rank_1_mp_rank_07_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..0d5939c49abd97300b0d06e23a768cba767a25ba
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/bf16_zero_pp_rank_1_mp_rank_07_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f7552b42ef6bd6c18b6f3ea9d051fef799e9536b0206712ebe5a3d3155d75538
+size 911001904
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/debug.err b/ckpts/checkpoints_zero_stage_2/global_step80/debug.err
new file mode 100644
index 0000000000000000000000000000000000000000..662d20b6fd47e26778f2f99294eda5f32dde4493
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/debug.err
@@ -0,0 +1,65 @@
+/usr/local/lib/python3.10/dist-packages/transformers/utils/generic.py:481: UserWarning: torch.utils._pytree._register_pytree_node is deprecated. Please use torch.utils._pytree.register_pytree_node instead.
+  _torch_pytree._register_pytree_node(
+/usr/local/lib/python3.10/dist-packages/transformers/utils/generic.py:338: UserWarning: torch.utils._pytree._register_pytree_node is deprecated. Please use torch.utils._pytree.register_pytree_node instead.
+  _torch_pytree._register_pytree_node(
+The following values were not passed to `accelerate launch` and had defaults used instead:
+	`--num_processes` was set to a value of `0`
+	`--num_machines` was set to a value of `1`
+	`--mixed_precision` was set to a value of `'no'`
+	`--dynamo_backend` was set to a value of `'no'`
+To avoid this warning pass in values for each of the problematic parameters or run `accelerate config`.
+/usr/local/lib/python3.10/dist-packages/transformers/utils/generic.py:481: UserWarning: torch.utils._pytree._register_pytree_node is deprecated. Please use torch.utils._pytree.register_pytree_node instead.
+  _torch_pytree._register_pytree_node(
+/usr/local/lib/python3.10/dist-packages/transformers/utils/generic.py:338: UserWarning: torch.utils._pytree._register_pytree_node is deprecated. Please use torch.utils._pytree.register_pytree_node instead.
+  _torch_pytree._register_pytree_node(
+/usr/local/lib/python3.10/dist-packages/transformers/utils/generic.py:338: UserWarning: torch.utils._pytree._register_pytree_node is deprecated. Please use torch.utils._pytree.register_pytree_node instead.
+  _torch_pytree._register_pytree_node(
+/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/__init__.py:46: UserWarning: apex not installed, gpu_migration will not swap api for this package.
+  warnings.warn(
+2024-05-22:14:57:47,840 INFO [__main__.py:251] Verbosity set to INFO
+2024-05-22:14:57:56,278 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'hellaswag', 'mrpc', 'openbookqa', 'sst2', 'winogrande']
+2024-05-22:14:57:56,279 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234
+2024-05-22:14:57:56,279 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/mnt/weka/peacock/idc/cronscript/ckpts//hf_ckpt//global_step80'}
+[W socket.cpp:464] [c10d] The server socket cannot be initialized on [::]:12345 (errno: 97 - Address family not supported by protocol).
+[W socket.cpp:697] [c10d] The client socket cannot be initialized to connect to [localhost]:12345 (errno: 97 - Address family not supported by protocol).
+[W socket.cpp:697] [c10d] The client socket cannot be initialized to connect to [localhost]:12345 (errno: 97 - Address family not supported by protocol).
+[W socket.cpp:697] [c10d] The client socket cannot be initialized to connect to [localhost]:12345 (errno: 97 - Address family not supported by protocol).
+2024-05-22:14:57:58,809 INFO [huggingface.py:164] Using device 'cuda'
+Traceback (most recent call last):
+  File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main
+    return _run_code(code, main_globals, None,
+  File "/usr/lib/python3.10/runpy.py", line 86, in _run_code
+    exec(code, run_globals)
+  File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in <module>
+    cli_evaluate()
+  File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate
+    results = evaluator.simple_evaluate(
+  File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper
+    return fn(*args, **kwargs)
+  File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate
+    lm = lm_eval.api.registry.get_model(model).create_from_arg_string(
+  File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string
+    return cls(**args, **args2)
+  File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__
+    self._get_config(
+  File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config
+    self._config = transformers.AutoConfig.from_pretrained(
+  File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 934, in from_pretrained
+    config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs)
+  File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 632, in get_config_dict
+    config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)
+  File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 689, in _get_config_dict
+    resolved_config_file = cached_file(
+  File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 370, in cached_file
+    raise EnvironmentError(
+OSError: /mnt/weka/peacock/idc/cronscript/ckpts//hf_ckpt//global_step80 does not appear to have a file named config.json. Checkout 'https://huggingface.co//mnt/weka/peacock/idc/cronscript/ckpts//hf_ckpt//global_step80/tree/main' for available files.
+Traceback (most recent call last):
+  File "/usr/local/bin/accelerate", line 8, in <module>
+    sys.exit(main())
+  File "/usr/local/lib/python3.10/dist-packages/accelerate/commands/accelerate_cli.py", line 46, in main
+    args.func(args)
+  File "/usr/local/lib/python3.10/dist-packages/accelerate/commands/launch.py", line 1082, in launch_command
+    simple_launcher(args)
+  File "/usr/local/lib/python3.10/dist-packages/accelerate/commands/launch.py", line 688, in simple_launcher
+    raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)
+subprocess.CalledProcessError: Command '['/usr/bin/python3', '-m', 'lm_eval', '--model', 'hf', '--model_args', 'pretrained=/mnt/weka/peacock/idc/cronscript/ckpts//hf_ckpt//global_step80', '--tasks', 'hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc', '--batch_size', 'auto']' returned non-zero exit status 1.
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/debug.out b/ckpts/checkpoints_zero_stage_2/global_step80/debug.out
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_01-model_00-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_01-model_00-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..6f8f277d7d717868244b750121d5e70ab2be3ce9
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_01-model_00-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:00c05e168931cc0351f55bbc01b605bd26cb1a84f2e3789a0677e746fb2da53c
+size 51905935
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_01-model_01-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_01-model_01-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..0a7cb05967d09421f209bab31abafa707db24cf7
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_01-model_01-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b90298c6641f7b52adafb4fd56979bc6d8e497a21b0166f4df6612757c773d59
+size 51905935
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_01-model_03-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_01-model_03-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..e8fe449212f908418c98417cd45102af52ef957a
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_01-model_03-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:22c24e2763bf587f0e729aa2fcb55812f234b3d2f9f076a2799d749dfdf1634b
+size 51905935
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_03-model_01-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_03-model_01-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..8663190e03f178083da03aa7f5634ab6dfebdb9a
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_03-model_01-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2ee72f0ec50c3b33cf51e463e821b2c5f8870569b2f1bb8cd5615bb4bc0dc6fd
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_03-model_02-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_03-model_02-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..a559c6072e6d4561742cf65c48cbd31a7d1ceea9
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_03-model_02-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0de0ada53aad4505363ce604c322467b815564de3167c06adeea8280b420d8a4
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_03-model_03-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_03-model_03-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..709b8474dc7ecb95576c4a63c8f10127b05b19f3
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_03-model_03-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d7a688cd078a5ccdfb6bd32a4ac0e159816a08d39a1ac85034c339a12ccf9876
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_04-model_00-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_04-model_00-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..4f49485e417e1d096bec6ef15a93b59c28395119
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_04-model_00-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9b83a193c99ce934287487c28901f2230ac990f91b9dfc2f3bebf77aff7b8d75
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_04-model_01-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_04-model_01-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..8cb10ee834c0162c4136c7fe01ad34466d676a83
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_04-model_01-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0d384bdc809026461398c1196e67e3fd10fd68b619d786ceb2cd6960fa0df745
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_04-model_02-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_04-model_02-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..01d63d5a8c28086eaa7b4129af5e48f4a99380ee
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_04-model_02-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d0d489e99dc6d78c2a5f6d0ec6462891f77b3ca90d791d3d98c438ede169e1bb
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_04-model_03-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_04-model_03-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..853b8040cac2cb2109db6f33fa3ea90e29060eaf
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_04-model_03-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9deb1b388210a7fbdeb0778465f7c93e566fe585abf088bd33fd14b29480a9b0
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_05-model_00-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_05-model_00-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..fd22446b0b72339f8b002ad0d31cc477067e74fd
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_05-model_00-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:33709595b98603186e534fd86ccc2ce5a7ae02f8375a40591745687abc2b0867
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_05-model_01-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_05-model_01-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..9c4b19a4a78cd89c24c5c345835bac8c08b028f3
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_05-model_01-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:159d1f3fcaed1108f84afbe7887c621bb7b73e00bf78f9568fa134f522690b8b
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_05-model_02-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_05-model_02-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..b6f320329e900022705c12a4482f39ae197756f9
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_05-model_02-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9edde2bf9dae4e3d255e8984281a3192858d27e00fa017fed286f440ca778d0d
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_05-model_03-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_05-model_03-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..1cb78234f2b8b93738d9f67d44b78b232a1f1dbe
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_05-model_03-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:db0a823b29bc1d3f679b5d381ea84a951fa9383effd9606c760531e5a7006835
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_06-model_00-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_06-model_00-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..5928c18dc47a0ae47e329166bba1013a17b36f96
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_06-model_00-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dc91533e49216a2faf3050f775ccab91a135b9f6ec82b7141a2bf08aabf5506d
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_06-model_01-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_06-model_01-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..59da81595e428b9777b2a86bb306f7d19b876843
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_06-model_01-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d14cebc24fefb4ac6aa58e8cf781bf40cac78d3d781f8ebcf1267bf958eda359
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_06-model_02-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_06-model_02-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..208e68690f3d3aac66947737373c5f4f551214e3
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_06-model_02-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:921ccd22b999b40d54e3a57cb2c43fc57e554ecce6664b04156323f952ba1a9b
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_06-model_03-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_06-model_03-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..823686984af7c298ec10aade5596e97e325c4e81
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_06-model_03-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eb789b6cd2096d31dcddd6a175d4c7147c64eadb6ae53704f740341c68101368
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_07-model_00-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_07-model_00-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..91c0fa16eb3405ba6c189d4e7e5fd2a1200225e2
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_07-model_00-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:59af0c075636c9d46d1251626b77cc6521d53bcbd62784e15feb213f2c4a589c
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_07-model_01-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_07-model_01-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..fedcecc1ab1b5e6a1befba59f932bbe392f6b56a
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_07-model_01-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:64e3b59c80f6094d58a847f879b12e59bf9ce25e85d1c110a005b3771c31ba74
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_07-model_02-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_07-model_02-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..cdd82939c89a49d46e2b8b3132321467aba269c3
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_07-model_02-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:da83697debb11aafc3229b569f81add5e75d4d87c2ebb02d7bc663c272abb2ed
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_07-model_03-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_07-model_03-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..9997f56bdef9b15193dd2d1be784cc11b1cc805a
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_07-model_03-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c251848ecce86c3b3374aba783c473ad22c2105da892774d003336bcb7a4bb1f
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_08-model_00-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_08-model_00-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..ecf1f4b2268e1f47518977b66aa1f9398153f090
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_08-model_00-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:40c25384673523d6fd9ebad91a7ad1074dba6db1dd90b30929d199dc1977fd9b
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_08-model_01-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_08-model_01-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..b501a4fe19ee0277d02904a940fd2507523f961f
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_08-model_01-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b9ba036e29617f0221a333d06540a9103c85c160dfd47c3a26eb61ff449e805f
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_08-model_02-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_08-model_02-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..8c1d5c217bb36b3cc5d8883792650fba033aeb16
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_08-model_02-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2cff6dc45b0c749585c295db43807c141b3e7c1928dcc12058ce92894aef1079
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_08-model_03-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_08-model_03-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..d8aa86b8543168f6fc07e8f64d4c285a57c57d6b
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_08-model_03-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ccfa5f86ed8813455f03bd4cd2df5e1309abd0b62869a421d5ea6b5e0c90fc37
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_09-model_00-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_09-model_00-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..4f0a582ef28acaa426726e2ca34f70da4d923752
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_09-model_00-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cce272865ff0945ec37429df49c6e4426416a2e03a39174e6dc4d6e0a58200cd
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_09-model_01-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_09-model_01-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..5b8e93fe6e97dce39fb244bdb758c23dea6d05cb
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_09-model_01-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bf03be5b64a16d32fc6634bd87ab6e42d3a6e4cc90a602eae0bcbf8e9d26922e
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_09-model_02-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_09-model_02-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..e29a228fcc2d5584e6030f6fb90e00efb3ace808
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_09-model_02-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:404c9566e4dd7a881435b73ba1e43f04ef575accb6452ceb7714941473fdf234
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_09-model_03-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_09-model_03-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..01c820d1c5937d8c4f95e1becdd5f0af03e37034
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_09-model_03-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:69cbcba8ef4c2444b2f65d4a3c2efba1a4f8c588438b9d6cf25509cf12919990
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_10-model_00-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_10-model_00-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..3ef4a8afb07fa8933ae083bc095f2fbb86df6d55
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_10-model_00-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ac9260b078996250637ca1512e376ced0ce76d63aefac7ecdf00deaa650bc9ad
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_10-model_01-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_10-model_01-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..ac66d17f842dfa7ed616386ef43d7d193bddb1b0
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_10-model_01-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8fd495d8943c8ef645d1cc991749988786741338883ca19faee6e9c0d3998970
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_10-model_02-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_10-model_02-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..90a4bc28cf4d752f11affb856b7f842c0f037f17
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_10-model_02-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8c9bd456b2466d431638444c623387d2a4c1b6671b8de82d591bceef88edb1d5
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_10-model_03-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_10-model_03-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..179fedc9e495a81fc63d2635c29c044ccdd3e4f7
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_10-model_03-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8fef66ee9450541ef97861fc22fc20ab73b0f97e3a2b9605177766de42b03761
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_11-model_00-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_11-model_00-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..9a2056cb404e1567e5f7a9d42bddbbc8cd131986
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_11-model_00-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0cd4d3007b7bb0081c39fe62fc528e82e2403502e786de5f510dac86baebefd3
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_11-model_01-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_11-model_01-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..00c8af595b8842bb5cb5c78bb20e19385c306be5
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_11-model_01-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:386d1245f751553634d5e088337a8ec92b6b48f3aca4feedd351696a3f749e67
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_11-model_02-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_11-model_02-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..b6fcad518e9503f3a8ecf6e9fba199860a7b0af7
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_11-model_02-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4950516c621231ed21e82c823b24f60dfee5b9aa9a7e347510f972f67b08ba89
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_11-model_03-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_11-model_03-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..2e5619985ff76c34b2fbd0237b264eeac1b1e8a9
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_11-model_03-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f3979cd2a04ba27d53e2f9b094d205090c6d88d3b472c8d6a134e4bdee321415
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_12-model_00-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_12-model_00-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..882d1aaed6cc5b648979a83fd2791eca34708551
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_12-model_00-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:106ce9d1905959ef7c0da454f57ae3d46377e90efff4065fe93ed2d46d762a65
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_12-model_01-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_12-model_01-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..86b2d95bd2cd515e4e9ce36378bbc3fbbd208889
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_12-model_01-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c46a97be50f86af6ec961a18d0eae699d0bbf31e2975f55ef87140df51378384
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_12-model_02-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_12-model_02-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..d37c50fa97d014f482cd0ddc49fe9646571810ef
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_12-model_02-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:99ab227cea8c7702a1fa932350f8d7bbc57ff68bb1888aaad8459cc8020e41f3
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_12-model_03-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_12-model_03-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..811e42aef03612bc361f8fe545ad7a2cb4c74aa6
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_12-model_03-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:38b19dd675a0e675319477f00b36ad800b18f98550f16b4147b37176ae5a3a0b
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_13-model_00-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_13-model_00-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..7d85378fe3fb11ed43381cd8c9d5de0cf423d078
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_13-model_00-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fd5f025f1e306a8a0779d5acff03bf7ba475c46ef475bcce989a877f3de4f333
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_13-model_01-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_13-model_01-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..57b64caef4012f221bd45b993b8214ffd584d281
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_13-model_01-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d82c03e9a55e91cdc7e996fbef5973885870614118e3954abc660415960eef1c
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_13-model_02-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_13-model_02-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..629025d684f79b1f4144cd4c91e86cb924f7a8cd
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_13-model_02-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4c051ad7a1dc46856bda4aaf331eaa5c4a22c630c226545c53875dc32ea7520c
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_13-model_03-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_13-model_03-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..510f22a4372bc30bcc6edaf046ccf26d0b761f11
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_13-model_03-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:390f9a9a6c2d8ecdd3038f350a432ecfe7adc62dd19578bc9131b7358fe69f47
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_14-model_00-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_14-model_00-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..b6de62f429145c3e5a1abc62ddbb92f37dba5f99
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_14-model_00-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2aaaea1421b2ea14bc98037d20555594d456cb165895b9c3b3b5050d66e5bd7d
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_14-model_01-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_14-model_01-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..4954f5b57b85a00b91fba5dad340699662cf927d
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_14-model_01-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eaab62d4f086244c7034bcc902022e5c6c75645001ac5ccd7ee0be8df9a97f73
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_14-model_02-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_14-model_02-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..2d71321fab22aa3e55ef1bac0f31ee96572f5618
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_14-model_02-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e6e4b0cc005818fe7b0a71c1dd4b572a03e83c2992e2cf2e3cba1f83426a7d9e
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_14-model_03-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_14-model_03-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..782287107b9af2a4070b6f77492a8d6be51d10c0
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_14-model_03-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:440885b94fd113727c87cc8acfb50c47c1d58d2cb156b44d1c2100bc5b043690
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_15-model_00-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_15-model_00-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..2daab9243d3c96b01711c4142a63e23dba8b6632
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_15-model_00-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3021cece99ffb66e892e7648450ab8afdbad6954a07cfc4d299d345bfb175d9f
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_15-model_01-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_15-model_01-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..cf30974b7b0d4d8141a2a17585aa419315897690
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_15-model_01-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:547f1174e7329a85be2a8afc900f1bbc26e14440d4dd5826525f6905adf6db3f
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_15-model_03-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_15-model_03-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..981e3fd2b071752f8fc1b525d35912062421c643
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_15-model_03-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d276be3820aada96758d5030f443b09bcba266eda63da0189af80bc84421d6cb
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_16-model_00-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_16-model_00-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..cc8735dfa17ee6bee2c5c3346b29539d287d8d3c
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_16-model_00-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a55fef50632a9b05d9e51e4f712f268d8ffa1098b1cad852c624da5fa704dbf2
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_16-model_01-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_16-model_01-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..3b555f35a818432dba9447474ffdc2159e0bfa78
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_16-model_01-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0d16490c583a26ef0432c1f0344817c350e6626818c473a144fd99f4421034f2
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_16-model_02-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_16-model_02-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..282cc01f09fa6391078c40a02ac897c053d885b1
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_16-model_02-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0fe8b5ba3e1a81f9930a7b7bc193dd5316fc805f0f4bcfe1264ea2beeb5c2a0e
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_16-model_03-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_16-model_03-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..493cdae7ec94c5ac6c6482dd42ca249048acee29
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_16-model_03-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e9304834bf0462cce39dda60287378cf93b8b3ff10ffac232e80d32988a60c7e
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_17-model_00-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_17-model_00-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..f6c98f7a3a00a7a4f651f49d2934a680a8a25efa
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_17-model_00-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2ed66af5c54e6e5cff08f99382a289424d25ebd7b57d4d81116c79d783ee2d4c
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_17-model_01-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_17-model_01-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..cb7ebcb2faea35b0c25f6db65689adacf0403c09
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_17-model_01-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:94574c84127c86d0ad3c0513fd175973274e045661a176e3c7b05c56daccf078
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_17-model_02-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_17-model_02-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..cd2ed99e224cb66a0fd2e2a291db0c7558db18fb
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_17-model_02-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b568ada3a1cfd4306b702a7fab746f193ad1b710d04e85b0c999ed5a41256896
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_17-model_03-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_17-model_03-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..5b4547110a7f142c6f5f8c0611b7de3f0be17997
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_17-model_03-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aff2888403ef1325edab510e4ace727836d86f726d1b2e1247cc16fd565a7946
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_18-model_00-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_18-model_00-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..4c9f82fb1bccea80b343b618c9a30e5db2bdaca0
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_18-model_00-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c91eb050999f99750eea6811ea89d51dd815f60b53fd8a2f4d9fcc5cdced5670
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_18-model_01-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_18-model_01-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..650b44b2e08324bec8709a960d93d1d190a781d1
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_18-model_01-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2b779e443077fb89855ac40675bd0a3bf1cb55f9c4d12e0e322d6a8ecbe32952
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_18-model_02-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_18-model_02-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..653b03c9d4fc2c7783b948927e5b2fb0a40630f8
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_18-model_02-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:494af2ab72b04ec47cf2aa842d7b0eb7bf5e5b1107cdce656a90fd61c3918e4f
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_18-model_03-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_18-model_03-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..c15a21e45604235eacf4161c9bbd73e7124425b2
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_18-model_03-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:06b75bd86fa4f29f9399a5d3941cc1b4dc858280f3a768efb4db917cc19ff523
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_19-model_00-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_19-model_00-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..45f80993bc3680e5c3caca7e91e980c584fb11bf
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_19-model_00-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5e8eec1003a590c27b8f825648cd6242500b14df6b2cbf5a395cb357e2898fe6
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_19-model_01-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_19-model_01-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..5c59f04fd3120628bc7bb2bdd2f4677fd3fea24a
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_19-model_01-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fb21ab6a9730c90acc35662e0064451a2aca568c009c8586a5a954fa0d6e55c9
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_19-model_02-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_19-model_02-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..03e337a18d04c6c3559e49c0371fb6f96b4aed31
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_19-model_02-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:54313592841b96291116d2fe469ce7b66e42d473ac61d0f23a7ed0470a3fea3f
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_19-model_03-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_19-model_03-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..5caf14b661920de39ee4b226c99f29c041b02bbd
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_19-model_03-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ca5188e6c22610cc4194130282ab92d0ff42cf52e570d56609088194887dcb92
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_20-model_00-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_20-model_00-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..18922d793cebabcfb52d1a6a141e6e1e9440ecfb
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_20-model_00-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:810ae3ee5296e53cd12f2645845dcc407057b7881100e0a22c1399c113a5b927
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_20-model_01-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_20-model_01-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..a8b3e91a820dbbfda72196d7bbb5af72f247e922
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_20-model_01-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a832294c0dddfdfb4ce11c1e90ce0363f3b7c0824985603c1d213d270b1f3c48
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_20-model_02-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_20-model_02-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..2ef973f6c6a868d1fa0461e2f76f4b929704ae14
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_20-model_02-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c3618a5abf70f0a16a1e9161d040816c1110ce5c6e1e1fc3104d099e3880f471
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_20-model_03-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_20-model_03-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..c15f4dc3c85a27f1c595d5c4d666dd7ac9f41c40
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_20-model_03-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e4be8fbfee183c92772b1f0a0c5e457d30b1a26523f45526bee7e047144882f1
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_21-model_00-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_21-model_00-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..8a853d1f25c396757dfaf7e74033b4c9ecb76233
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_21-model_00-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ac823949bc830b7878f36d2f51df1e92dabbda24b1ed5c1fbef63d4bf965821
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_21-model_01-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_21-model_01-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..1a5fd3bde727ff7240d8e5017199733294d589fb
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_21-model_01-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:56b63a74cb3edc244db518217d5dedc165f419189bdb419588005aa41e06ecde
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_21-model_02-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_21-model_02-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..dac64fe18a9cad6f1f23ecc6440a957a0a64e81d
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_21-model_02-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:da1d181972444fae466cc3b22a6ba90b9d714cbebe1f5b8b4adc4fca35830892
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_21-model_03-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_21-model_03-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..55af66f43eff7ac21f9a07be90e97ae40fb5ed6e
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_21-model_03-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3417b79c03fb15803ef36bb1aae90de928bedf72431a4195999258b82bb0bdf5
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_22-model_00-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_22-model_00-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..77d4ca5f807a2467ba7adc1530ab8c3ce4dd9352
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_22-model_00-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e0a4863f2d285e40684851a2b61fa68f5a5835a4cd5ea4a1f94fda3049ef9273
+size 20983188
diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_22-model_01-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_22-model_01-model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..be8f499961682a676883226aa7fc6e9af1f68af2
--- /dev/null
+++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_22-model_01-model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d6a8e7da782c7b9da8859d7754260fdbb5e9d39604c32da6a67694bf48373e81 +size 20983188 diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_22-model_02-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_22-model_02-model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..0d2773a6dac5476d8c199073c63fa794659b3c5b --- /dev/null +++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_22-model_02-model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:822f22a6e44b48576b0eae73e6eac5249ed90622eb007c85ff897d1686a50a0c +size 20983188 diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_22-model_03-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_22-model_03-model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..50159fe24c7dee03628e0b2654141ac3c66ae5a8 --- /dev/null +++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_22-model_03-model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0dfe97c7f9fdbeba63b2ac3ec218c37349ecc48e134d4438565192b74bb580cd +size 20983188 diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_23-model_00-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_23-model_00-model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..8eefd0a9ed889061fbff3c26c33c61b187e95109 --- /dev/null +++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_23-model_00-model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8bf4b858ed94b690bc9e34adbac7e128df6f751542ba0407cf7993e2311e629f +size 20983188 diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_23-model_01-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_23-model_01-model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..f40656ff748917c6fbbfc8417503f5fba65b505f --- /dev/null +++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_23-model_01-model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a4413be42e06c71dc087f700a89c73431161d592217b492093c9446c134c2c1a +size 20983188 diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_23-model_02-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_23-model_02-model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..9d2a1da9eac7b883498594cedef4fdd749f2c675 --- /dev/null +++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_23-model_02-model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8862c407d9677afe408bc5e413507d694c24bf81b700d24bcfed1e025000e3c0 +size 20983188 diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_23-model_03-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_23-model_03-model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..a38358d0cd21e13220e7b4785502518ac22cb31e --- /dev/null +++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_23-model_03-model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:566e69fb7a9e477b3fbccd163a3e66a58ded244cffe5cc5dc1c9cdd43f3d1b93 +size 20983188 diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_24-model_00-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_24-model_00-model_states.pt new file mode 100644 index 
0000000000000000000000000000000000000000..bb1860dd8d5eeffb447ab0bbb092f0370837d8d3 --- /dev/null +++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_24-model_00-model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c0c276ecbeffca1ee37db9961240247a9bc3ba283e1d201a640bdd53ffe9f0bc +size 20983188 diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_24-model_01-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_24-model_01-model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..eaf430332c1c3056eea53662b9ed53833da3d62c --- /dev/null +++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_24-model_01-model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aa5d0b759a10b3ed7a39f7c906d6fdf823483174ce2db84eeab7b547c4299588 +size 20983188 diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_24-model_02-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_24-model_02-model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..b17428a4120db9690a0bed2791a02b58b4c6afc4 --- /dev/null +++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_24-model_02-model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:824b0bcca4a270014ced0f02bdd8cc86ceb0d277b08e12ab8e8f7f53c6198c92 +size 20983188 diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_24-model_03-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_24-model_03-model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..060010dbd8bf7e954388788782137d448fceddb3 --- /dev/null +++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_24-model_03-model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d6739e1b7f05df3dc92270e80cf0a276d19b8785f10b03b0e6a0d034c6853db4 +size 20983188 diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_25-model_00-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_25-model_00-model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..7f8979e4f06619733d7dfe7ca707a004649f2fbd --- /dev/null +++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_25-model_00-model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b617f98aa2e6fda0263dd07552f72d532839e75ab09f7df5ed6285c90ed5b6b8 +size 20983188 diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_25-model_01-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_25-model_01-model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..707b7db18075672da33be12e40777a0d2e6ebf51 --- /dev/null +++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_25-model_01-model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b01b0112338e60c5d9b1d73e071f8930d007d2dc1b3f413c2236968c9ae9ad43 +size 20983188 diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_25-model_02-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_25-model_02-model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..10a80c94a41cc46f9b5aef7ebe2fe6c8fab2c8c1 --- /dev/null +++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_25-model_02-model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:81d2ee43f390ff5c5056804e6c10890dff28443b77e233b4d333156f4a36f214 +size 20983188 diff --git 
a/ckpts/checkpoints_zero_stage_2/global_step80/layer_25-model_03-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_25-model_03-model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..2d41dca527dfde08984b204650213a7d3f23589d --- /dev/null +++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_25-model_03-model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3969756d646a5b6acd5b595f54a75d9f48c3bd9220b13b205a3061c5cdfbdb0a +size 20983188 diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_26-model_00-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_26-model_00-model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..d89a17f30a13dc432632b8893aa46d6d07ff766a --- /dev/null +++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_26-model_00-model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8791003a82f563a6c3d9930da61976cf4301953c092d41a541d663ac83a3140b +size 20983188 diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_26-model_01-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_26-model_01-model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..f243d4a800629b6de33919de5161f21d0652a98a --- /dev/null +++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_26-model_01-model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:24b27752d2b93f864290984315dbe64462a6cdc9c5518733e28d1eccae8825b4 +size 20983188 diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_26-model_02-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_26-model_02-model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..efa61e81759d7b4b066c6d9a742babe511aa2871 --- /dev/null +++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_26-model_02-model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c99e1549f6f5bd5c9f57da04d2916df7df2b6cc33beab99dfac8a985f4a4c84a +size 20983188 diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_26-model_03-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_26-model_03-model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..be0af42ce752237c56d943d814bfada60be815af --- /dev/null +++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_26-model_03-model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6429d0ac013182d4708c1e598eedfeaba0dd016b4fa8ec39f176d7f52052a43e +size 20983188 diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_28-model_00-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_28-model_00-model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..0288b340080b7c57a5667d8f964bf00ff6b4f7af --- /dev/null +++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_28-model_00-model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f4f9111fa0f656ea384e87f4c3df21535251aeb0f0afa459e763438e5710c88f +size 5519 diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_28-model_01-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_28-model_01-model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..c07ffd56ae55cbf337ee2817bffd0eb9476babb7 --- /dev/null +++ 
b/ckpts/checkpoints_zero_stage_2/global_step80/layer_28-model_01-model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5cfd40b6107dfcecb3588567ab3914c0039eb142433fa8f1496339fee08e856c +size 5519 diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_28-model_02-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_28-model_02-model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..ef475ca5a3fefa12daf7bd3bbc0f4f82c4f7eee3 --- /dev/null +++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_28-model_02-model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f1d089a4337f9e4713f1ad6ae9b7b623450f7d446baf04f37ccaa4fcc6e8dcda +size 5519 diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_28-model_03-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_28-model_03-model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..0bb7d2c53478b09ea6e66a28ee024ad68f7f8abd --- /dev/null +++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_28-model_03-model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:445f4f12ecc6e61f332d48bc83fd48e1fa96ce7bbe476e6e17d63a1d88a093d9 +size 5519 diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_29-model_00-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_29-model_00-model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..09d8331deff8a78058d5e41c1a3d29af094225b2 --- /dev/null +++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_29-model_00-model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8fef67b0a2e66b3936c01e46c8dafe53648111284bf3e719b0ebb3b599c1e000 +size 51905935 diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_29-model_01-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_29-model_01-model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..2b48a3d172ed208c9e30bf3d482feb2043c4f47b --- /dev/null +++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_29-model_01-model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:67047679800c25ab50fc9c4e1ae84df55d64fdf64a0308feb9a2add996a7d07d +size 51905935 diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_29-model_02-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_29-model_02-model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..e2e81b33367b82a6ca0492b42189c695e3cb7631 --- /dev/null +++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_29-model_02-model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ce4ffd96f03a82d3e9d3c0ff89bc51dddf57c413c4e077fab68de039a6fdabe9 +size 51905935 diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/layer_29-model_03-model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/layer_29-model_03-model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..6e92a751569bb2efc8fb7ae70a5c90ffaf0b2bf3 --- /dev/null +++ b/ckpts/checkpoints_zero_stage_2/global_step80/layer_29-model_03-model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ca9614132a18ec81abb5ce479af1e43c11d0944b45c83d3a578af2d29c8c778a +size 51905935 diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/mp_rank_00_model_states.pt 
b/ckpts/checkpoints_zero_stage_2/global_step80/mp_rank_00_model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..4b6bb0ed1b1e90e4ac21d2a2c70c3adfcf6e71ad --- /dev/null +++ b/ckpts/checkpoints_zero_stage_2/global_step80/mp_rank_00_model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:248c3bfb4d9333f29546bd0b9afaa0d6cec817dc341c7cecc8b6daec507db488 +size 4230084 diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/mp_rank_01_model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/mp_rank_01_model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..1c9cf2cc1505bb747631166eeb244cc893aa59ff --- /dev/null +++ b/ckpts/checkpoints_zero_stage_2/global_step80/mp_rank_01_model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6338497930be280426064cc7abbbaf23608fce81285136bee6cee9579d29c3c7 +size 4230020 diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/mp_rank_02_model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/mp_rank_02_model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..757f8d6506cd4d54bdf6bec8632086c5d1fba3f3 --- /dev/null +++ b/ckpts/checkpoints_zero_stage_2/global_step80/mp_rank_02_model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1ee99d500a1ab28ba1a9c1410096e9446535d2af2e6670616c1f0ef518c3b631 +size 4230020 diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/mp_rank_03_model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/mp_rank_03_model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..80a12dcab3f79a252262ad86db0aa03f80bb7d37 --- /dev/null +++ b/ckpts/checkpoints_zero_stage_2/global_step80/mp_rank_03_model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5bdcf353c80c6ee0b2f296b4123df1444b6ee988fcc7039676e30366d2ae1a92 +size 4230020 diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/mp_rank_04_model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/mp_rank_04_model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..0e05f845f05bf004e1d05bde30caaa3fceba194b --- /dev/null +++ b/ckpts/checkpoints_zero_stage_2/global_step80/mp_rank_04_model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2d10e30f538d9e1b2fb52e0d99c9e49e70d5d27d63cf8cd50157878fc1bf3eff +size 4230084 diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/mp_rank_05_model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/mp_rank_05_model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..d96cdb2d93f1ec05c9e708886d404254d2aa7311 --- /dev/null +++ b/ckpts/checkpoints_zero_stage_2/global_step80/mp_rank_05_model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c28b496aa07ce8ce02014956f88e30108446d183cd34a4914a1fac4a20546f4b +size 4230084 diff --git a/ckpts/checkpoints_zero_stage_2/global_step80/mp_rank_06_model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/mp_rank_06_model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..2e0f93ca68b9fc7377f57636e55b55d572733c18 --- /dev/null +++ b/ckpts/checkpoints_zero_stage_2/global_step80/mp_rank_06_model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0c63a0ffdc3354c06ac43f69a873b51aeb3d80562ef5d8d3b339be80af010745 +size 4230084 diff --git 
a/ckpts/checkpoints_zero_stage_2/global_step80/mp_rank_07_model_states.pt b/ckpts/checkpoints_zero_stage_2/global_step80/mp_rank_07_model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..c9353bc38e79b1c9c34de218bed8c2d5c71af0a2 --- /dev/null +++ b/ckpts/checkpoints_zero_stage_2/global_step80/mp_rank_07_model_states.pt @@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:74055ebc8e077321bdb4577d270ca867236033d1ffac996bd0d00d7ba42b94fe
+size 4230084
diff --git a/ckpts/checkpoints_zero_stage_2/latest b/ckpts/checkpoints_zero_stage_2/latest new file mode 100644 index 0000000000000000000000000000000000000000..0e13e0563ec45a863d519305a1251d3e72b9e3e4 --- /dev/null +++ b/ckpts/checkpoints_zero_stage_2/latest @@ -0,0 +1 @@
+global_step120
\ No newline at end of file
diff --git a/ckpts/checkpoints_zero_stage_2/latest_checkpointed_iteration.txt b/ckpts/checkpoints_zero_stage_2/latest_checkpointed_iteration.txt new file mode 100644 index 0000000000000000000000000000000000000000..8bc6583717d617da1f8bf8bbaa7c16c25ef17113 --- /dev/null +++ b/ckpts/checkpoints_zero_stage_2/latest_checkpointed_iteration.txt @@ -0,0 +1 @@
+120
\ No newline at end of file
diff --git a/ckpts/checkpoints_zero_stage_2/proc_files.txt b/ckpts/checkpoints_zero_stage_2/proc_files.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/ckpts/checkpoints_zero_stage_2/zero_to_fp32.py b/ckpts/checkpoints_zero_stage_2/zero_to_fp32.py new file mode 100644 index 0000000000000000000000000000000000000000..49b846633d6eb1e836e34681e44033581f4edb7b --- /dev/null +++ b/ckpts/checkpoints_zero_stage_2/zero_to_fp32.py @@ -0,0 +1,592 @@
+#!/usr/bin/env python
+
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+# This script extracts fp32 consolidated weights from ZeRO 1, 2 and 3 DeepSpeed checkpoints. It gets
+# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
+# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
+# application.
+#
+# example: python zero_to_fp32.py . pytorch_model.bin
+
+import argparse
+import torch
+import glob
+import math
+import os
+import re
+from collections import OrderedDict
+from dataclasses import dataclass
+
+# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
+# DeepSpeed data structures it has to be available in the current python environment.
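+# A concrete invocation for this repo's layout (a sketch: `global_step80` is one of the
+# tag folders added in this diff, and `model` below is a hypothetical stand-in for your
+# own module):
+#
+#   python ckpts/checkpoints_zero_stage_2/zero_to_fp32.py \
+#       ckpts/checkpoints_zero_stage_2 pytorch_model.bin -t global_step80
+#
+# after which the output loads with plain PyTorch, no DeepSpeed required:
+#
+#   import torch
+#   state_dict = torch.load("pytorch_model.bin", map_location="cpu")
+#   model.load_state_dict(state_dict)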
+from deepspeed.utils import logger
+from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
+                                            FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
+                                            FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
+
+
+@dataclass
+class zero_model_state:
+    buffers: dict
+    param_shapes: dict
+    shared_params: list
+    ds_version: int
+    frozen_param_shapes: dict
+    frozen_param_fragments: dict
+
+
+debug = 0
+
+# load to cpu
+device = torch.device('cpu')
+
+
+def atoi(text):
+    return int(text) if text.isdigit() else text
+
+
+def natural_keys(text):
+    '''
+    alist.sort(key=natural_keys) sorts in human order
+    http://nedbatchelder.com/blog/200712/human_sorting.html
+    (See Toothy's implementation in the comments)
+    '''
+    return [atoi(c) for c in re.split(r'(\d+)', text)]
+
+
+def get_model_state_file(checkpoint_dir, zero_stage):
+    if not os.path.isdir(checkpoint_dir):
+        raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
+
+    # there should be only one file
+    if zero_stage <= 2:
+        file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
+    elif zero_stage == 3:
+        file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
+
+    if not os.path.exists(file):
+        raise FileNotFoundError(f"can't find model states file at '{file}'")
+
+    return file
+
+
+def get_checkpoint_files(checkpoint_dir, glob_pattern):
+    # XXX: need to test that this simple glob rule works for multi-node setup too
+    ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
+
+    if len(ckpt_files) == 0:
+        raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
+
+    return ckpt_files
+
+
+def get_optim_files(checkpoint_dir):
+    return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
+
+
+def get_model_state_files(checkpoint_dir):
+    return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
+
+
+def parse_model_states(files):
+    zero_model_states = []
+    for file in files:
+        state_dict = torch.load(file, map_location=device)
+
+        if BUFFER_NAMES not in state_dict:
+            raise ValueError(f"{file} is not a model state checkpoint")
+        buffer_names = state_dict[BUFFER_NAMES]
+        if debug:
+            print("Found buffers:", buffer_names)
+
+        # recover just the buffers while restoring them to fp32 if they were saved in fp16
+        buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
+        param_shapes = state_dict[PARAM_SHAPES]
+
+        # collect parameters that are included in param_shapes
+        param_names = []
+        for s in param_shapes:
+            for name in s.keys():
+                param_names.append(name)
+
+        # update with frozen parameters
+        frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
+        if frozen_param_shapes is not None:
+            if debug:
+                print(f"Found frozen_param_shapes: {frozen_param_shapes}")
+            param_names += list(frozen_param_shapes.keys())
+
+        # handle shared params
+        shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
+
+        ds_version = state_dict.get(DS_VERSION, None)
+
+        frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
+
+        z_model_state = zero_model_state(buffers=buffers,
+                                         param_shapes=param_shapes,
+                                         shared_params=shared_params,
+                                         ds_version=ds_version,
+                                         frozen_param_shapes=frozen_param_shapes,
+                                         frozen_param_fragments=frozen_param_fragments)
+        zero_model_states.append(z_model_state)
+
+    return zero_model_states
+
+
+def parse_optim_states(files, ds_checkpoint_dir):
+
+    total_files = len(files)
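+    # Rough shape of each *_optim_states.pt file, inferred from the accesses below
+    # (a sketch, not an exhaustive spec; the keys are the constants imported above):
+    #
+    #   { OPTIMIZER_STATE_DICT: {
+    #         ZERO_STAGE: <1, 2 or 3>,
+    #         PARTITION_COUNT: <dp world size, or a per-param-group list of counts>,
+    #         SINGLE_PARTITION_OF_FP32_GROUPS: [<flat fp32 tensor per group>],  # stages 1-2
+    #         FP32_FLAT_GROUPS: [<flat fp32 tensor per group>],                 # stage 3
+    #     }, ... }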
+    state_dicts = []
+    for f in files:
+        state_dict = torch.load(f, map_location=device)
+        # immediately discard the two potentially huge optimizer states as we only care for fp32 master weights
+        # and also handle the case where it was already removed by another helper script
+        state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
+        state_dicts.append(state_dict)
+
+    if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]:
+        raise ValueError(f"{files[0]} is not a zero checkpoint")
+    zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
+    world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
+
+    # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
+    # parameters can be different from data parallelism for non-expert parameters. So we can just
+    # use the max of the partition_count to get the dp world_size.
+
+    if isinstance(world_size, list):
+        world_size = max(world_size)
+
+    if world_size != total_files:
+        raise ValueError(
+            f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
+            "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
+        )
+
+    # the groups are named differently in each stage
+    if zero_stage <= 2:
+        fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
+    elif zero_stage == 3:
+        fp32_groups_key = FP32_FLAT_GROUPS
+    else:
+        raise ValueError(f"unknown zero stage {zero_stage}")
+
+    if zero_stage <= 2:
+        fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
+    elif zero_stage == 3:
+        # if there is more than one param group, there will be multiple flattened tensors - one
+        # flattened tensor per group - for simplicity merge them into a single tensor
+        #
+        # XXX: could make the script more memory efficient for when there are multiple groups - it
+        # will require matching the sub-lists of param_shapes for each param group flattened tensor
+
+        fp32_flat_groups = [
+            torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
+        ]
+
+    return zero_stage, world_size, fp32_flat_groups
+
+
+def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir):
+    """
+    Returns fp32 state_dict reconstructed from ds checkpoint
+
+    Args:
+        - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
+
+    """
+    print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
+
+    optim_files = get_optim_files(ds_checkpoint_dir)
+    zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
+    print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
+
+    model_files = get_model_state_files(ds_checkpoint_dir)
+
+    zero_model_states = parse_model_states(model_files)
+    print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
+
+    if zero_stage <= 2:
+        return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states)
+    elif zero_stage == 3:
+        return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states)
+
+
+def _zero2_merge_frozen_params(state_dict, zero_model_states):
+    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+        return
+
+    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+    frozen_param_fragments = zero_model_states[0].frozen_param_fragments
+
+    if debug:
+        num_elem = sum(s.numel() for s in frozen_param_shapes.values())
+        print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+    wanted_params = len(frozen_param_shapes)
+    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+    avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
+    print(f'Frozen params: Have {avail_numel} numels to process.')
+    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+    total_params = 0
+    total_numel = 0
+    for name, shape in frozen_param_shapes.items():
+        total_params += 1
+        unpartitioned_numel = shape.numel()
+        total_numel += unpartitioned_numel
+
+        state_dict[name] = frozen_param_fragments[name]
+
+        if debug:
+            print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+
+    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _has_callable(obj, fn):
+    attr = getattr(obj, fn, None)
+    return callable(attr)
+
+
+def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+    param_shapes = zero_model_states[0].param_shapes
+
+    # Reconstruction protocol:
+    #
+    # XXX: document this
+
+    if debug:
+        for i in range(world_size):
+            for j in range(len(fp32_flat_groups[0])):
+                print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
+
+    # XXX: memory usage doubles here (zero2)
+    num_param_groups = len(fp32_flat_groups[0])
+    merged_single_partition_of_fp32_groups = []
+    for i in range(num_param_groups):
+        merged_partitions = [sd[i] for sd in fp32_flat_groups]
+        full_single_fp32_vector = torch.cat(merged_partitions, 0)
+        merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
+    avail_numel = sum(
+        [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
+
+    if debug:
+        wanted_params = sum([len(shapes) for shapes in param_shapes])
+        wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
+        # not asserting if there is a mismatch due to possible padding
+        print(f"Have {avail_numel} numels to process.")
+        print(f"Need {wanted_numel} numels in {wanted_params} params.")
+
+    # params
+    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+    # out-of-core computing solution
+    total_numel = 0
+    total_params = 0
+    for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
+        offset = 0
+        avail_numel = full_single_fp32_vector.numel()
+        for name, shape in shapes.items():
+
+            unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
+            total_numel += unpartitioned_numel
+            total_params += 1
+
+            if debug:
+                print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+            state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
+            offset += unpartitioned_numel
+
+        # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
+        # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
+        # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
+        # live optimizer object, so we are checking that the numbers are within the right range
+        align_to = 2 * world_size
+
+        def zero2_align(x):
+            return align_to * math.ceil(x / align_to)
+
+        if debug:
+            print(f"original offset={offset}, avail_numel={avail_numel}")
+
+        offset = zero2_align(offset)
+        avail_numel = zero2_align(avail_numel)
+
+        if debug:
+            print(f"aligned offset={offset}, avail_numel={avail_numel}")
+
+        # Sanity check
+        if offset != avail_numel:
+            raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+    print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states):
+    state_dict = OrderedDict()
+
+    # buffers
+    buffers = zero_model_states[0].buffers
+    state_dict.update(buffers)
+    if debug:
+        print(f"added {len(buffers)} buffers")
+
+    _zero2_merge_frozen_params(state_dict, zero_model_states)
+
+    _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+    # recover shared parameters
+    for pair in zero_model_states[0].shared_params:
+        if pair[1] in state_dict:
+            state_dict[pair[0]] = state_dict[pair[1]]
+
+    return state_dict
+
+
+def zero3_partitioned_param_info(unpartitioned_numel, world_size):
+    # e.g. unpartitioned_numel=10, world_size=4 -> partitioned_numel=3 (ceil(10/4))
+    # and padding_numel=2, since 4 ranks * 3 elements = 12 = 10 + 2 padding
+    remainder = unpartitioned_numel % world_size
+    padding_numel = (world_size - remainder) if remainder else 0
+    partitioned_numel = math.ceil(unpartitioned_numel / world_size)
+    return partitioned_numel, padding_numel
+
+
+def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
+    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+        return
+
+    if debug:
+        for i in range(world_size):
+            num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
+            print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+    wanted_params = len(frozen_param_shapes)
+    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+    avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
+    print(f'Frozen params: Have {avail_numel} numels to process.')
+    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+    total_params = 0
+    total_numel = 0
+    for name, shape in zero_model_states[0].frozen_param_shapes.items():
+        total_params += 1
+        unpartitioned_numel = shape.numel()
+        total_numel += unpartitioned_numel
+
+        param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
+        state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
+
+        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+        if debug:
+            print(
+                f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+            )
+
+    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+    param_shapes = zero_model_states[0].param_shapes
+    avail_numel = fp32_flat_groups[0].numel() * world_size
+    # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
+    # param, re-consolidating each param, while dealing with padding if any
+
+    # merge list of dicts, preserving order
+    param_shapes = {k: v for d in param_shapes for k, v in d.items()}
+
+    if debug:
+        for i in range(world_size):
+            print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
+
+    wanted_params = len(param_shapes)
+    wanted_numel = sum(shape.numel() for shape in param_shapes.values())
+    # not asserting if there is a mismatch due to possible padding
+    avail_numel = fp32_flat_groups[0].numel() * world_size
+    print(f"Trainable params: Have {avail_numel} numels to process.")
+    print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
+
+    # params
+    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+    # out-of-core computing solution
+    offset = 0
+    total_numel = 0
+    total_params = 0
+    for name, shape in param_shapes.items():
+
+        unpartitioned_numel = shape.numel()
+        total_numel += unpartitioned_numel
+        total_params += 1
+
+        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+        if debug:
+            print(
+                f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+            )
+
+        # XXX: memory usage doubles here
+        state_dict[name] = torch.cat(
+            tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
+            0).narrow(0, 0, unpartitioned_numel).view(shape)
+        offset += partitioned_numel
+
+    offset *= world_size
+
+    # Sanity check
+    if offset != avail_numel:
+        raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+    print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states):
+    state_dict = OrderedDict()
+
+    # buffers
+    buffers = zero_model_states[0].buffers
+    state_dict.update(buffers)
+    if debug:
+        print(f"added {len(buffers)} buffers")
+
+    _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
+
+    _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+    # recover shared parameters
+    for pair in zero_model_states[0].shared_params:
+        if pair[1] in state_dict:
+            state_dict[pair[0]] = state_dict[pair[1]]
+
+    return state_dict
+
+
+def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None):
+    """
+    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
+    ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
+    via a model hub.
+
+    Args:
+        - ``checkpoint_dir``: path to the desired checkpoint folder
+        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided, will attempt to load the tag in the 'latest' file, e.g., ``global_step14``
+
+    Returns:
+        - pytorch ``state_dict``
+
+    Note: this approach may not work if your application doesn't have sufficient free CPU memory, and
+    you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
+    the checkpoint.
+
+    A typical usage might be ::
+
+        from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+        # do the training and checkpoint saving
+        state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
+        model = model.cpu() # move to cpu
+        model.load_state_dict(state_dict)
+        # submit to model hub or save the model to share with others
+
+    In this example the ``model`` will no longer be usable in the deepspeed context of the same
+    application. i.e. you will need to re-initialize the deepspeed engine, since
+    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+    If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
+
+    """
+    if tag is None:
+        latest_path = os.path.join(checkpoint_dir, 'latest')
+        if os.path.isfile(latest_path):
+            with open(latest_path, 'r') as fd:
+                tag = fd.read().strip()
+        else:
+            raise ValueError(f"Unable to find 'latest' file at {latest_path}")
+
+    ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
+
+    if not os.path.isdir(ds_checkpoint_dir):
+        raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
+
+    return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir)
+
+
+def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None):
+    """
+    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
+    loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
+
+    Args:
+        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+        - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
+        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided, will attempt to load the tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+    """
+
+    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+    print(f"Saving fp32 state dict to {output_file}")
+    torch.save(state_dict, output_file)
+
+
+def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
+    """
+    1. Put the provided model to cpu
+    2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
+    3. Load it into the provided model
+
+    Args:
+        - ``model``: the model object to update
+        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided, will attempt to load the tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+
+    Returns:
+        - ``model``: modified model
+
+    Make sure you have plenty of CPU memory available before you call this function. If you don't
+    have enough, use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
+    conveniently placed for you in the checkpoint folder.
+
+    A typical usage might be ::
+
+        from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
+        model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
+        # submit to model hub or save the model to share with others
+
+    Note that once this has run, the ``model`` will no longer be usable in the deepspeed context
+    of the same application. i.e. you will need to re-initialize the deepspeed engine, since
+    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+    """
+    logger.info("Extracting fp32 weights")
+    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+
+    logger.info("Overwriting model with fp32 weights")
+    model = model.cpu()
+    model.load_state_dict(state_dict, strict=False)
+
+    return model
+
+
+if __name__ == "__main__":
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument("checkpoint_dir",
+                        type=str,
+                        help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
+    parser.add_argument(
+        "output_file",
+        type=str,
+        help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)")
+    parser.add_argument("-t",
+                        "--tag",
+                        type=str,
+                        default=None,
+                        help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
+    parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
+    args = parser.parse_args()
+
+    debug = args.debug
+
+    convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, args.output_file, tag=args.tag)
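The ds_config.json added below runs ZeRO stage 0 with bf16 enabled, fp16 disabled, and a global batch of 256 built from micro-batches of 1. As a rough sketch of how such a file is consumed (assumptions: a toy stand-in model and optimizer and a single process; the actual run in this repo goes through the deepspeed launcher and pretrain_llama.py, as the log below shows):

    import deepspeed
    import torch

    # toy stand-in for the Megatron-DeepSpeed LLaMA variant trained in this repo
    model = torch.nn.Linear(2048, 2048)
    # the log below shows adamw with lr 3e-4; mirrored here for the sketch
    optimizer = torch.optim.AdamW(model.parameters(), lr=3e-4)

    # deepspeed.initialize accepts the same JSON schema as ckpts/ds_config.json
    engine, optimizer, _, _ = deepspeed.initialize(
        model=model,
        optimizer=optimizer,
        config="ckpts/ds_config.json",
    )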
+ + """ + logger.info(f"Extracting fp32 weights") + state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag) + + logger.info(f"Overwriting model with fp32 weights") + model = model.cpu() + model.load_state_dict(state_dict, strict=False) + + return model + + +if __name__ == "__main__": + + parser = argparse.ArgumentParser() + parser.add_argument("checkpoint_dir", + type=str, + help="path to the desired checkpoint folder, e.g., path/checkpoint-12") + parser.add_argument( + "output_file", + type=str, + help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)") + parser.add_argument("-t", + "--tag", + type=str, + default=None, + help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1") + parser.add_argument("-d", "--debug", action='store_true', help="enable debug") + args = parser.parse_args() + + debug = args.debug + + convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, args.output_file, tag=args.tag) diff --git a/ckpts/ds_config.json b/ckpts/ds_config.json new file mode 100644 index 0000000000000000000000000000000000000000..d0ee47e9cda5f537a1be4430e2806d723acd2356 --- /dev/null +++ b/ckpts/ds_config.json @@ -0,0 +1,19 @@ +{ + "train_batch_size" : 256, + "train_micro_batch_size_per_gpu": 1, + "steps_per_print": 10, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": 0 + }, + "bf16": { + "enabled": true, + "accumulate_grads_via_hooks": true + }, + "fp16": {"enabled": false}, + "wall_clock_breakdown": false, + "pipeline": { + "pipe_partitioned": false, + "grad_partitioned": false + } +} diff --git a/ckpts/log.txt b/ckpts/log.txt new file mode 100644 index 0000000000000000000000000000000000000000..2f98cd8133ba02a297bf569784da2843b47d397e --- /dev/null +++ b/ckpts/log.txt @@ -0,0 +1,4240 @@ +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. 
+ warnings.warn( +[2024-04-24 10:38:01,087] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +[2024-04-24 10:38:02,510] [INFO] [runner.py:574:main] deepspeed_env file = /root/.deepspeed_env +[2024-04-24 10:38:02,510] [INFO] [multinode_runner.py:72:get_cmd] Running on the following workers: 100.83.134.158,100.83.134.148 +[2024-04-24 10:38:02,510] [INFO] [runner.py:585:main] cmd = pdsh -S -f 1024 -w 100.83.134.158,100.83.134.148 export PYTHONPATH=/Model-References/PyTorch/common:/usr/lib/habanalabs/:$PYTHONPATH; export PYTHON=/usr/bin/python3; export PT_HPU_LAZY_ACC_PAR_MODE=0; export PT_HPU_ENABLE_REFINE_DYNAMIC_SHAPES=0; export PT_HPU_ENABLE_WEIGHT_CPU_PERMUTE=0; export MODEL_REFERENCES_ROOT=/Model-References; export ENABLE_CONSOLE=false; export LOG_LEVEL_ALL=4; cd /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed; /usr/bin/python3 -u -m deepspeed.launcher.launch --world_info=eyIxMDAuODMuMTM0LjE1OCI6IFswLCAxLCAyLCAzLCA0LCA1LCA2LCA3XSwgIjEwMC44My4xMzQuMTQ4IjogWzAsIDEsIDIsIDMsIDQsIDUsIDYsIDddfQ== --node_rank=%n --master_addr=100.83.134.158 --master_port=29500 --no_python --no_local_rank /usr/bin/bash -c ' cd /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed && python3 -u ./pretrain_llama.py --deepspeed --tensor-model-parallel-size 4 --pipeline-model-parallel-size 2 --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --layernorm-epsilon 1e-6 --num-layers 24 --hidden-size 2048 --ffn-hidden-size 4096 --num-attention-heads 32 --seq-length 2048 --micro-batch-size 1 --global-batch-size 256 --train-iters 10000 --log-interval 10 --eval-iters 10 --eval-interval 20 --data-path /data/arxiv//tokenized_text_document --vocab-file /data/arxiv//gpt2-vocab.json --merge-file /data/arxiv//gpt2-merges.txt --optimizer adamw --adam-beta1 0.9 --adam-beta2 0.95 --adam-eps 1e-6 --lr 3e-4 --lr-decay-style cosine --lr-warmup-iters 2000 --clip-grad 1.0 --weight-decay 0.1 --attention-dropout 0.1 --hidden-dropout 0.1 --tensorboard-dir /data/output/llama13b_600M/24-04-2024-10:37:59/tensorboard --log-validation-ppl-to-tensorboard --log-batch-size-to-tensorboard --log-timers-to-tensorboard --load /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2 --deepspeed_config=/data/output/llama13b_600M/24-04-2024-10:37:59/ds_config.json --zero-stage=0 --exit-interval 0 --no-masked-softmax-fusion --no-bias-gelu-fusion --no-bias-dropout-fusion --use-torch-compile false --use-fused-sdpa true --use-fused-sdpa-with-recompute false --bf16 --use_hpu --distributed-backend=hccl --hpu-deterministic --sequence-parallel --save /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2 --hf-save /data/output/llama13b_600M/24-04-2024-10:37:59/hf_ckpt --save-interval 20 --verify-checkpoint --verify-checkpoint-model-type LLAMA' +100.83.134.158: Warning: Permanently added '[100.83.134.158]:3122' (ED25519) to the list of known hosts. +100.83.134.148: Warning: Permanently added '[100.83.134.148]:3122' (ED25519) to the list of known hosts. +100.83.134.158: * Starting OpenBSD Secure Shell server sshd +100.83.134.158: ...done. +100.83.134.148: * Starting OpenBSD Secure Shell server sshd +100.83.134.148: ...done. +100.83.134.158: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. 
+100.83.134.158: warnings.warn( +100.83.134.158: [2024-04-24 10:38:04,207] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.134.148: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.134.148: warnings.warn( +100.83.134.148: [2024-04-24 10:38:04,300] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.134.158: [2024-04-24 10:38:05,628] [INFO] [launch.py:146:main] WORLD INFO DICT: {'100.83.134.158': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.134.148': [0, 1, 2, 3, 4, 5, 6, 7]} +100.83.134.158: [2024-04-24 10:38:05,628] [INFO] [launch.py:152:main] nnodes=2, num_local_procs=8, node_rank=0 +100.83.134.158: [2024-04-24 10:38:05,629] [INFO] [launch.py:163:main] global_rank_mapping=defaultdict(, {'100.83.134.158': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.134.148': [8, 9, 10, 11, 12, 13, 14, 15]}) +100.83.134.158: [2024-04-24 10:38:05,629] [INFO] [launch.py:164:main] dist_world_size=16 +100.83.134.158: [2024-04-24 10:38:05,629] [INFO] [launch.py:166:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +100.83.134.158: * Starting OpenBSD Secure Shell server sshd +100.83.134.158: * Starting OpenBSD Secure Shell server sshd +100.83.134.158: * Starting OpenBSD Secure Shell server sshd +100.83.134.158: ...done. +100.83.134.158: ...done. +100.83.134.158: ...done. +100.83.134.158: * Starting OpenBSD Secure Shell server sshd +100.83.134.158: * Starting OpenBSD Secure Shell server sshd +100.83.134.158: * Starting OpenBSD Secure Shell server sshd +100.83.134.158: * Starting OpenBSD Secure Shell server sshd +100.83.134.158: ...done. +100.83.134.158: * Starting OpenBSD Secure Shell server sshd +100.83.134.158: ...done. +100.83.134.158: ...done. +100.83.134.158: ...done. +100.83.134.158: ...done. +100.83.134.148: [2024-04-24 10:38:05,843] [INFO] [launch.py:146:main] WORLD INFO DICT: {'100.83.134.158': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.134.148': [0, 1, 2, 3, 4, 5, 6, 7]} +100.83.134.148: [2024-04-24 10:38:05,843] [INFO] [launch.py:152:main] nnodes=2, num_local_procs=8, node_rank=1 +100.83.134.148: [2024-04-24 10:38:05,843] [INFO] [launch.py:163:main] global_rank_mapping=defaultdict(, {'100.83.134.158': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.134.148': [8, 9, 10, 11, 12, 13, 14, 15]}) +100.83.134.148: [2024-04-24 10:38:05,843] [INFO] [launch.py:164:main] dist_world_size=16 +100.83.134.148: [2024-04-24 10:38:05,843] [INFO] [launch.py:166:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +100.83.134.148: * Starting OpenBSD Secure Shell server sshd +100.83.134.148: * Starting OpenBSD Secure Shell server sshd +100.83.134.148: * Starting OpenBSD Secure Shell server sshd +100.83.134.148: ...done. +100.83.134.148: * Starting OpenBSD Secure Shell server sshd +100.83.134.148: ...done. +100.83.134.148: ...done. +100.83.134.148: * Starting OpenBSD Secure Shell server sshd +100.83.134.148: ...done. +100.83.134.148: ...done. +100.83.134.148: * Starting OpenBSD Secure Shell server sshd +100.83.134.148: * Starting OpenBSD Secure Shell server sshd +100.83.134.148: ...done. +100.83.134.148: ...done. +100.83.134.148: * Starting OpenBSD Secure Shell server sshd +100.83.134.148: ...done. 
+100.83.134.158: [2024-04-24 10:38:07,373] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.134.158: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.134.158: warnings.warn( +100.83.134.158: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.134.158: warnings.warn( +100.83.134.158: [2024-04-24 10:38:07,376] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.134.158: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.134.158: warnings.warn( +100.83.134.158: [2024-04-24 10:38:07,377] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.134.158: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.134.158: warnings.warn( +100.83.134.158: [2024-04-24 10:38:07,384] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.134.158: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.134.158: warnings.warn( +100.83.134.158: [2024-04-24 10:38:07,390] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.134.158: [2024-04-24 10:38:07,392] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.134.158: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.134.158: warnings.warn( +100.83.134.158: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.134.158: warnings.warn( +100.83.134.158: [2024-04-24 10:38:07,392] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.134.158: [2024-04-24 10:38:07,475] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.134.158: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. 
+100.83.134.158: warnings.warn( +100.83.134.148: [2024-04-24 10:38:07,667] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.134.148: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.134.148: warnings.warn( +100.83.134.148: [2024-04-24 10:38:07,670] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.134.148: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.134.148: warnings.warn( +100.83.134.148: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.134.148: warnings.warn( +100.83.134.148: [2024-04-24 10:38:07,672] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.134.148: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.134.148: warnings.warn( +100.83.134.148: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.134.148: warnings.warn( +100.83.134.148: [2024-04-24 10:38:07,672] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.134.148: [2024-04-24 10:38:07,673] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.134.148: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.134.148: warnings.warn( +100.83.134.148: [2024-04-24 10:38:07,678] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.134.148: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.134.148: warnings.warn( +100.83.134.148: [2024-04-24 10:38:07,679] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.134.148: [2024-04-24 10:38:07,698] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.134.148: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. 
+100.83.134.158: --------------------------------------------------
+100.83.134.158: DeepSpeed C++/CUDA extension op report
+100.83.134.158: --------------------------------------------------
+100.83.134.158: NOTE: Ops not installed will be just-in-time (JIT) compiled at
+100.83.134.158: runtime if needed. Op compatibility means that your system
+100.83.134.158: meet the required dependencies to JIT install the op.
+100.83.134.158: --------------------------------------------------
+100.83.134.158: JIT compiled ops requires ninja
+100.83.134.158: ninja .................. [OKAY]
+100.83.134.158: --------------------------------------------------
+100.83.134.158: op name ................ installed .. compatible
+100.83.134.158: --------------------------------------------------
+100.83.134.158: cpu_adam ............... [NO] ....... [OKAY]
+100.83.134.158: fused_adam ............. [NO] ....... [OKAY]
+100.83.134.158: deepspeed_not_implemented [NO] ....... [OKAY]
+100.83.134.158: transformer_inference .. [NO] ....... [OKAY]
+100.83.134.158: --------------------------------------------------
+100.83.134.158: DeepSpeed general environment info:
+100.83.134.158: torch install path ............... ['/usr/local/lib/python3.10/dist-packages/torch']
+100.83.134.158: torch version .................... 2.1.1a0+gitb51c9f6
+100.83.134.158: deepspeed install path ........... ['/usr/local/lib/python3.10/dist-packages/deepspeed']
+100.83.134.158: deepspeed info ................... 0.12.4+hpu.synapse.v1.14.0, fad45b2, 1.14.0
+100.83.134.158: deepspeed wheel compiled w. ...... torch 2.1
+100.83.134.158: shared memory (/dev/shm) size .... 503.72 GB
+100.83.134.158: fatal: not a git repository (or any parent up to mount point /)
+100.83.134.158: Stopping at filesystem boundary (GIT_DISCOVERY_ACROSS_FILESYSTEM not set).
+100.83.134.158: **** Git info for Megatron: git_hash=unknown git_branch=unknown ****
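The block above is the report DeepSpeed's `ds_report` utility prints: none of the extension ops are pre-installed ([NO]), but all are JIT-compatible ([OKAY]) because ninja is available. A hedged sketch of running the same compatibility check programmatically; the builder names follow the DeepSpeed 0.12.x layout shown in the log and may differ in other versions:

```python
# Sketch: programmatic version of the ds_report op-compatibility table.
# Assumes DeepSpeed 0.12.x; builder classes can vary across releases.
from deepspeed.ops.op_builder import CPUAdamBuilder, FusedAdamBuilder

for builder in (CPUAdamBuilder(), FusedAdamBuilder()):
    # is_compatible() mirrors the [OKAY]/[NO] column: it checks whether
    # the system can JIT-compile the op (ninja, headers) on first use.
    print(builder.name, "compatible:", builder.is_compatible())
```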
+100.83.134.158: using world size: 16, data-parallel-size: 2, tensor-model-parallel size: 4, pipeline-model-parallel size: 2
+100.83.134.158: accumulate and all-reduce gradients in fp32 for bfloat16 data type.
+100.83.134.158: using torch.bfloat16 for parameters ...
+100.83.134.158: ------------------------ arguments ------------------------
+100.83.134.158: accumulate_allreduce_grads_in_fp32 .............. True
+100.83.134.158: activation_func_type ............................ swiglu
+100.83.134.158: adam_beta1 ...................................... 0.9
+100.83.134.158: adam_beta2 ...................................... 0.95
+100.83.134.158: adam_eps ........................................ 1e-06
+100.83.134.158: adlr_autoresume ................................. False
+100.83.134.158: adlr_autoresume_interval ........................ 1000
+100.83.134.158: aml_data_download_path .......................... None
+100.83.134.158: apply_layernorm_weight_plus_one ................. False
+100.83.134.158: apply_query_key_layer_scaling ................... True
+100.83.134.158: apply_residual_connection_post_layernorm ........ False
+100.83.134.158: attention_dropout ............................... 0.1
+100.83.134.158: attention_softmax_in_fp32 ....................... False
+100.83.134.158: bert_binary_head ................................ True
+100.83.134.158: bert_load ....................................... None
+100.83.134.158: bf16 ............................................ True
+100.83.134.158: bias_dropout_fusion ............................. False
+100.83.134.158: bias_gelu_fusion ................................ False
+100.83.134.158: biencoder_projection_dim ........................ 0
+100.83.134.158: biencoder_shared_query_context_model ............ False
+100.83.134.158: block_data_path ................................. None
+100.83.134.158: cache_fp8_weight ................................ False
+100.83.134.158: cache_fp8_weight_fwd ............................ True
+100.83.134.158: checkpoint_activations .......................... False
+100.83.134.158: checkpoint_activations_granularity .............. full
+100.83.134.158: checkpoint_in_cpu ............................... False
+100.83.134.158: checkpoint_num_layers ........................... 1
+100.83.134.158: clearml_config_path ............................. None
+100.83.134.158: clearml_continue_exp ............................ False
+100.83.134.158: clearml_exp_name ................................ None
+100.83.134.158: clip_grad ....................................... 1.0
+100.83.134.158: compression_training ............................ False
+100.83.134.158: consumed_train_samples .......................... 0
+100.83.134.158: consumed_train_tokens ........................... 0
+100.83.134.158: consumed_valid_samples .......................... 0
+100.83.134.158: contigious_checkpointing ........................ False
+100.83.134.158: cpu_optimizer ................................... False
+100.83.134.158: cpu_torch_adam .................................. False
+100.83.134.158: create_moe_param_group .......................... False
+100.83.134.158: curriculum_learning ............................. False
+100.83.134.158: data_idx_path ................................... None
+100.83.134.158: data_impl ....................................... infer
+100.83.134.158: data_parallel_size .............................. 2
+100.83.134.158: data_path ....................................... ['/data/arxiv//tokenized_text_document']
+100.83.134.158: data_sharding ................................... True
+100.83.134.158: dataloader_type ................................. single
+100.83.134.158: DDP_impl ........................................ local
+100.83.134.158: decoder_seq_length .............................. None
+100.83.134.158: deepscale ....................................... False
+100.83.134.158: deepscale_config ................................ None
+100.83.134.158: deepspeed ....................................... True
+100.83.134.158: deepspeed_activation_checkpointing .............. False
+100.83.134.158: deepspeed_config ................................ /data/output/llama13b_600M/24-04-2024-10:37:59/ds_config.json
+100.83.134.158: deepspeed_mpi ................................... False
+100.83.134.158: distribute_checkpointed_activations ............. False
+100.83.134.158: distributed_backend ............................. hccl
+100.83.134.158: do_layernorm_bias_weight_decay .................. False
+100.83.134.158: do_pretrain_validation .......................... False
+100.83.134.158: ds_inference .................................... False
+100.83.134.158: ds_pipeline_enabled ............................. True
+100.83.134.158: embed_layernorm ................................. False
+100.83.134.158: embedding_path .................................. None
+100.83.134.158: enable_expert_tensor_parallelism ................ False
+100.83.134.158: encoder_seq_length .............................. 2048
+100.83.134.158: eod_mask_loss ................................... False
+100.83.134.158: eval_interval ................................... 20
+100.83.134.158: eval_iters ...................................... 10
+100.83.134.158: eval_loss_exit_value ............................ None
+100.83.134.158: eval_micro_batch_size ........................... 1
+100.83.134.158: evidence_data_path .............................. None
+100.83.134.158: exit_duration_in_mins ........................... None
+100.83.134.158: exit_interval ................................... 0
+100.83.134.158: expert_interval ................................. 2
+100.83.134.158: ffn_hidden_coeff ................................ 2.6666666666666665
+100.83.134.158: ffn_hidden_size ................................. 4096
+100.83.134.158: finetune ........................................ False
+100.83.134.158: fix_position_emb_redundant_alloc ................ False
+100.83.134.158: flatten_linear_operands ......................... False
+100.83.134.158: fp16 ............................................ False
+100.83.134.158: fp16_lm_cross_entropy ........................... False
+100.83.134.158: fp32_residual_connection ........................ False
+100.83.134.158: global_batch_size ............................... 256
+100.83.134.158: hf_save ......................................... /data/output/llama13b_600M/24-04-2024-10:37:59/hf_ckpt
+100.83.134.158: hidden_dropout .................................. 0.1
+100.83.134.158: hidden_size ..................................... 2048
+100.83.134.158: hidden_size_teacher ............................. None
+100.83.134.158: hpu_deterministic ............................... True
+100.83.134.158: hpu_fp8_format .................................. e5m2
+100.83.134.158: hpu_fp8_measure_interval ........................ 10
+100.83.134.158: hysteresis ...................................... 2
+100.83.134.158: ict_head_size ................................... None
+100.83.134.158: ict_load ........................................ None
+100.83.134.158: img_dim ......................................... 224
+100.83.134.158: indexer_batch_size .............................. 128
+100.83.134.158: indexer_log_interval ............................ 1000
+100.83.134.158: inference ....................................... False
+100.83.134.158: init_method_std ................................. 0.02
+100.83.134.158: init_method_xavier_uniform ...................... False
+100.83.134.158: initial_loss_scale .............................. 4294967296
+100.83.134.158: kd .............................................. False
+100.83.134.158: kd_alpha_ce ..................................... 1
+100.83.134.158: kd_beta_ce ...................................... 1
+100.83.134.158: kd_temp ......................................... 1.0
+100.83.134.158: kill_switch_path ................................ None
+100.83.134.158: kv_channels ..................................... 64
+100.83.134.158: layernorm_epsilon ............................... 1e-06
+100.83.134.158: layernorm_type .................................. rmsnorm
+100.83.134.158: lazy_mpu_init ................................... None
+100.83.134.158: load ............................................ /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2
+100.83.134.158: load_teacher .................................... None
+100.83.134.158: local_rank ...................................... 0
+100.83.134.158: log_batch_size_to_tensorboard ................... True
+100.83.134.158: log_bwd_grads ................................... False
+100.83.134.158: log_fwd_activations ............................. False
+100.83.134.158: log_interval .................................... 10
+100.83.134.158: log_learning_rate_to_tensorboard ................ True
+100.83.134.158: log_loss_scale_to_tensorboard ................... True
+100.83.134.158: log_model_inputs ................................ False
+100.83.134.158: log_num_zeros_in_grad ........................... False
+100.83.134.158: log_optimizer_states_to_tensorboard ............. False
+100.83.134.158: log_params_norm ................................. False
+100.83.134.158: log_timers_to_tensorboard ....................... True
+100.83.134.158: log_validation_ppl_to_tensorboard ............... True
+100.83.134.158: loss_scale ...................................... None
+100.83.134.158: loss_scale_window ............................... 1000
+100.83.134.158: lr .............................................. 0.0003
+100.83.134.158: lr_decay_iters .................................. None
+100.83.134.158: lr_decay_samples ................................ None
+100.83.134.158: lr_decay_style .................................. cosine
+100.83.134.158: lr_decay_tokens ................................. None
+100.83.134.158: lr_warmup_fraction .............................. None
+100.83.134.158: lr_warmup_iters ................................. 2000
+100.83.134.158: lr_warmup_samples ............................... 0
+100.83.134.158: lr_warmup_tokens ................................ None
+100.83.134.158: make_vocab_size_divisible_by .................... 128
+100.83.134.158: mask_prob ....................................... 0.15
+100.83.134.158: mask_tensor_adding .............................. False
+100.83.134.158: masked_softmax_fusion ........................... False
+100.83.134.158: max_position_embeddings ......................... None
+100.83.134.158: memory_centric_tiled_linear ..................... False
+100.83.134.158: merge_file ...................................... /data/arxiv//gpt2-merges.txt
+100.83.134.158: micro_batch_size ................................ 1
+100.83.134.158: min_loss_scale .................................. 1.0
+100.83.134.158: min_lr .......................................... 0.0
+100.83.134.158: mlp_type ........................................ standard
+100.83.134.158: mmap_warmup ..................................... False
+100.83.134.158: moe_eval_capacity_factor ........................ 1.0
+100.83.134.158: moe_expert_parallel_size ........................ 1
+100.83.134.158: moe_loss_coeff .................................. 0.1
+100.83.134.158: moe_min_capacity ................................ 4
+100.83.134.158: moe_token_dropping .............................. True
+100.83.134.158: moe_train_capacity_factor ....................... 1.0
+100.83.134.158: mos ............................................. False
+100.83.134.158: no_bias ......................................... True
+100.83.134.158: no_cuda ......................................... False
+100.83.134.158: no_load_lr_state ................................ False
+100.83.134.158: no_load_optim ................................... None
+100.83.134.158: no_load_rng ..................................... None
+100.83.134.158: no_pipeline_parallel ............................ False
+100.83.134.158: no_save_optim ................................... None
+100.83.134.158: no_save_rng ..................................... None
+100.83.134.158: no_scaled_init .................................. False
+100.83.134.158: num_attention_heads ............................. 32
+100.83.134.158: num_attention_heads_teacher ..................... None
+100.83.134.158: num_channels .................................... 3
+100.83.134.158: num_classes ..................................... 1000
+100.83.134.158: num_experts ..................................... [1]
+100.83.134.158: num_experts_teacher ............................. [1]
+100.83.134.158: num_key_value_heads ............................. 32
+100.83.134.158: num_layers ...................................... 24
+100.83.134.158: num_layers_per_virtual_pipeline_stage ........... None
+100.83.134.158: num_layers_teacher .............................. None
+100.83.134.158: num_workers ..................................... 2
+100.83.134.158: onnx_safe ....................................... None
+100.83.134.158: openai_gelu ..................................... False
+100.83.134.158: optimizer ....................................... adamw
+100.83.134.158: override_lr_scheduler ........................... False
+100.83.134.158: params_dtype .................................... torch.bfloat16
+100.83.134.158: partition_activations ........................... False
+100.83.134.158: patch_dim ....................................... 16
+100.83.134.158: pipeline_model_parallel_size .................... 2
+100.83.134.158: position_embedding_type ......................... PositionEmbeddingType.rotary
+100.83.134.158: profile ......................................... None
+100.83.134.158: profile_backward ................................ False
+100.83.134.158: profile_steps ................................... 2,3
+100.83.134.158: query_in_block_prob ............................. 0.1
+100.83.134.158: rampup_batch_size ............................... None
+100.83.134.158: rank ............................................ 0
+100.83.134.158: remote_device ................................... none
+100.83.134.158: reset_attention_mask ............................ False
+100.83.134.158: reset_iteration ................................. False
+100.83.134.158: reset_position_ids .............................. False
+100.83.134.158: retriever_report_topk_accuracies ................ []
+100.83.134.158: retriever_score_scaling ......................... False
+100.83.134.158: retriever_seq_length ............................ 256
+100.83.134.158: sample_rate ..................................... 1.0
+100.83.134.158: save ............................................ /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2
+100.83.134.158: save_interval ................................... 20
+100.83.134.158: scatter_gather_tensors_in_pipeline .............. True
+100.83.134.158: scattered_embeddings ............................ False
+100.83.134.158: seed ............................................ 1234
+100.83.134.158: seq_length ...................................... 2048
+100.83.134.158: sequence_parallel ............................... True
+100.83.134.158: sgd_momentum .................................... 0.9
+100.83.134.158: short_seq_prob .................................. 0.1
+100.83.134.158: skip_train ...................................... False
+100.83.134.158: split ........................................... 969, 30, 1
+100.83.134.158: split_transformers .............................. False
+100.83.134.158: synchronize_each_layer .......................... False
+100.83.134.158: tensor_logger_max_iter .......................... 0
+100.83.134.158: tensor_logger_path .............................. None
+100.83.134.158: tensor_model_parallel_size ...................... 4
+100.83.134.158: tensorboard_dir ................................. /data/output/llama13b_600M/24-04-2024-10:37:59/tensorboard
+100.83.134.158: tensorboard_log_interval ........................ 1
+100.83.134.158: tensorboard_queue_size .......................... 1000
+100.83.134.158: test_data_path .................................. None
+100.83.134.158: tile_factor ..................................... 1
+100.83.134.158: titles_data_path ................................ None
+100.83.134.158: tokenizer_eod_id ................................ None
+100.83.134.158: tokenizer_model_file ............................ None
+100.83.134.158: tokenizer_type .................................. GPT2BPETokenizer
+100.83.134.158: topk ............................................ 1
+100.83.134.158: train_data_path ................................. None
+100.83.134.158: train_iters ..................................... 10000
+100.83.134.158: train_samples ................................... None
+100.83.134.158: train_tokens .................................... None
+100.83.134.158: universal_checkpoint ............................ False
+100.83.134.158: use_checkpoint_lr_scheduler ..................... False
+100.83.134.158: use_contiguous_buffers_in_ddp ................... True
+100.83.134.158: use_cpu_initialization .......................... None
+100.83.134.158: use_fused_sdpa .................................. True
+100.83.134.158: use_fused_sdpa_with_recompute ................... False
+100.83.134.158: use_hpu ......................................... True
+100.83.134.158: use_hpu_fp8_transformer_engine .................. False
+100.83.134.158: use_hpu_graphs .................................. False
+100.83.134.158: use_one_sent_docs ............................... False
+100.83.134.158: use_pin_memory .................................. False
+100.83.134.158: use_rotary_v2 ................................... False
+100.83.134.158: use_seq_len_plus_one_tokens ..................... True
+100.83.134.158: use_torch_compile ............................... False
+100.83.134.158: use_tutel ....................................... False
+100.83.134.158: valid_data_path ................................. None
+100.83.134.158: verify_checkpoint ............................... True
+100.83.134.158: verify_checkpoint_model_type .................... LLAMA
+100.83.134.158: verify_tp_workers ............................... False
+100.83.134.158: verify_tp_workers_hash .......................... False
+100.83.134.158: virtual_pipeline_model_parallel_size ............ None
+100.83.134.158: vocab_extra_ids ................................. 0
+100.83.134.158: vocab_file ...................................... /data/arxiv//gpt2-vocab.json
+100.83.134.158: weight_decay .................................... 0.1
+100.83.134.158: world_size ...................................... 16
+100.83.134.158: zero_allgather_bucket_size ...................... 0.0
+100.83.134.158: zero_contigious_gradients ....................... False
+100.83.134.158: zero_reduce_bucket_size ......................... 0.0
+100.83.134.158: zero_reduce_scatter ............................. False
+100.83.134.158: zero_stage ...................................... 0
+100.83.134.158: -------------------- end of arguments ---------------------
+100.83.134.158: setting number of micro-batches to constant 128
+100.83.134.158: > building GPT2BPETokenizer tokenizer ...
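The parallel layout and the "constant 128" micro-batch count both follow from the arguments dump above: world_size = TP × PP × DP = 4 × 2 × 2 = 16, and the number of micro-batches per step is global_batch_size / (micro_batch_size × data_parallel_size) = 256 / (1 × 2) = 128. A small sketch of that bookkeeping, using only values from the log (plain arithmetic, not Megatron code):

```python
# Sketch: deriving the layout numbers printed in this log from the arguments.
tensor_model_parallel_size = 4
pipeline_model_parallel_size = 2
data_parallel_size = 2
world_size = (tensor_model_parallel_size
              * pipeline_model_parallel_size
              * data_parallel_size)
assert world_size == 16  # matches "using world size: 16"

global_batch_size = 256
micro_batch_size = 1
num_micro_batches = global_batch_size // (micro_batch_size * data_parallel_size)
assert num_micro_batches == 128  # "setting number of micro-batches to constant 128"
```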
+100.83.134.158: > padded vocab (size: 50257) with 431 dummy tokens (new size: 50688)
+100.83.134.158: _initialize_distributed: Initializing with below params:
+100.83.134.158: args.local_rank: 0
+100.83.134.158: args.world_size: 16
+100.83.134.158: args.rank: 0
+100.83.134.158: args.distributed_backend: hccl
+100.83.134.158: _initialize_distributed: Initializing with below params:
+100.83.134.158: args.local_rank: 3
+100.83.134.158: args.world_size: 16
+100.83.134.158: args.rank: 3
+100.83.134.158: args.distributed_backend: hccl
+100.83.134.158: _initialize_distributed: Initializing with below params:
+100.83.134.158: args.local_rank: 5
+100.83.134.158: args.world_size: 16
+100.83.134.158: args.rank: 5
+100.83.134.158: args.distributed_backend: hccl
+100.83.134.158: _initialize_distributed: Initializing with below params:
+100.83.134.158: args.local_rank: 6
+100.83.134.158: args.world_size: 16
+100.83.134.158: args.rank: 6
+100.83.134.158: args.distributed_backend: hccl
+100.83.134.158: _initialize_distributed: Initializing with below params:
+100.83.134.158: args.local_rank: 7
+100.83.134.158: args.world_size: 16
+100.83.134.158: args.rank: 7
+100.83.134.158: args.distributed_backend: hccl
+100.83.134.158: _initialize_distributed: Initializing with below params:
+100.83.134.158: args.local_rank: 2
+100.83.134.158: args.world_size: 16
+100.83.134.158: args.rank: 2
+100.83.134.158: args.distributed_backend: hccl
+100.83.134.158: _initialize_distributed: Initializing with below params:
+100.83.134.158: args.local_rank: 1
+100.83.134.158: args.world_size: 16
+100.83.134.158: args.rank: 1
+100.83.134.158: args.distributed_backend: hccl
+100.83.134.158: _initialize_distributed: Initializing with below params:
+100.83.134.158: args.local_rank: 4
+100.83.134.158: args.world_size: 16
+100.83.134.158: args.rank: 4
+100.83.134.158: args.distributed_backend: hccl
+100.83.134.158: hccl device_count: 8
+100.83.134.158: [2024-04-24 10:38:09,602] [WARNING] [comm.py:163:init_deepspeed_backend] HCCL backend in DeepSpeed not yet implemented
+100.83.134.158: [2024-04-24 10:38:09,602] [INFO] [comm.py:637:init_distributed] cdb=None
+100.83.134.158: > initializing torch distributed ...
+100.83.134.158: [2024-04-24 10:38:09,603] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend hccl
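The padded-vocab line above ("padded vocab (size: 50257) with 431 dummy tokens (new size: 50688)") is Megatron rounding the GPT-2 vocabulary up to a multiple of make_vocab_size_divisible_by × tensor_model_parallel_size = 128 × 4 = 512, so every tensor-parallel shard of the embedding table gets the same size. A sketch of the computation using the values from this run:

```python
import math

# Sketch: Megatron-style vocab padding so the embedding splits evenly
# across tensor-parallel ranks. All values come from the arguments dump.
orig_vocab_size = 50257             # GPT2BPETokenizer
make_vocab_size_divisible_by = 128  # from the arguments dump
tensor_model_parallel_size = 4

multiple = make_vocab_size_divisible_by * tensor_model_parallel_size  # 512
padded = math.ceil(orig_vocab_size / multiple) * multiple
print(padded, padded - orig_vocab_size)  # 50688, 431 dummy tokens
```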
+100.83.134.148: --------------------------------------------------
+100.83.134.148: DeepSpeed C++/CUDA extension op report
+100.83.134.148: --------------------------------------------------
+100.83.134.148: NOTE: Ops not installed will be just-in-time (JIT) compiled at
+100.83.134.148: runtime if needed. Op compatibility means that your system
+100.83.134.148: meet the required dependencies to JIT install the op.
+100.83.134.148: --------------------------------------------------
+100.83.134.148: JIT compiled ops requires ninja
+100.83.134.148: ninja .................. [OKAY]
+100.83.134.148: --------------------------------------------------
+100.83.134.148: op name ................ installed .. compatible
+100.83.134.148: --------------------------------------------------
+100.83.134.148: cpu_adam ............... [NO] ....... [OKAY]
+100.83.134.148: fused_adam ............. [NO] ....... [OKAY]
+100.83.134.148: deepspeed_not_implemented [NO] ....... [OKAY]
+100.83.134.148: transformer_inference .. [NO] ....... [OKAY]
+100.83.134.148: --------------------------------------------------
+100.83.134.148: DeepSpeed general environment info:
+100.83.134.148: torch install path ............... ['/usr/local/lib/python3.10/dist-packages/torch']
+100.83.134.148: torch version .................... 2.1.1a0+gitb51c9f6
+100.83.134.148: deepspeed install path ........... ['/usr/local/lib/python3.10/dist-packages/deepspeed']
+100.83.134.148: deepspeed info ................... 0.12.4+hpu.synapse.v1.14.0, fad45b2, 1.14.0
+100.83.134.148: deepspeed wheel compiled w. ...... torch 2.1
+100.83.134.148: shared memory (/dev/shm) size .... 503.72 GB
+100.83.134.148: fatal: not a git repository (or any parent up to mount point /)
+100.83.134.148: Stopping at filesystem boundary (GIT_DISCOVERY_ACROSS_FILESYSTEM not set).
+100.83.134.148: **** Git info for Megatron: git_hash=unknown git_branch=unknown ****
[OKAY] +100.83.134.148: fused_adam ............. [NO] ....... [OKAY] +100.83.134.148: deepspeed_not_implemented [NO] ....... [OKAY] +100.83.134.148: transformer_inference .. [NO] ....... [OKAY] +100.83.134.148: -------------------------------------------------- +100.83.134.148: DeepSpeed general environment info: +100.83.134.148: torch install path ............... ['/usr/local/lib/python3.10/dist-packages/torch'] +100.83.134.148: torch version .................... 2.1.1a0+gitb51c9f6 +100.83.134.148: deepspeed install path ........... ['/usr/local/lib/python3.10/dist-packages/deepspeed'] +100.83.134.148: deepspeed info ................... 0.12.4+hpu.synapse.v1.14.0, fad45b2, 1.14.0 +100.83.134.148: deepspeed wheel compiled w. ...... torch 2.1 +100.83.134.148: shared memory (/dev/shm) size .... 503.72 GB +100.83.134.148: fatal: not a git repository (or any parent up to mount point /) +100.83.134.148: Stopping at filesystem boundary (GIT_DISCOVERY_ACROSS_FILESYSTEM not set). +100.83.134.148: fatal: not a git repository (or any parent up to mount point /) +100.83.134.148: Stopping at filesystem boundary (GIT_DISCOVERY_ACROSS_FILESYSTEM not set). +100.83.134.148: **** Git info for Megatron: git_hash=unknown git_branch=unknown **** +100.83.134.148: **** Git info for Megatron: git_hash=unknown git_branch=unknown **** +100.83.134.148: _initialize_distributed: Initializing with below params: +100.83.134.148: args.local_rank: 1 +100.83.134.148: args.world_size: 16 +100.83.134.148: args.rank: 9 +100.83.134.148: args.distributed_backend: hccl +100.83.134.148: _initialize_distributed: Initializing with below params: +100.83.134.148: args.local_rank: 4 +100.83.134.148: args.world_size: 16 +100.83.134.148: args.rank: 12 +100.83.134.148: args.distributed_backend: hccl +100.83.134.148: _initialize_distributed: Initializing with below params: +100.83.134.148: args.local_rank: 2 +100.83.134.148: args.world_size: 16 +100.83.134.148: args.rank: 10 +100.83.134.148: args.distributed_backend: hccl +100.83.134.148: _initialize_distributed: Initializing with below params: +100.83.134.148: args.local_rank: 3 +100.83.134.148: args.world_size: 16 +100.83.134.148: args.rank: 11 +100.83.134.148: args.distributed_backend: hccl +100.83.134.148: _initialize_distributed: Initializing with below params: +100.83.134.148: args.local_rank: 6 +100.83.134.148: args.world_size: 16 +100.83.134.148: args.rank: 14 +100.83.134.148: args.distributed_backend: hccl +100.83.134.148: _initialize_distributed: Initializing with below params: +100.83.134.148: args.local_rank: 0 +100.83.134.148: args.world_size: 16 +100.83.134.148: args.rank: 8 +100.83.134.148: args.distributed_backend: hccl +100.83.134.148: _initialize_distributed: Initializing with below params: +100.83.134.148: args.local_rank: 5 +100.83.134.148: args.world_size: 16 +100.83.134.148: args.rank: 13 +100.83.134.148: args.distributed_backend: hccl +100.83.134.148: > setting tensorboard ... +100.83.134.148: _initialize_distributed: Initializing with below params: +100.83.134.148: args.local_rank: 7 +100.83.134.148: args.world_size: 16 +100.83.134.148: args.rank: 15 +100.83.134.148: args.distributed_backend: hccl +100.83.134.148: hccl device_count: 8 +100.83.134.148: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. 
+100.83.134.148: hccl device_count: 8
+100.83.134.148: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+100.83.134.148: warnings.warn(
+100.83.134.148: [2024-04-24 10:38:09,922] [WARNING] [comm.py:163:init_deepspeed_backend] HCCL backend in DeepSpeed not yet implemented
+100.83.134.148: [2024-04-24 10:38:09,922] [INFO] [comm.py:637:init_distributed] cdb=None
+100.83.134.158: > initializing tensor model parallel with size 4
+100.83.134.158: > initializing pipeline model parallel with size 2
+100.83.134.158: > setting random seeds to 1234 ...
+100.83.134.158: > initializing model parallel cuda seeds on global rank 0, model parallel rank 0, and data parallel rank 0 with model parallel seed: 3952 and data parallel seed: 1234
+100.83.134.158: ============================= HABANA PT BRIDGE CONFIGURATION ===========================
+100.83.134.158: PT_HPU_LAZY_MODE = 1
+100.83.134.158: PT_RECIPE_CACHE_PATH =
+100.83.134.158: PT_CACHE_FOLDER_DELETE = 0
+100.83.134.158: PT_HPU_RECIPE_CACHE_CONFIG =
+100.83.134.158: PT_HPU_MAX_COMPOUND_OP_SIZE = 9223372036854775807
+100.83.134.158: PT_HPU_LAZY_ACC_PAR_MODE = 0
+100.83.134.158: PT_HPU_ENABLE_REFINE_DYNAMIC_SHAPES = 0
+100.83.134.158: ---------------------------: System Configuration :---------------------------
+100.83.134.158: Num CPU Cores : 160
+100.83.134.158: CPU RAM : 1056375308 KB
+100.83.134.158: ------------------------------------------------------------------------------
+100.83.134.158: > compiling dataset index builder ...
+100.83.134.158: make: Entering directory '/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/data'
+100.83.134.158: g++ -O3 -Wall -shared -std=c++11 -fPIC -fdiagnostics-color -I/usr/include/python3.10 -I/usr/local/lib/python3.10/dist-packages/pybind11/include helpers.cpp -o helpers.cpython-310-x86_64-linux-gnu.so
+100.83.134.148: > compiling dataset index builder ...
+100.83.134.148: make: Entering directory '/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/data'
+100.83.134.148: g++ -O3 -Wall -shared -std=c++11 -fPIC -fdiagnostics-color -I/usr/include/python3.10 -I/usr/local/lib/python3.10/dist-packages/pybind11/include helpers.cpp -o helpers.cpython-310-x86_64-linux-gnu.so
+100.83.134.158: make: Leaving directory '/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/data'
+100.83.134.158: >>> done with dataset index builder. Compilation time: 4.701 seconds
+100.83.134.158: WARNING: constraints for invoking optimized fused softmax kernel are not met. We default back to unfused kernel invocations.
+100.83.134.158: > compiling and loading fused kernels ...
+100.83.134.148: make: Leaving directory '/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/data'
+100.83.134.148: >>> done with dataset index builder. Compilation time: 5.028 seconds
+100.83.134.158: >>> done with compiling and loading fused kernels. Compilation time: 0.633 seconds
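The two parallel sizes just logged determine the data-parallel degree implicitly; a quick consistency check with numbers taken from the log:

```python
# Sketch: world_size factors into tensor-parallel x pipeline-parallel x data-parallel.
tensor_parallel = 4    # "> initializing tensor model parallel with size 4"
pipeline_parallel = 2  # "> initializing pipeline model parallel with size 2"
world_size = 16
data_parallel = world_size // (tensor_parallel * pipeline_parallel)
assert data_parallel == 2
```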
+100.83.134.148: wandb: Currently logged in as: advaiddeepak0602 (bharatgpt). Use `wandb login --relogin` to force relogin
+100.83.134.148: wandb: Appending key for api.wandb.ai to your netrc file: /root/.netrc
+100.83.134.158: wandb: Currently logged in as: advaiddeepak0602 (bharatgpt). Use `wandb login --relogin` to force relogin
+100.83.134.158: wandb: Appending key for api.wandb.ai to your netrc file: /root/.netrc
+100.83.134.158: wandb: Tracking run with wandb version 0.16.6
+100.83.134.158: wandb: Run data is saved locally in /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/wandb/run-20240424_103821-2gvu06me
+100.83.134.158: wandb: Run `wandb offline` to turn off syncing.
+100.83.134.158: wandb: ⭐️ View project at https://wandb.ai/bharatgpt/llama_runs
+100.83.134.158: wandb: Syncing run true-salad-2052
+100.83.134.158: wandb: 🚀 View run at https://wandb.ai/bharatgpt/llama_runs/runs/2gvu06me
+100.83.134.158: wandb: Syncing run prime-donkey-2052
+100.83.134.158: wandb: 🚀 View run at https://wandb.ai/bharatgpt/llama_runs/runs/a9p61iy4
+100.83.134.158: wandb: Syncing run rose-eon-2052
+100.83.134.158: wandb: 🚀 View run at https://wandb.ai/bharatgpt/llama_runs/runs/1lf6hrcg
+100.83.134.158: wandb: Syncing run avid-darkness-2052
+100.83.134.158: wandb: 🚀 View run at https://wandb.ai/bharatgpt/llama_runs/runs/yfi6p93o
+100.83.134.158: wandb: Syncing run driven-surf-2052
+100.83.134.158: wandb: 🚀 View run at https://wandb.ai/bharatgpt/llama_runs/runs/hqeqzovu
+100.83.134.158: wandb: Syncing run lilac-aardvark-2052
+100.83.134.158: wandb: 🚀 View run at https://wandb.ai/bharatgpt/llama_runs/runs/33j1g38a
+100.83.134.158: wandb: Syncing run logical-fire-2052
+100.83.134.158: wandb: 🚀 View run at https://wandb.ai/bharatgpt/llama_runs/runs/8jj6i7a9
+100.83.134.158: wandb: Syncing run dainty-brook-2052
+100.83.134.158: wandb: 🚀 View run at https://wandb.ai/bharatgpt/llama_runs/runs/r42kdunw
+100.83.134.148: wandb: Syncing run lyric-totem-2052
+100.83.134.148: wandb: 🚀 View run at https://wandb.ai/bharatgpt/llama_runs/runs/hawf4qag
+100.83.134.148: wandb: Syncing run laced-bee-2052
+100.83.134.148: wandb: 🚀 View run at https://wandb.ai/bharatgpt/llama_runs/runs/k8bmvdda
+100.83.134.148: wandb: Syncing run sweet-grass-2052
+100.83.134.148: wandb: 🚀 View run at https://wandb.ai/bharatgpt/llama_runs/runs/9q3cubkd
+100.83.134.148: wandb: Syncing run frosty-valley-2052
+100.83.134.148: wandb: 🚀 View run at https://wandb.ai/bharatgpt/llama_runs/runs/wulfqwkg
+100.83.134.148: wandb: Syncing run glowing-dust-2052
+100.83.134.148: wandb: 🚀 View run at https://wandb.ai/bharatgpt/llama_runs/runs/ljwbq5pp
+100.83.134.148: wandb: Syncing run wobbly-fire-2052
+100.83.134.148: wandb: 🚀 View run at https://wandb.ai/bharatgpt/llama_runs/runs/kdru3pwo
+100.83.134.148: wandb: Syncing run bumbling-firebrand-2052
+100.83.134.148: wandb: 🚀 View run at https://wandb.ai/bharatgpt/llama_runs/runs/ydlgw8i5
+100.83.134.148: wandb: Syncing run super-yogurt-2052
+100.83.134.148: wandb: 🚀 View run at https://wandb.ai/bharatgpt/llama_runs/runs/wlyb6qye
+100.83.134.158: time to initialize megatron (seconds): 31.414
+100.83.134.158: [after megatron is initialized] datetime: 2024-04-24 10:38:23
+100.83.134.158: building LLaMA model ...
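Each of the 16 ranks opened its own wandb run in the same project, which is why sixteen distinct run names (all suffixed -2052) appear above. A minimal sketch of that per-process initialization; the project and entity come from the URLs in the log, while everything else here is an assumption rather than the training script's actual call:

```python
import wandb

# Sketch: one run per process; the human-readable name (e.g. "true-salad-2052")
# and the run URL are assigned by the wandb server, as echoed in the log.
run = wandb.init(project="llama_runs", entity="bharatgpt")
print(run.name, run.url)
run.finish()
```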
+100.83.134.148: *************** Using FusedSDPA ******************
+100.83.134.158: *************** Using FusedSDPA ******************
+100.83.134.148: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/core/weight_sharing.py:53: UserWarning: "hpu:X" notation is not supported by Gaudi PyTorch integration bridge. Please change to "hpu" without index (Triggered internally at /npu-stack/pytorch-integration/pytorch_helpers/lazy_to_backend.cpp:53.)
+100.83.134.148: return super().__torch_function__(func, types, new_args, kwargs)
+100.83.134.158: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/core/weight_sharing.py:53: UserWarning: "hpu:X" notation is not supported by Gaudi PyTorch integration bridge. Please change to "hpu" without index (Triggered internally at /npu-stack/pytorch-integration/pytorch_helpers/lazy_to_backend.cpp:53.)
+100.83.134.158: return super().__torch_function__(func, types, new_args, kwargs)
+100.83.134.158: > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 151830528
+100.83.134.148: > number of parameters on (tensor, pipeline) model parallel rank (1, 1): 151832576
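The banner above is printed once per transformer layer per rank when the model builder selects Habana's fused scaled-dot-product-attention kernel instead of the unfused softmax path. A rough sketch of what that kernel computes; the import path follows Habana's documentation, but the exact argument list of apply() is an assumption here:

```python
import torch
from habana_frameworks.torch.hpex.kernels import FusedSDPA

# Sketch: one fused kernel for softmax(q @ k^T / sqrt(d)) @ v.
q = torch.randn(1, 8, 128, 64, dtype=torch.bfloat16, device="hpu")
k, v = torch.randn_like(q), torch.randn_like(q)
out = FusedSDPA.apply(q, k, v, None, 0.0, True)  # attn_mask, dropout_p, is_causal
```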
Please change to "hpu" without index (Triggered internally at /npu-stack/pytorch-integration/pytorch_helpers/lazy_to_backend.cpp:53.) +100.83.134.158: return super().__torch_function__(func, types, new_args, kwargs) +100.83.134.148: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/core/weight_sharing.py:53: UserWarning: "hpu:X" notation is not supported by Gaudi PyTorch intergration bridge. Please change to "hpu" without index (Triggered internally at /npu-stack/pytorch-integration/pytorch_helpers/lazy_to_backend.cpp:53.) +100.83.134.148: return super().__torch_function__(func, types, new_args, kwargs) +100.83.134.148: > number of parameters on (tensor, pipeline) model parallel rank (2, 1): 151832576 +100.83.134.148: > number of parameters on (tensor, pipeline) model parallel rank (3, 1): 151832576 +100.83.134.158: > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 151830528 > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 151830528 +100.83.134.148: > number of parameters on (tensor, pipeline) model parallel rank (0, 1): 151832576 +100.83.134.158: +100.83.134.158: [2024-04-24 10:38:23,523] [INFO] [utils.py:824:see_memory_usage] Before Building Model +100.83.134.158: [2024-04-24 10:38:23,527] [INFO] [utils.py:825:see_memory_usage] MA 0.01 GB Max_MA 0.01 GB CA 0.0 GB Max_CA 0 GB +100.83.134.158: [2024-04-24 10:38:23,527] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 438.52 GB, percent = 43.5% +100.83.134.158: SEED_LAYERS=False BASE_SEED=1234 SEED_FN=None +100.83.134.158: Using topology: {ProcessCoord(pipe=0, data=0, model=0): 0, ProcessCoord(pipe=0, data=0, model=1): 1, ProcessCoord(pipe=0, data=0, model=2): 2, ProcessCoord(pipe=0, data=0, model=3): 3, ProcessCoord(pipe=0, data=1, model=0): 4, ProcessCoord(pipe=0, data=1, model=1): 5, ProcessCoord(pipe=0, data=1, model=2): 6, ProcessCoord(pipe=0, data=1, model=3): 7, ProcessCoord(pipe=1, data=0, model=0): 8, ProcessCoord(pipe=1, data=0, model=1): 9, ProcessCoord(pipe=1, data=0, model=2): 10, ProcessCoord(pipe=1, data=0, model=3): 11, ProcessCoord(pipe=1, data=1, model=0): 12, ProcessCoord(pipe=1, data=1, model=1): 13, ProcessCoord(pipe=1, data=1, model=2): 14, ProcessCoord(pipe=1, data=1, model=3): 15} +100.83.134.158: [2024-04-24 10:38:23,530] [INFO] [module.py:375:_partition_layers] Partitioning pipeline stages with method type:transformer +100.83.134.158: stage=0 layers=15 +100.83.134.158: 0: _to_float16 +100.83.134.158: 1: EmbeddingPipe +100.83.134.158: 2: +100.83.134.158: 3: ParallelTransformerLayerPipe +100.83.134.158: 4: ParallelTransformerLayerPipe +100.83.134.158: 5: ParallelTransformerLayerPipe +100.83.134.158: 6: ParallelTransformerLayerPipe +100.83.134.158: 7: ParallelTransformerLayerPipe +100.83.134.158: 8: ParallelTransformerLayerPipe +100.83.134.158: 9: ParallelTransformerLayerPipe +100.83.134.158: 10: ParallelTransformerLayerPipe +100.83.134.158: 11: ParallelTransformerLayerPipe +100.83.134.158: 12: ParallelTransformerLayerPipe +100.83.134.158: 13: ParallelTransformerLayerPipe +100.83.134.158: 14: ParallelTransformerLayerPipe +100.83.134.158: stage=1 layers=17 +100.83.134.158: 15: ParallelTransformerLayerPipe +100.83.134.158: 16: ParallelTransformerLayerPipe +100.83.134.158: 17: ParallelTransformerLayerPipe +100.83.134.158: 18: ParallelTransformerLayerPipe +100.83.134.158: 19: ParallelTransformerLayerPipe +100.83.134.158: 20: ParallelTransformerLayerPipe +100.83.134.158: 21: ParallelTransformerLayerPipe +100.83.134.158: 22: 
+100.83.134.158: 22: ParallelTransformerLayerPipe
+100.83.134.158: 23: ParallelTransformerLayerPipe
+100.83.134.158: 24: ParallelTransformerLayerPipe
+100.83.134.158: 25: ParallelTransformerLayerPipe
+100.83.134.158: 26: ParallelTransformerLayerPipe
+100.83.134.158: 27:
+100.83.134.158: 28: WrapName
+100.83.134.158: 29: WrapName
+100.83.134.158: 30:
+100.83.134.158: 31: float16_to_fp32
+100.83.134.158: loss: CrossEntropy
+100.83.134.158: *************** Using FusedSDPA ******************
+100.83.134.158: [2024-04-24 10:38:23,665] [INFO] [utils.py:824:see_memory_usage] After Building Model
+100.83.134.158: [2024-04-24 10:38:23,668] [INFO] [utils.py:825:see_memory_usage] MA 0.01 GB Max_MA 0.0 GB CA 0.0 GB Max_CA 0 GB
+100.83.134.158: [2024-04-24 10:38:23,669] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 438.58 GB, percent = 43.5%
+100.83.134.158: > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 151830528
+100.83.134.158: > learning rate decay style: cosine
+100.83.134.158: DeepSpeed is enabled.
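The `Using topology` dictionary above enumerates ranks with the model (tensor-parallel) index varying fastest, then data, then pipeline, and the partitioner splits the 32-item layer graph 15/17 between the two pipeline stages (12 ParallelTransformerLayerPipe blocks each, plus the embedding and cast layers at the ends). A sketch of the rank layout implied by that dictionary:

```python
# Sketch: reconstruct the ProcessCoord -> rank mapping printed above.
TP, PP, DP = 4, 2, 2  # tensor, pipeline, data parallel degrees

def coord_to_rank(pipe: int, data: int, model: int) -> int:
    # model index varies fastest, pipeline index slowest
    return pipe * (DP * TP) + data * TP + model

assert coord_to_rank(0, 0, 3) == 3
assert coord_to_rank(1, 1, 3) == 15  # last rank, second pipeline stage
```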
+100.83.134.158: [2024-04-24 10:38:23,672] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed info: version=0.12.4+hpu.synapse.v1.14.0, git-hash=fad45b2, git-branch=1.14.0
+100.83.134.158: [2024-04-24 10:38:23,919] [INFO] [engine.py:139:__init__] is_pipe_partitioned= False is_grad_partitioned= False
+100.83.134.148: [2024-04-24 10:38:23,940] [INFO] [engine.py:139:__init__] is_pipe_partitioned= False is_grad_partitioned= False
+100.83.134.158: [2024-04-24 10:38:23,999] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed Flops Profiler Enabled: False
+100.83.134.158: [2024-04-24 10:38:23,999] [INFO] [logging.py:96:log_dist] [Rank 0] Using client Optimizer as basic optimizer
+100.83.134.158: [2024-04-24 10:38:24,000] [INFO] [logging.py:96:log_dist] [Rank 0] Removing param_group that has no 'params' in the basic Optimizer
+100.83.134.158: [2024-04-24 10:38:24,001] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed Basic Optimizer = AdamW
+100.83.134.158: [2024-04-24 10:38:24,001] [INFO] [logging.py:96:log_dist] [Rank 0] Creating BF16 optimizer
+100.83.134.158: [2024-04-24 10:38:24,076] [INFO] [utils.py:824:see_memory_usage] begin bf16_optimizer
+100.83.134.158: [2024-04-24 10:38:24,080] [INFO] [utils.py:825:see_memory_usage] MA 0.29 GB Max_MA 0.31 GB CA 0.0 GB Max_CA 0 GB
+100.83.134.158: [2024-04-24 10:38:24,080] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 438.64 GB, percent = 43.5%
+100.83.134.158: [2024-04-24 10:38:24,145] [INFO] [utils.py:824:see_memory_usage] before initializing group 0
+100.83.134.158: [2024-04-24 10:38:24,149] [INFO] [utils.py:825:see_memory_usage] MA 0.29 GB Max_MA 0.0 GB CA 0.0 GB Max_CA 0 GB
+100.83.134.158: [2024-04-24 10:38:24,149] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 438.65 GB, percent = 43.5%
+100.83.134.158: [2024-04-24 10:38:24,257] [INFO] [utils.py:824:see_memory_usage] after initializing group 0
+100.83.134.158: [2024-04-24 10:38:24,261] [INFO] [utils.py:825:see_memory_usage] MA 0.29 GB Max_MA 0.58 GB CA 0.0 GB Max_CA 0 GB
+100.83.134.158: [2024-04-24 10:38:24,261] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 438.65 GB, percent = 43.5%
+100.83.134.158: [2024-04-24 10:38:24,318] [INFO] [utils.py:824:see_memory_usage] before initializing group 1
+100.83.134.158: [2024-04-24 10:38:24,321] [INFO] [utils.py:825:see_memory_usage] MA 0.29 GB Max_MA 0.0 GB CA 0.0 GB Max_CA 0 GB
+100.83.134.158: [2024-04-24 10:38:24,322] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 438.65 GB, percent = 43.5%
+100.83.134.158: [2024-04-24 10:38:24,410] [INFO] [utils.py:824:see_memory_usage] after initializing group 1
+100.83.134.158: [2024-04-24 10:38:24,414] [INFO] [utils.py:825:see_memory_usage] MA 1.14 GB Max_MA 1.14 GB CA 0.0 GB Max_CA 0 GB
+100.83.134.158: [2024-04-24 10:38:24,414] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 438.71 GB, percent = 43.5%
+100.83.134.158: [2024-04-24 10:38:24,490] [INFO] [utils.py:824:see_memory_usage] before initialize_optimizer
+100.83.134.158: [2024-04-24 10:38:24,493] [INFO] [utils.py:825:see_memory_usage] MA 1.14 GB Max_MA 0.0 GB CA 0.0 GB Max_CA 0 GB
+100.83.134.158: [2024-04-24 10:38:24,494] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 438.72 GB, percent = 43.5%
+100.83.134.158: [2024-04-24 10:38:24,554] [INFO] [utils.py:824:see_memory_usage] end initialize_optimizer
+100.83.134.158: [2024-04-24 10:38:24,557] [INFO] [utils.py:825:see_memory_usage] MA 1.14 GB Max_MA 0.0 GB CA 0.0 GB Max_CA 0 GB
+100.83.134.158: [2024-04-24 10:38:24,557] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 438.72 GB, percent = 43.5%
+100.83.134.158: [2024-04-24 10:38:24,621] [INFO] [utils.py:824:see_memory_usage] end bf16_optimizer
+100.83.134.158: [2024-04-24 10:38:24,625] [INFO] [utils.py:825:see_memory_usage] MA 1.14 GB Max_MA 0.0 GB CA 0.0 GB Max_CA 0 GB
+100.83.134.158: [2024-04-24 10:38:24,625] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 438.72 GB, percent = 43.5%
+100.83.134.158: [2024-04-24 10:38:24,626] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed Final Optimizer = BF16_Optimizer
+100.83.134.158: [2024-04-24 10:38:24,626] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed using client LR scheduler
+100.83.134.158: [2024-04-24 10:38:24,626] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed LR Scheduler =
+100.83.134.158: [2024-04-24 10:38:24,626] [INFO] [logging.py:96:log_dist] [Rank 0] step=0, skipped=0, lr=[0.0, 0.0], mom=[(0.9, 0.95), (0.9, 0.95)]
+100.83.134.158: [2024-04-24 10:38:24,627] [INFO] [config.py:992:print] DeepSpeedEngine configuration:
+100.83.134.158: [2024-04-24 10:38:24,627] [INFO] [config.py:996:print] activation_checkpointing_config {
+100.83.134.158: "partition_activations": false,
+100.83.134.158: "contiguous_memory_optimization": false,
+100.83.134.158: "cpu_checkpointing": false,
+100.83.134.158: "number_checkpoints": null,
+100.83.134.158: "synchronize_checkpoint_boundary": false,
+100.83.134.158: "profile": false
+100.83.134.158: }
+100.83.134.158: [2024-04-24 10:38:24,627] [INFO] [config.py:996:print] aio_config ................... {'block_size': 1048576, 'queue_depth': 8, 'thread_count': 1, 'single_submit': False, 'overlap_events': True}
+100.83.134.158: [2024-04-24 10:38:24,627] [INFO] [config.py:996:print] amp_enabled ..................
False +100.83.134.158: [2024-04-24 10:38:24,627] [INFO] [config.py:996:print] amp_params ................... False +100.83.134.158: [2024-04-24 10:38:24,627] [INFO] [config.py:996:print] autotuning_config ............ { +100.83.134.158: "enabled": false, +100.83.134.158: "start_step": null, +100.83.134.158: "end_step": null, +100.83.134.158: "metric_path": null, +100.83.134.158: "arg_mappings": null, +100.83.134.158: "metric": "throughput", +100.83.134.158: "model_info": null, +100.83.134.158: "results_dir": "autotuning_results", +100.83.134.158: "exps_dir": "autotuning_exps", +100.83.134.158: "overwrite": true, +100.83.134.158: "fast": true, +100.83.134.158: "start_profile_step": 3, +100.83.134.158: "end_profile_step": 5, +100.83.134.158: "tuner_type": "gridsearch", +100.83.134.158: "tuner_early_stopping": 5, +100.83.134.158: "tuner_num_trials": 50, +100.83.134.158: "model_info_path": null, +100.83.134.158: "mp_size": 1, +100.83.134.158: "max_train_batch_size": null, +100.83.134.158: "min_train_batch_size": 1, +100.83.134.158: "max_train_micro_batch_size_per_gpu": 1.024000e+03, +100.83.134.158: "min_train_micro_batch_size_per_gpu": 1, +100.83.134.158: "num_tuning_micro_batch_sizes": 3 +100.83.134.158: } +100.83.134.158: [2024-04-24 10:38:24,627] [INFO] [config.py:996:print] bfloat16_accumulate_grads_via_hooks True +100.83.134.158: [2024-04-24 10:38:24,627] [INFO] [config.py:996:print] bfloat16_enabled ............. True +100.83.134.158: [2024-04-24 10:38:24,628] [INFO] [config.py:996:print] checkpoint_parallel_write_pipeline False +100.83.134.158: [2024-04-24 10:38:24,628] [INFO] [config.py:996:print] checkpoint_tag_validation_enabled True +100.83.134.158: [2024-04-24 10:38:24,628] [INFO] [config.py:996:print] checkpoint_tag_validation_fail False +100.83.134.158: [2024-04-24 10:38:24,628] [INFO] [config.py:996:print] comms_config ................. +100.83.134.158: [2024-04-24 10:38:24,628] [INFO] [config.py:996:print] communication_data_type ...... None +100.83.134.158: [2024-04-24 10:38:24,628] [INFO] [config.py:996:print] compression_config ........... {'weight_quantization': {'shared_parameters': {'enabled': False, 'quantizer_kernel': False, 'schedule_offset': 0, 'quantize_groups': 1, 'quantize_verbose': False, 'quantization_type': 'symmetric', 'quantize_weight_in_forward': False, 'rounding': 'nearest', 'fp16_mixed_quantize': False, 'quantize_change_ratio': 0.001}, 'different_groups': {}}, 'activation_quantization': {'shared_parameters': {'enabled': False, 'quantization_type': 'symmetric', 'range_calibration': 'dynamic', 'schedule_offset': 1000}, 'different_groups': {}}, 'sparse_pruning': {'shared_parameters': {'enabled': False, 'method': 'l1', 'schedule_offset': 1000}, 'different_groups': {}}, 'row_pruning': {'shared_parameters': {'enabled': False, 'method': 'l1', 'schedule_offset': 1000}, 'different_groups': {}}, 'head_pruning': {'shared_parameters': {'enabled': False, 'method': 'topk', 'schedule_offset': 1000}, 'different_groups': {}}, 'channel_pruning': {'shared_parameters': {'enabled': False, 'method': 'l1', 'schedule_offset': 1000}, 'different_groups': {}}, 'layer_reduction': {'enabled': False}} +100.83.134.158: [2024-04-24 10:38:24,628] [INFO] [config.py:996:print] curriculum_enabled_legacy .... False +100.83.134.158: [2024-04-24 10:38:24,628] [INFO] [config.py:996:print] curriculum_params_legacy ..... False +100.83.134.158: [2024-04-24 10:38:24,628] [INFO] [config.py:996:print] data_efficiency_config ....... 
{'enabled': False, 'seed': 1234, 'data_sampling': {'enabled': False, 'num_epochs': 1000, 'num_workers': 0, 'curriculum_learning': {'enabled': False}}, 'data_routing': {'enabled': False, 'random_ltd': {'enabled': False, 'layer_token_lr_schedule': {'enabled': False}}}}
+100.83.134.158: [2024-04-24 10:38:24,628] [INFO] [config.py:996:print] data_efficiency_enabled ...... False
+100.83.134.158: [2024-04-24 10:38:24,628] [INFO] [config.py:996:print] dataloader_drop_last ......... False
+100.83.134.158: [2024-04-24 10:38:24,628] [INFO] [config.py:996:print] disable_allgather ............ False
+100.83.134.158: [2024-04-24 10:38:24,628] [INFO] [config.py:996:print] dump_state ................... False
+100.83.134.158: [2024-04-24 10:38:24,628] [INFO] [config.py:996:print] dynamic_loss_scale_args ...... None
+100.83.134.158: [2024-04-24 10:38:24,628] [INFO] [config.py:996:print] eigenvalue_enabled ........... False
+100.83.134.158: [2024-04-24 10:38:24,628] [INFO] [config.py:996:print] eigenvalue_gas_boundary_resolution 1
+100.83.134.158: [2024-04-24 10:38:24,628] [INFO] [config.py:996:print] eigenvalue_layer_name ........ bert.encoder.layer
+100.83.134.158: [2024-04-24 10:38:24,628] [INFO] [config.py:996:print] eigenvalue_layer_num ......... 0
+100.83.134.158: [2024-04-24 10:38:24,629] [INFO] [config.py:996:print] eigenvalue_max_iter .......... 100
+100.83.134.158: [2024-04-24 10:38:24,629] [INFO] [config.py:996:print] eigenvalue_stability ......... 1e-06
+100.83.134.158: [2024-04-24 10:38:24,629] [INFO] [config.py:996:print] eigenvalue_tol ............... 0.01
+100.83.134.158: [2024-04-24 10:38:24,629] [INFO] [config.py:996:print] eigenvalue_verbose ........... False
+100.83.134.158: [2024-04-24 10:38:24,629] [INFO] [config.py:996:print] elasticity_enabled ........... False
+100.83.134.158: [2024-04-24 10:38:24,629] [INFO] [config.py:996:print] flops_profiler_config ........ {
+100.83.134.158: "enabled": false,
+100.83.134.158: "recompute_fwd_factor": 0.0,
+100.83.134.158: "profile_step": 1,
+100.83.134.158: "module_depth": -1,
+100.83.134.158: "top_modules": 1,
+100.83.134.158: "detailed": true,
+100.83.134.158: "output_file": null
+100.83.134.158: }
+100.83.134.158: [2024-04-24 10:38:24,629] [INFO] [config.py:996:print] fp16_auto_cast ............... None
+100.83.134.158: [2024-04-24 10:38:24,629] [INFO] [config.py:996:print] fp16_enabled ................. False
+100.83.134.158: [2024-04-24 10:38:24,629] [INFO] [config.py:996:print] fp16_master_weights_and_gradients False
+100.83.134.158: [2024-04-24 10:38:24,629] [INFO] [config.py:996:print] global_rank .................. 0
+100.83.134.158: [2024-04-24 10:38:24,629] [INFO] [config.py:996:print] grad_accum_dtype ............. None
+100.83.134.158: [2024-04-24 10:38:24,629] [INFO] [config.py:996:print] gradient_accumulation_steps .. 128
+100.83.134.158: [2024-04-24 10:38:24,629] [INFO] [config.py:996:print] gradient_clipping ............ 1.0
+100.83.134.158: [2024-04-24 10:38:24,629] [INFO] [config.py:996:print] gradient_predivide_factor .... 1.0
+100.83.134.158: [2024-04-24 10:38:24,629] [INFO] [config.py:996:print] hybrid_engine ................ enabled=False max_out_tokens=512 inference_tp_size=1 release_inference_cache=False pin_parameters=True tp_gather_partition_size=8
+100.83.134.158: [2024-04-24 10:38:24,629] [INFO] [config.py:996:print] initial_dynamic_scale ........ 1
+100.83.134.158: [2024-04-24 10:38:24,629] [INFO] [config.py:996:print] load_universal_checkpoint .... False
+100.83.134.158: [2024-04-24 10:38:24,629] [INFO] [config.py:996:print] loss_scale ................... 1.0
+100.83.134.158: [2024-04-24 10:38:24,630] [INFO] [config.py:996:print] memory_breakdown ............. False
+100.83.134.158: [2024-04-24 10:38:24,630] [INFO] [config.py:996:print] mics_hierarchial_params_gather False
+100.83.134.158: [2024-04-24 10:38:24,630] [INFO] [config.py:996:print] mics_shard_size .............. -1
+100.83.134.158: [2024-04-24 10:38:24,630] [INFO] [config.py:996:print] monitor_config ............... tensorboard=TensorBoardConfig(enabled=False, output_path='', job_name='DeepSpeedJobName') wandb=WandbConfig(enabled=False, group=None, team=None, project='deepspeed') csv_monitor=CSVConfig(enabled=False, output_path='', job_name='DeepSpeedJobName') enabled=False
+100.83.134.158: [2024-04-24 10:38:24,630] [INFO] [config.py:996:print] nebula_config ................ {
+100.83.134.158: "enabled": false,
+100.83.134.158: "persistent_storage_path": null,
+100.83.134.158: "persistent_time_interval": 100,
+100.83.134.158: "num_of_version_in_retention": 2,
+100.83.134.158: "enable_nebula_load": true,
+100.83.134.158: "load_path": null
+100.83.134.158: }
+100.83.134.158: [2024-04-24 10:38:24,630] [INFO] [config.py:996:print] optimizer_legacy_fusion ...... False
+100.83.134.158: [2024-04-24 10:38:24,630] [INFO] [config.py:996:print] optimizer_name ............... None
+100.83.134.158: [2024-04-24 10:38:24,630] [INFO] [config.py:996:print] optimizer_params ............. None
+100.83.134.158: [2024-04-24 10:38:24,630] [INFO] [config.py:996:print] pipeline ..................... {'stages': 'auto', 'partition': 'best', 'seed_layers': False, 'activation_checkpoint_interval': 0, 'pipe_partitioned': False, 'grad_partitioned': False}
+100.83.134.158: [2024-04-24 10:38:24,630] [INFO] [config.py:996:print] pld_enabled .................. False
+100.83.134.158: [2024-04-24 10:38:24,630] [INFO] [config.py:996:print] pld_params ................... False
+100.83.134.158: [2024-04-24 10:38:24,630] [INFO] [config.py:996:print] prescale_gradients ........... False
+100.83.134.158: [2024-04-24 10:38:24,630] [INFO] [config.py:996:print] scheduler_name ............... None
+100.83.134.158: [2024-04-24 10:38:24,630] [INFO] [config.py:996:print] scheduler_params ............. None
+100.83.134.158: [2024-04-24 10:38:24,630] [INFO] [config.py:996:print] seq_parallel_communication_data_type torch.float32
+100.83.134.158: [2024-04-24 10:38:24,630] [INFO] [config.py:996:print] sparse_attention ............. None
+100.83.134.158: [2024-04-24 10:38:24,630] [INFO] [config.py:996:print] sparse_gradients_enabled ..... False
+100.83.134.158: [2024-04-24 10:38:24,630] [INFO] [config.py:996:print] steps_per_print .............. 10
+100.83.134.158: [2024-04-24 10:38:24,630] [INFO] [config.py:996:print] train_batch_size ............. 256
+100.83.134.158: [2024-04-24 10:38:24,631] [INFO] [config.py:996:print] train_micro_batch_size_per_gpu 1
+100.83.134.158: [2024-04-24 10:38:24,631] [INFO] [config.py:996:print] use_data_before_expert_parallel_ False
+100.83.134.158: [2024-04-24 10:38:24,631] [INFO] [config.py:996:print] use_node_local_storage ....... False
+100.83.134.158: [2024-04-24 10:38:24,631] [INFO] [config.py:996:print] wall_clock_breakdown ......... False
+100.83.134.158: [2024-04-24 10:38:24,631] [INFO] [config.py:996:print] weight_quantization_config ... None
+100.83.134.158: [2024-04-24 10:38:24,631] [INFO] [config.py:996:print] world_size ................... 2
+100.83.134.158: [2024-04-24 10:38:24,631] [INFO] [config.py:996:print] zero_allow_comm_data_type_fp32 False
+100.83.134.158: [2024-04-24 10:38:24,631] [INFO] [config.py:996:print] zero_allow_untested_optimizer False
+100.83.134.158: [2024-04-24 10:38:24,631] [INFO] [config.py:996:print] zero_config .................. stage=0 contiguous_gradients=True reduce_scatter=False reduce_bucket_size=500,000,000 use_multi_rank_bucket_allreduce=True allgather_partitions=True allgather_bucket_size=500,000,000 overlap_comm=False load_from_fp32_weights=True elastic_checkpoint=False offload_param=None offload_optimizer=None sub_group_size=1,000,000,000 cpu_offload_param=None cpu_offload_use_pin_memory=None cpu_offload=None prefetch_bucket_size=50,000,000 param_persistence_threshold=100,000 model_persistence_threshold=sys.maxsize max_live_parameters=1,000,000,000 max_reuse_distance=1,000,000,000 gather_16bit_weights_on_model_save=False stage3_gather_fp16_weights_on_model_save=False use_all_reduce_for_fetch_params=False ignore_unused_parameters=True legacy_stage1=False round_robin_gradients=False zero_hpz_partition_size=1 zero_quantized_weights=False zero_quantized_nontrainable_weights=False zero_quantized_gradients=False mics_shard_size=-1 mics_hierarchical_params_gather=False memory_efficient_linear=True pipeline_loading_checkpoint=False override_module_apply=True
+100.83.134.158: [2024-04-24 10:38:24,631] [INFO] [config.py:996:print] zero_enabled ................. False
+100.83.134.158: [2024-04-24 10:38:24,631] [INFO] [config.py:996:print] zero_force_ds_cpu_optimizer .. True
+100.83.134.158: [2024-04-24 10:38:24,631] [INFO] [config.py:996:print] zero_optimization_stage ...... 0
+100.83.134.158: [2024-04-24 10:38:24,631] [INFO] [config.py:982:print_user_config] json = {
+100.83.134.158: "train_batch_size": 256,
+100.83.134.158: "train_micro_batch_size_per_gpu": 1,
+100.83.134.158: "steps_per_print": 10,
+100.83.134.158: "gradient_clipping": 1.0,
+100.83.134.158: "zero_optimization": {
+100.83.134.158: "stage": 0
+100.83.134.158: },
+100.83.134.158: "bf16": {
+100.83.134.158: "enabled": true,
+100.83.134.158: "accumulate_grads_via_hooks": true
+100.83.134.158: },
+100.83.134.158: "fp16": {
+100.83.134.158: "enabled": false
+100.83.134.158: },
+100.83.134.158: "wall_clock_breakdown": false,
+100.83.134.158: "pipeline": {
+100.83.134.158: "pipe_partitioned": false,
+100.83.134.158: "grad_partitioned": false
+100.83.134.158: }
+100.83.134.158: }
+100.83.134.158: [2024-04-24 10:38:24,631] [INFO] [engine.py:99:__init__] CONFIG: micro_batches=128 micro_batch_size=1
+100.83.134.158: [2024-04-24 10:38:24,631] [INFO] [engine.py:139:__init__] is_pipe_partitioned= False is_grad_partitioned= False
+100.83.134.158: [2024-04-24 10:38:25,366] [INFO] [engine.py:180:__init__] RANK=0 STAGE=0 LAYERS=15 [0, 15) STAGE_PARAMS=151830528 (151.831M) TOTAL_PARAMS=1214652416 (1214.652M) UNIQUE_PARAMS=1214652416 (1214.652M)
+100.83.134.158: [2024-04-24 10:38:25,366] [INFO] [engine.py:180:__init__] RANK=3 STAGE=0 LAYERS=15 [0, 15) STAGE_PARAMS=151830528 (151.831M) TOTAL_PARAMS=1214652416 (1214.652M) UNIQUE_PARAMS=1214652416 (1214.652M)
+100.83.134.158: [2024-04-24 10:38:25,366] [INFO] [engine.py:180:__init__] RANK=1 STAGE=0 LAYERS=15 [0, 15) STAGE_PARAMS=151830528 (151.831M) TOTAL_PARAMS=1214652416 (1214.652M) UNIQUE_PARAMS=1214652416 (1214.652M)
+100.83.134.158: [2024-04-24 10:38:25,366] [INFO] [engine.py:180:__init__] RANK=2 STAGE=0 LAYERS=15 [0, 15) STAGE_PARAMS=151830528 (151.831M) TOTAL_PARAMS=1214652416 (1214.652M) UNIQUE_PARAMS=1214652416 (1214.652M)
+100.83.134.148: [2024-04-24 10:38:25,370] [INFO] [engine.py:180:__init__] RANK=8 STAGE=1 LAYERS=17 [15, 32) STAGE_PARAMS=151832576 (151.833M) TOTAL_PARAMS=1214652416 (1214.652M) UNIQUE_PARAMS=1214652416 (1214.652M)
+100.83.134.148: [2024-04-24 10:38:25,370] [INFO] [engine.py:180:__init__] RANK=10 STAGE=1 LAYERS=17 [15, 32) STAGE_PARAMS=151832576 (151.833M) TOTAL_PARAMS=1214652416 (1214.652M) UNIQUE_PARAMS=1214652416 (1214.652M)
+100.83.134.148: [2024-04-24 10:38:25,370] [INFO] [engine.py:180:__init__] RANK=11 STAGE=1 LAYERS=17 [15, 32) STAGE_PARAMS=151832576 (151.833M) TOTAL_PARAMS=1214652416 (1214.652M) UNIQUE_PARAMS=1214652416 (1214.652M)
+100.83.134.148: [2024-04-24 10:38:25,370] [INFO] [engine.py:180:__init__] RANK=9 STAGE=1 LAYERS=17 [15, 32) STAGE_PARAMS=151832576 (151.833M) TOTAL_PARAMS=1214652416 (1214.652M) UNIQUE_PARAMS=1214652416 (1214.652M)
+100.83.134.148: [2024-04-24 10:38:25,373] [WARNING] [engine.py:2763:load_checkpoint] Unable to find latest file at /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/latest, if trying to load latest checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint.
+100.83.134.148: [2024-04-24 10:38:25,373] [WARNING] [engine.py:2763:load_checkpoint] Unable to find latest file at /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/latest, if trying to load latest checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint.
+100.83.134.148: [2024-04-24 10:38:25,373] [WARNING] [engine.py:2763:load_checkpoint] Unable to find latest file at /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/latest, if trying to load latest checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint.
+100.83.134.148: [2024-04-24 10:38:25,373] [WARNING] [engine.py:2763:load_checkpoint] Unable to find latest file at /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/latest, if trying to load latest checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint.
+100.83.134.148: [2024-04-24 10:38:25,373] [WARNING] [engine.py:2763:load_checkpoint] Unable to find latest file at /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/latest, if trying to load latest checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint.
+100.83.134.148: [2024-04-24 10:38:25,373] [WARNING] [engine.py:2763:load_checkpoint] Unable to find latest file at /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/latest, if trying to load latest checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint.
+100.83.134.148: [2024-04-24 10:38:25,373] [WARNING] [engine.py:2763:load_checkpoint] Unable to find latest file at /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/latest, if trying to load latest checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint.
+100.83.134.158: [2024-04-24 10:38:25,370] [WARNING] [engine.py:2763:load_checkpoint] Unable to find latest file at /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/latest, if trying to load latest checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint.
+100.83.134.158: [2024-04-24 10:38:25,370] [WARNING] [engine.py:2763:load_checkpoint] Unable to find latest file at /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/latest, if trying to load latest checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint.
+100.83.134.158: [2024-04-24 10:38:25,370] [WARNING] [engine.py:2763:load_checkpoint] Unable to find latest file at /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/latest, if trying to load latest checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint.
+100.83.134.158: [2024-04-24 10:38:25,370] [WARNING] [engine.py:2763:load_checkpoint] Unable to find latest file at /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/latest, if trying to load latest checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint.
+100.83.134.158: [2024-04-24 10:38:25,370] [WARNING] [engine.py:2763:load_checkpoint] Unable to find latest file at /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/latest, if trying to load latest checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint.
+100.83.134.158: [2024-04-24 10:38:25,370] [WARNING] [engine.py:2763:load_checkpoint] Unable to find latest file at /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/latest, if trying to load latest checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint.
+100.83.134.158: WARNING: could not find the metadata file /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2
+100.83.134.158: will not load any checkpoints and will start from random
+100.83.134.148: [2024-04-24 10:38:25,374] [WARNING] [engine.py:2763:load_checkpoint] Unable to find latest file at /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/latest, if trying to load latest checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint.
+100.83.134.158: [2024-04-24 10:38:25,371] [WARNING] [engine.py:2763:load_checkpoint] Unable to find latest file at /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/latest, if trying to load latest checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint.
+100.83.134.158: [2024-04-24 10:38:25,371] [WARNING] [engine.py:2763:load_checkpoint] Unable to find latest file at /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/latest, if trying to load latest checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint.
+100.83.134.148: time (ms) | load-checkpoint: 2.70
+100.83.134.158: [after model, optimizer, and learning rate scheduler are built] datetime: 2024-04-24 10:38:25
+100.83.134.158: > building train, validation, and test datasets ...
+100.83.134.158: > datasets target sizes (minimum size):
+100.83.134.158: train: 2560000
+100.83.134.158: validation: 1282560
+100.83.134.158: test: 2560
+100.83.134.158: > building train, validation, and test datasets for GPT ...
+100.83.134.158: Single data path provided for train, valid & test
+100.83.134.158: > building dataset index ...
+100.83.134.158: reading sizes...
+100.83.134.158: reading pointers...
+100.83.134.158: reading document index...
+100.83.134.158: creating numpy buffer of mmap...
+100.83.134.158: creating memory view of numpy buffer...
+100.83.134.158: > finished creating indexed dataset in 0.000572 seconds
+100.83.134.158: number of documents: 1558306
+100.83.134.158: > dataset split:
+100.83.134.158: train:
+100.83.134.158: document indices in [0, 1509999) total of 1509999 documents
+100.83.134.158: validation:
+100.83.134.158: document indices in [1509999, 1556748) total of 46749 documents
+100.83.134.158: test:
+100.83.134.158: document indices in [1556748, 1558306) total of 1558 documents
+100.83.134.158: Loading dataset index file from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_doc_idx.npy
+100.83.134.158: > loaded doc-idx mapping from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_doc_idx.npy
+100.83.134.158: Loading dataset index file from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_sample_idx.npy
+100.83.134.148: Loading dataset index file from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_doc_idx.npy
+100.83.134.158: > loaded sample-idx mapping from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_sample_idx.npy
+100.83.134.158: Loading dataset index file from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_shuffle_idx.npy
+100.83.134.158: > loaded shuffle-idx mapping from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_shuffle_idx.npy
+100.83.134.158: loaded indexed file in 0.002 seconds
+100.83.134.158: total number of samples: 15244235
+100.83.134.158: total number of epochs: 1
+100.83.134.148: Loading dataset index file from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_sample_idx.npy
+100.83.134.148: Loading dataset index file from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_shuffle_idx.npy
+100.83.134.158: Loading dataset index file from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_doc_idx.npy
+100.83.134.158: Loading dataset index file from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_sample_idx.npy
+100.83.134.158: Loading dataset index file from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_shuffle_idx.npy
+100.83.134.148: Loading dataset index file from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_doc_idx.npy
+100.83.134.148: Loading dataset index file from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_sample_idx.npy
+100.83.134.148: Loading dataset index file from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_shuffle_idx.npy
+100.83.134.158: Loading dataset index file from /data/arxiv//tokenized_text_document_valid_indexmap_1282560ns_2048sl_1234s_doc_idx.npy
+100.83.134.148: Loading dataset index file from /data/arxiv//tokenized_text_document_valid_indexmap_1282560ns_2048sl_1234s_doc_idx.npy
+100.83.134.158: Loading dataset index file from /data/arxiv//tokenized_text_document_valid_indexmap_1282560ns_2048sl_1234s_doc_idx.npyLoading dataset index file from /data/arxiv//tokenized_text_document_valid_indexmap_1282560ns_2048sl_1234s_sample_idx.npy
+100.83.134.158:
+100.83.134.158: > loaded doc-idx mapping from /data/arxiv//tokenized_text_document_valid_indexmap_1282560ns_2048sl_1234s_doc_idx.npy
+100.83.134.158: Loading dataset index file from /data/arxiv//tokenized_text_document_valid_indexmap_1282560ns_2048sl_1234s_sample_idx.npy
+100.83.134.158: Loading dataset index file from /data/arxiv//tokenized_text_document_valid_indexmap_1282560ns_2048sl_1234s_shuffle_idx.npy
+100.83.134.148: Loading dataset index file from /data/arxiv//tokenized_text_document_valid_indexmap_1282560ns_2048sl_1234s_doc_idx.npy
+100.83.134.158: > loaded sample-idx mapping from /data/arxiv//tokenized_text_document_valid_indexmap_1282560ns_2048sl_1234s_sample_idx.npy
+100.83.134.158: Loading dataset index file from /data/arxiv//tokenized_text_document_valid_indexmap_1282560ns_2048sl_1234s_shuffle_idx.npy
+100.83.134.158: > loaded shuffle-idx mapping from /data/arxiv//tokenized_text_document_valid_indexmap_1282560ns_2048sl_1234s_shuffle_idx.npy
+100.83.134.158: loaded indexed file in 0.001 seconds
+100.83.134.158: total number of samples: 1443484
+100.83.134.158: total number of epochs: 3
+100.83.134.148: Loading dataset index file from /data/arxiv//tokenized_text_document_valid_indexmap_1282560ns_2048sl_1234s_sample_idx.npy
+100.83.134.148: Loading dataset index file from /data/arxiv//tokenized_text_document_valid_indexmap_1282560ns_2048sl_1234s_sample_idx.npy
+100.83.134.148: Loading dataset index file from /data/arxiv//tokenized_text_document_valid_indexmap_1282560ns_2048sl_1234s_shuffle_idx.npyLoading dataset index file from /data/arxiv//tokenized_text_document_valid_indexmap_1282560ns_2048sl_1234s_shuffle_idx.npy
+100.83.134.148:
+100.83.134.158: Loading dataset index file from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_doc_idx.npy
+100.83.134.148: Loading dataset index file from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_doc_idx.npy
+100.83.134.158: Loading dataset index file from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_doc_idx.npy
+100.83.134.158: > loaded doc-idx mapping from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_doc_idx.npy
+100.83.134.148: Loading dataset index file from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_doc_idx.npy
+100.83.134.148: Loading dataset index file from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_sample_idx.npy
+100.83.134.158: Loading dataset index file from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_sample_idx.npy
+100.83.134.158: Loading dataset index file from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_sample_idx.npy
+100.83.134.148: Loading dataset index file from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_sample_idx.npy
+100.83.134.158: > loaded sample-idx mapping from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_sample_idx.npy
+100.83.134.158: Loading dataset index file from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_shuffle_idx.npy
+100.83.134.148: Loading dataset index file from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_shuffle_idx.npy
+100.83.134.158: Loading dataset index file from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_shuffle_idx.npy
+100.83.134.148: Loading dataset index file from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_shuffle_idx.npy
+100.83.134.158: > loaded shuffle-idx mapping from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_shuffle_idx.npy
+100.83.134.158: loaded indexed file in 0.001 seconds
+100.83.134.158: total number of samples: 16581
+100.83.134.158: total number of epochs: 1
+100.83.134.158: > finished creating GPT datasets ...
+100.83.134.148: time (ms) | model-and-optimizer-setup: 1956.40 | train/valid/test-data-iterators-setup: 1587.35
+100.83.134.158: [after dataloaders are built] datetime: 2024-04-24 10:38:27
+100.83.134.158: done with setup ...
+100.83.134.158: training ...
+100.83.134.158: [before the start of training step] datetime: 2024-04-24 10:38:27
+100.83.134.158: [2024-04-24 14:03:37,049] [INFO] [logging.py:96:log_dist] [Rank 0] step=10, skipped=0, lr=[1.4999999999999998e-06, 1.4999999999999998e-06], mom=[(0.9, 0.95), (0.9, 0.95)]
+100.83.134.158: [Rank 1] (after 10 iterations) memory (MB) | allocated: 0.0 | max allocated: 0.0 | reserved: 0.0 | max reserved: 0.0[Rank 3] (after 10 iterations) memory (MB) | allocated: 0.0 | max allocated: 0.0 | reserved: 0.0 | max reserved: 0.0
+100.83.134.158:
+100.83.134.148: [Rank 11] (after 10 iterations) memory (MB) | allocated: 0.0 | max allocated: 0.0 | reserved: 0.0 | max reserved: 0.0[Rank 9] (after 10 iterations) memory (MB) | allocated: 0.0 | max allocated: 0.0 | reserved: 0.0 | max reserved: 0.0
+100.83.134.148:
+100.83.134.158: steps: 10 loss: 10.9455 iter time (s): 1231.017 samples/sec: 0.208
+100.83.134.148: iteration 10/ 10000 | consumed samples: 2560 | consumed tokens: 5242880 | elapsed time per iteration (ms): 1230993.0 | learning rate: 1.500E-06 | global batch size: 256 | loss scale: 0.0 | grad norm: 1.000 | num zeros: 1.0 | number of skipped iterations: 10 | number of nan iterations: 0 | samples per second: 0.208 | TFLOPs: 0.24 |
+100.83.134.158: [Rank 2] (after 10 iterations) memory (MB) | allocated: 0.0 | max allocated: 0.0 | reserved: 0.0 | max reserved: 0.0
+100.83.134.148: [Rank 10] (after 10 iterations) memory (MB) | allocated: 0.0 | max allocated: 0.0 | reserved: 0.0 | max reserved: 0.0
+100.83.134.158: [Rank 0] (after 10 iterations) memory (MB) | allocated: 0.0 | max allocated: 0.0 | reserved: 0.0 | max reserved: 0.0
+100.83.134.148: [Rank 8] (after 10 iterations) memory (MB) | allocated: 0.0 | max allocated: 0.0 | reserved: 0.0 | max reserved: 0.0
+100.83.134.158: [2024-04-24 17:30:06,287] [INFO] [logging.py:96:log_dist] [Rank 0] step=20, skipped=0, lr=[2.9999999999999997e-06, 2.9999999999999997e-06], mom=[(0.9, 0.95), (0.9, 0.95)]
+100.83.134.158: steps: 20 loss: 9.1008 iter time (s): 1238.924 samples/sec: 0.207
+100.83.134.148: iteration 20/ 10000 | consumed samples: 5120 | consumed tokens: 10485760 | elapsed time per iteration (ms): 1238923.2 | learning rate: 3.000E-06 | global batch size: 256 | loss scale: 0.0 | grad norm: 1.000 | num zeros: 1.0 | number of skipped iterations: 10 | number of nan iterations: 0 | samples per second: 0.207 | TFLOPs: 0.24 |
+100.83.134.148: 2024-04-24 17:30:06 Start last rank evaluation
+100.83.134.158: Evaluating iter 10/10
+100.83.134.148: --------------------------------------------------------------------------------------------------------------------
+100.83.134.148: 2024-04-24 17:31:05 | validation loss at iteration 20 | lm loss value: 8.887300E+00 | lm loss PPL: 7.239443E+03 |
+100.83.134.148: --------------------------------------------------------------------------------------------------------------------
+100.83.134.158: saving checkpoint at iteration 20 to /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2
+100.83.134.158: [2024-04-24 17:31:05,586] [INFO] [logging.py:96:log_dist] [Rank 0] [Torch] Checkpoint global_step20 is about to be saved!
+100.83.134.158: [2024-04-24 17:31:05,616] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_01-model_03-model_states.pt...
+100.83.134.148: [2024-04-24 17:31:05,620] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_15-model_03-model_states.pt...
+100.83.134.148: [2024-04-24 17:31:05,621] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_15-model_02-model_states.pt...
+100.83.134.148: [2024-04-24 17:31:05,622] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_15-model_00-model_states.pt...
+100.83.134.148: [2024-04-24 17:31:05,623] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_15-model_01-model_states.pt...
+100.83.134.158: [2024-04-24 17:31:05,626] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_01-model_02-model_states.pt...
+100.83.134.158: [2024-04-24 17:31:05,639] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_01-model_01-model_states.pt...
+100.83.134.158: [2024-04-24 17:31:05,643] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_01-model_00-model_states.pt...
+100.83.134.148: [2024-04-24 17:31:05,662] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_15-model_00-model_states.pt.
+100.83.134.158: [2024-04-24 17:31:05,677] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_01-model_03-model_states.pt.
+100.83.134.158: [2024-04-24 17:31:05,688] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_01-model_02-model_states.pt.
+100.83.134.148: [2024-04-24 17:31:05,695] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_16-model_00-model_states.pt...
+100.83.134.158: [2024-04-24 17:31:05,705] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_03-model_03-model_states.pt...
+100.83.134.158: [2024-04-24 17:31:05,710] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_01-model_01-model_states.pt.
+100.83.134.158: [2024-04-24 17:31:05,716] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_03-model_02-model_states.pt...
+100.83.134.158: [2024-04-24 17:31:05,719] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_01-model_00-model_states.pt.
+100.83.134.148: [2024-04-24 17:31:05,723] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_15-model_01-model_states.pt.
+100.83.134.158: [2024-04-24 17:31:05,733] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_03-model_03-model_states.pt.
+100.83.134.148: [2024-04-24 17:31:05,731] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_16-model_00-model_states.pt.
+100.83.134.158: [2024-04-24 17:31:05,739] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_03-model_02-model_states.pt.
+100.83.134.158: [2024-04-24 17:31:05,741] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_03-model_01-model_states.pt...
+100.83.134.158: [2024-04-24 17:31:05,747] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_03-model_00-model_states.pt...
+100.83.134.148: [2024-04-24 17:31:05,746] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_16-model_01-model_states.pt...
+100.83.134.158: [2024-04-24 17:31:05,755] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_04-model_03-model_states.pt...
+100.83.134.148: [2024-04-24 17:31:05,753] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_17-model_00-model_states.pt...
+100.83.134.148: [2024-04-24 17:31:05,757] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_15-model_03-model_states.pt.
+100.83.134.158: [2024-04-24 17:31:05,766] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_04-model_02-model_states.pt...
+100.83.134.158: [2024-04-24 17:31:05,768] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_03-model_01-model_states.pt.
+100.83.134.148: [2024-04-24 17:31:05,778] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_15-model_02-model_states.pt.
+100.83.134.158: [2024-04-24 17:31:05,781] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_04-model_03-model_states.pt.
+100.83.134.158: [2024-04-24 17:31:05,789] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_04-model_02-model_states.pt.
+100.83.134.148: [2024-04-24 17:31:05,788] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_16-model_01-model_states.pt.
+100.83.134.148: [2024-04-24 17:31:05,790] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_17-model_00-model_states.pt.
+100.83.134.148: [2024-04-24 17:31:05,790] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_16-model_03-model_states.pt...
+100.83.134.158: [2024-04-24 17:31:05,793] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_04-model_01-model_states.pt...
+100.83.134.158: [2024-04-24 17:31:05,802] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_05-model_03-model_states.pt...
+100.83.134.148: [2024-04-24 17:31:05,800] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_16-model_02-model_states.pt...
+100.83.134.148: [2024-04-24 17:31:05,812] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_17-model_01-model_states.pt...
+100.83.134.158: [2024-04-24 17:31:05,815] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_05-model_02-model_states.pt...
+100.83.134.148: [2024-04-24 17:31:05,813] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_18-model_00-model_states.pt...
+100.83.134.158: [2024-04-24 17:31:05,818] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_04-model_01-model_states.pt.
+100.83.134.158: [2024-04-24 17:31:05,824] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_03-model_00-model_states.pt.
+100.83.134.158: [2024-04-24 17:31:05,829] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_05-model_03-model_states.pt.
+100.83.134.158: [2024-04-24 17:31:05,835] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_05-model_02-model_states.pt.
+100.83.134.158: [2024-04-24 17:31:05,836] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_05-model_01-model_states.pt...
+100.83.134.148: [2024-04-24 17:31:05,834] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_16-model_03-model_states.pt.
+100.83.134.158: [2024-04-24 17:31:05,844] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_04-model_00-model_states.pt...
+100.83.134.158: [2024-04-24 17:31:05,850] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_06-model_03-model_states.pt...
+100.83.134.158: [2024-04-24 17:31:05,857] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_06-model_02-model_states.pt...
+100.83.134.158: [2024-04-24 17:31:05,857] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_05-model_01-model_states.pt.
+100.83.134.148: [2024-04-24 17:31:05,859] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_17-model_03-model_states.pt...
+100.83.134.158: [2024-04-24 17:31:05,878] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_06-model_01-model_states.pt...
+100.83.134.148: [2024-04-24 17:31:05,901] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_16-model_02-model_states.pt.
+100.83.134.148: [2024-04-24 17:31:05,908] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_17-model_01-model_states.pt.
+100.83.134.148: [2024-04-24 17:31:05,912] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_18-model_00-model_states.pt.
+100.83.134.148: [2024-04-24 17:31:05,928] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_17-model_02-model_states.pt...
+100.83.134.148: [2024-04-24 17:31:05,933] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_19-model_00-model_states.pt...
+100.83.134.148: [2024-04-24 17:31:05,937] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_18-model_01-model_states.pt...
+100.83.134.158: [2024-04-24 17:31:05,948] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_04-model_00-model_states.pt.
+100.83.134.158: [2024-04-24 17:31:05,953] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_06-model_03-model_states.pt.
+100.83.134.148: [2024-04-24 17:31:05,954] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_17-model_03-model_states.pt.
+100.83.134.158: [2024-04-24 17:31:05,967] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_06-model_02-model_states.pt.
+100.83.134.158: [2024-04-24 17:31:05,971] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_06-model_01-model_states.pt.
+100.83.134.158: [2024-04-24 17:31:05,971] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_05-model_00-model_states.pt...
+100.83.134.158: [2024-04-24 17:31:05,977] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_07-model_03-model_states.pt...
+100.83.134.148: [2024-04-24 17:31:05,977] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_18-model_03-model_states.pt...
+100.83.134.158: [2024-04-24 17:31:05,989] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_07-model_02-model_states.pt...
+100.83.134.158: [2024-04-24 17:31:05,991] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_07-model_01-model_states.pt...
+100.83.134.158: [2024-04-24 17:31:06,005] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_07-model_03-model_states.pt.
+100.83.134.158: [2024-04-24 17:31:06,029] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_08-model_03-model_states.pt...
+100.83.134.148: [2024-04-24 17:31:06,027] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_17-model_02-model_states.pt.
+100.83.134.148: [2024-04-24 17:31:06,052] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_18-model_02-model_states.pt...
+100.83.134.148: [2024-04-24 17:31:06,059] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_18-model_01-model_states.pt.
+100.83.134.148: [2024-04-24 17:31:06,061] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_19-model_00-model_states.pt.
+100.83.134.158: [2024-04-24 17:31:06,081] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_05-model_00-model_states.pt.
+100.83.134.148: [2024-04-24 17:31:06,079] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_20-model_00-model_states.pt...
+100.83.134.148: [2024-04-24 17:31:06,080] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_19-model_01-model_states.pt...
+100.83.134.148: [2024-04-24 17:31:06,088] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_18-model_03-model_states.pt.
+100.83.134.158: [2024-04-24 17:31:06,091] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_07-model_02-model_states.pt.
+100.83.134.148: [2024-04-24 17:31:06,092] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_18-model_02-model_states.pt.
+100.83.134.158: [2024-04-24 17:31:06,103] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_06-model_00-model_states.pt...
+100.83.134.158: [2024-04-24 17:31:06,111] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_08-model_03-model_states.pt.
+100.83.134.158: [2024-04-24 17:31:06,111] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_08-model_02-model_states.pt...
+100.83.134.158: [2024-04-24 17:31:06,112] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_07-model_01-model_states.pt.
+100.83.134.148: [2024-04-24 17:31:06,113] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_20-model_00-model_states.pt.
+100.83.134.148: [2024-04-24 17:31:06,114] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_19-model_02-model_states.pt...
+100.83.134.148: [2024-04-24 17:31:06,116] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_19-model_03-model_states.pt...
+100.83.134.148: [2024-04-24 17:31:06,121] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_19-model_01-model_states.pt.
+100.83.134.158: [2024-04-24 17:31:06,129] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_09-model_03-model_states.pt...
+100.83.134.158: [2024-04-24 17:31:06,130] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_08-model_01-model_states.pt...
+100.83.134.158: [2024-04-24 17:31:06,136] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_08-model_02-model_states.pt.
+100.83.134.148: [2024-04-24 17:31:06,135] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_21-model_00-model_states.pt...
+100.83.134.148: [2024-04-24 17:31:06,144] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_20-model_01-model_states.pt...
+100.83.134.158: [2024-04-24 17:31:06,155] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_09-model_02-model_states.pt...
+100.83.134.158: [2024-04-24 17:31:06,168] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_09-model_03-model_states.pt.
+100.83.134.158: [2024-04-24 17:31:06,190] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_10-model_03-model_states.pt...
+100.83.134.158: [2024-04-24 17:31:06,218] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_06-model_00-model_states.pt.
+100.83.134.158: [2024-04-24 17:31:06,223] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_08-model_01-model_states.pt.
+100.83.134.158: [2024-04-24 17:31:06,238] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_07-model_00-model_states.pt...
+100.83.134.158: [2024-04-24 17:31:06,239] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_09-model_02-model_states.pt.
+100.83.134.158: [2024-04-24 17:31:06,241] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_09-model_01-model_states.pt...
+100.83.134.158: [2024-04-24 17:31:06,242] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_10-model_03-model_states.pt.
+100.83.134.148: [2024-04-24 17:31:06,246] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_19-model_02-model_states.pt.
+100.83.134.148: [2024-04-24 17:31:06,249] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_21-model_00-model_states.pt.
+100.83.134.148: [2024-04-24 17:31:06,252] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_19-model_03-model_states.pt.
+100.83.134.158: [2024-04-24 17:31:06,264] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_10-model_02-model_states.pt...
+100.83.134.148: [2024-04-24 17:31:06,263] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_20-model_01-model_states.pt.
+100.83.134.158: [2024-04-24 17:31:06,267] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_11-model_03-model_states.pt...
+100.83.134.148: [2024-04-24 17:31:06,266] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_22-model_00-model_states.pt...
+100.83.134.158: [2024-04-24 17:31:06,271] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_09-model_01-model_states.pt.
+100.83.134.148: [2024-04-24 17:31:06,269] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_20-model_02-model_states.pt...
+100.83.134.148: [2024-04-24 17:31:06,281] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_20-model_03-model_states.pt...
+100.83.134.148: [2024-04-24 17:31:06,288] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_21-model_01-model_states.pt...
+100.83.134.158: [2024-04-24 17:31:06,293] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_10-model_01-model_states.pt...
+100.83.134.158: [2024-04-24 17:31:06,334] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_07-model_00-model_states.pt.
+100.83.134.158: [2024-04-24 17:31:06,355] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_08-model_00-model_states.pt...
+100.83.134.158: [2024-04-24 17:31:06,379] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_10-model_02-model_states.pt.
+100.83.134.158: [2024-04-24 17:31:06,379] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_10-model_01-model_states.pt.
+100.83.134.158: [2024-04-24 17:31:06,383] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_11-model_03-model_states.pt.
+100.83.134.148: [2024-04-24 17:31:06,389] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_22-model_00-model_states.pt.
+100.83.134.148: [2024-04-24 17:31:06,392] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_21-model_01-model_states.pt.
+100.83.134.158: [2024-04-24 17:31:06,397] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_11-model_02-model_states.pt...
+100.83.134.158: [2024-04-24 17:31:06,397] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_11-model_01-model_states.pt...
+100.83.134.148: [2024-04-24 17:31:06,404] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_20-model_02-model_states.pt.
+100.83.134.158: [2024-04-24 17:31:06,407] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_12-model_03-model_states.pt...
+100.83.134.148: [2024-04-24 17:31:06,408] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_20-model_03-model_states.pt.
+100.83.134.148: [2024-04-24 17:31:06,408] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_23-model_00-model_states.pt...
+100.83.134.148: [2024-04-24 17:31:06,418] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_22-model_01-model_states.pt...
+100.83.134.148: [2024-04-24 17:31:06,424] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_21-model_02-model_states.pt...
+100.83.134.148: [2024-04-24 17:31:06,433] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_21-model_03-model_states.pt...
+100.83.134.158: [2024-04-24 17:31:06,453] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_08-model_00-model_states.pt.
+100.83.134.158: [2024-04-24 17:31:06,474] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_09-model_00-model_states.pt...
+100.83.134.158: [2024-04-24 17:31:06,514] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_12-model_03-model_states.pt.
+100.83.134.158: [2024-04-24 17:31:06,514] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_11-model_02-model_states.pt.
+100.83.134.158: [2024-04-24 17:31:06,517] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_11-model_01-model_states.pt.
+100.83.134.158: [2024-04-24 17:31:06,532] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_12-model_02-model_states.pt...
+100.83.134.158: [2024-04-24 17:31:06,532] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_13-model_03-model_states.pt...
+100.83.134.158: [2024-04-24 17:31:06,540] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_12-model_01-model_states.pt...
+100.83.134.148: [2024-04-24 17:31:06,539] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_22-model_01-model_states.pt.
+100.83.134.148: [2024-04-24 17:31:06,543] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_23-model_00-model_states.pt.
+100.83.134.148: [2024-04-24 17:31:06,549] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_21-model_02-model_states.pt.
+100.83.134.148: [2024-04-24 17:31:06,552] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_21-model_03-model_states.pt.
+100.83.134.148: [2024-04-24 17:31:06,559] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_23-model_01-model_states.pt...
+100.83.134.158: [2024-04-24 17:31:06,563] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_09-model_00-model_states.pt.
+100.83.134.148: [2024-04-24 17:31:06,563] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_24-model_00-model_states.pt...
+100.83.134.148: [2024-04-24 17:31:06,569] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_22-model_02-model_states.pt...
+100.83.134.148: [2024-04-24 17:31:06,579] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_22-model_03-model_states.pt...
+100.83.134.158: [2024-04-24 17:31:06,584] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_10-model_00-model_states.pt...
+100.83.134.158: [2024-04-24 17:31:06,635] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_13-model_03-model_states.pt.
+100.83.134.158: [2024-04-24 17:31:06,649] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_12-model_01-model_states.pt.
+100.83.134.158: [2024-04-24 17:31:06,653] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_14-model_03-model_states.pt...
+100.83.134.158: [2024-04-24 17:31:06,654] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_12-model_02-model_states.pt.
+100.83.134.158: [2024-04-24 17:31:06,669] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_13-model_01-model_states.pt...
+100.83.134.158: [2024-04-24 17:31:06,678] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_13-model_02-model_states.pt...
+100.83.134.158: [2024-04-24 17:31:06,679] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_14-model_03-model_states.pt.
+100.83.134.158: [2024-04-24 17:31:06,682] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/mp_rank_03_model_states.pt...
+100.83.134.148: [2024-04-24 17:31:06,684] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_23-model_01-model_states.pt.
+100.83.134.148: [2024-04-24 17:31:06,685] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_24-model_00-model_states.pt.
+100.83.134.158: [2024-04-24 17:31:06,688] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_10-model_00-model_states.pt.
+100.83.134.158: [2024-04-24 17:31:06,690] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/mp_rank_03_model_states.pt.
+100.83.134.158: [2024-04-24 17:31:06,690] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_13-model_01-model_states.pt.
+100.83.134.158: [2024-04-24 17:31:06,693] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_1_mp_rank_03_optim_states.pt...
+100.83.134.158: [2024-04-24 17:31:06,694] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_0_mp_rank_03_optim_states.pt...
+100.83.134.148: [2024-04-24 17:31:06,699] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_22-model_02-model_states.pt.
+100.83.134.148: [2024-04-24 17:31:06,703] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_22-model_03-model_states.pt.
+100.83.134.158: [2024-04-24 17:31:06,706] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_11-model_00-model_states.pt...
+100.83.134.148: [2024-04-24 17:31:06,704] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_25-model_00-model_states.pt...
+100.83.134.158: [2024-04-24 17:31:06,708] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_14-model_01-model_states.pt...
+100.83.134.158: [2024-04-24 17:31:06,709] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_13-model_02-model_states.pt.
+100.83.134.148: [2024-04-24 17:31:06,706] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_24-model_01-model_states.pt...
+100.83.134.148: [2024-04-24 17:31:06,725] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_23-model_02-model_states.pt...
+100.83.134.148: [2024-04-24 17:31:06,725] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_23-model_03-model_states.pt...
+100.83.134.148: [2024-04-24 17:31:06,733] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_25-model_00-model_states.pt.
+100.83.134.158: [2024-04-24 17:31:06,744] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_14-model_02-model_states.pt...
+100.83.134.148: [2024-04-24 17:31:06,744] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_24-model_01-model_states.pt.
+100.83.134.148: [2024-04-24 17:31:06,755] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_26-model_00-model_states.pt...
+100.83.134.148: [2024-04-24 17:31:06,765] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_25-model_01-model_states.pt...
+100.83.134.148: [2024-04-24 17:31:06,770] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_23-model_02-model_states.pt.
+100.83.134.148: [2024-04-24 17:31:06,773] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_23-model_03-model_states.pt.
+100.83.134.148: [2024-04-24 17:31:06,784] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_26-model_00-model_states.pt.
+100.83.134.158: [2024-04-24 17:31:06,788] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_14-model_01-model_states.pt.
+100.83.134.158: [2024-04-24 17:31:06,790] [INFO] [logging.py:96:log_dist] [Rank 1] Saving model checkpoint: /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/mp_rank_01_model_states.pt
+100.83.134.148: [2024-04-24 17:31:06,787] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_28-model_00-model_states.pt...
+100.83.134.158: [2024-04-24 17:31:06,790] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/mp_rank_01_model_states.pt...
+100.83.134.148: [2024-04-24 17:31:06,789] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_24-model_02-model_states.pt...
+100.83.134.148: [2024-04-24 17:31:06,791] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_24-model_03-model_states.pt...
+100.83.134.148: [2024-04-24 17:31:06,792] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_28-model_00-model_states.pt.
+100.83.134.158: [2024-04-24 17:31:06,795] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_11-model_00-model_states.pt.
+100.83.134.158: [2024-04-24 17:31:06,797] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_14-model_02-model_states.pt.
+100.83.134.148: [2024-04-24 17:31:06,795] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_25-model_01-model_states.pt.
+100.83.134.158: [2024-04-24 17:31:06,799] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/mp_rank_02_model_states.pt...
+100.83.134.158: [2024-04-24 17:31:06,801] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/mp_rank_01_model_states.pt.
+100.83.134.158: [2024-04-24 17:31:06,803] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_1_mp_rank_01_optim_states.pt...
+100.83.134.158: [2024-04-24 17:31:06,805] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_0_mp_rank_01_optim_states.pt...
+100.83.134.158: [2024-04-24 17:31:06,807] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/mp_rank_02_model_states.pt.
+100.83.134.158: [2024-04-24 17:31:06,809] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_1_mp_rank_02_optim_states.pt... +100.83.134.158: [2024-04-24 17:31:06,810] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_0_mp_rank_02_optim_states.pt... +100.83.134.158: [2024-04-24 17:31:06,819] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_12-model_00-model_states.pt... +100.83.134.148: [2024-04-24 17:31:06,818] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_26-model_01-model_states.pt... +100.83.134.148: [2024-04-24 17:31:06,820] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_24-model_02-model_states.pt. +100.83.134.148: [2024-04-24 17:31:06,831] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_29-model_00-model_states.pt... +100.83.134.148: [2024-04-24 17:31:06,831] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_24-model_03-model_states.pt. +100.83.134.148: [2024-04-24 17:31:06,839] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_25-model_02-model_states.pt... +100.83.134.148: [2024-04-24 17:31:06,847] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_26-model_01-model_states.pt. +100.83.134.148: [2024-04-24 17:31:06,850] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_28-model_01-model_states.pt... +100.83.134.148: [2024-04-24 17:31:06,854] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_25-model_03-model_states.pt... +100.83.134.148: [2024-04-24 17:31:06,858] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_28-model_01-model_states.pt. +100.83.134.148: [2024-04-24 17:31:06,877] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_25-model_03-model_states.pt. +100.83.134.148: [2024-04-24 17:31:06,880] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_25-model_02-model_states.pt. +100.83.134.148: [2024-04-24 17:31:06,894] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_29-model_00-model_states.pt. 
+100.83.134.158: [2024-04-24 17:31:06,897] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_12-model_00-model_states.pt. +100.83.134.148: [2024-04-24 17:31:06,895] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_26-model_03-model_states.pt... +100.83.134.148: [2024-04-24 17:31:06,899] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/mp_rank_04_model_states.pt... +100.83.134.148: [2024-04-24 17:31:06,902] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_29-model_01-model_states.pt... +100.83.134.148: [2024-04-24 17:31:06,911] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_26-model_02-model_states.pt... +100.83.134.158: [2024-04-24 17:31:06,914] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_13-model_00-model_states.pt... +100.83.134.148: [2024-04-24 17:31:06,912] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/mp_rank_04_model_states.pt. +100.83.134.148: [2024-04-24 17:31:06,920] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_1_mp_rank_04_optim_states.pt... +100.83.134.148: [2024-04-24 17:31:06,922] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_26-model_03-model_states.pt. +100.83.134.148: [2024-04-24 17:31:06,925] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_0_mp_rank_04_optim_states.pt... +100.83.134.148: [2024-04-24 17:31:06,926] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_28-model_03-model_states.pt... +100.83.134.148: [2024-04-24 17:31:06,927] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_28-model_03-model_states.pt. +100.83.134.148: [2024-04-24 17:31:06,940] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_26-model_02-model_states.pt. +100.83.134.148: [2024-04-24 17:31:06,944] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_28-model_02-model_states.pt... +100.83.134.148: [2024-04-24 17:31:06,946] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_28-model_02-model_states.pt. 
+100.83.134.148: [2024-04-24 17:31:06,962] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_29-model_03-model_states.pt... +100.83.134.148: [2024-04-24 17:31:06,970] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_29-model_01-model_states.pt. +100.83.134.148: [2024-04-24 17:31:06,975] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/mp_rank_05_model_states.pt... +100.83.134.158: [2024-04-24 17:31:06,987] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_13-model_00-model_states.pt. +100.83.134.148: [2024-04-24 17:31:06,984] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/mp_rank_05_model_states.pt. +100.83.134.148: [2024-04-24 17:31:06,986] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_29-model_02-model_states.pt... +100.83.134.148: [2024-04-24 17:31:06,987] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_1_mp_rank_05_optim_states.pt... +100.83.134.148: [2024-04-24 17:31:06,989] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_0_mp_rank_05_optim_states.pt... +100.83.134.158: [2024-04-24 17:31:07,004] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_14-model_00-model_states.pt... +100.83.134.148: [2024-04-24 17:31:07,016] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_29-model_03-model_states.pt. +100.83.134.148: [2024-04-24 17:31:07,019] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/mp_rank_07_model_states.pt... +100.83.134.148: [2024-04-24 17:31:07,028] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/mp_rank_07_model_states.pt. +100.83.134.148: [2024-04-24 17:31:07,030] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_1_mp_rank_07_optim_states.pt... +100.83.134.148: [2024-04-24 17:31:07,031] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_0_mp_rank_07_optim_states.pt... +100.83.134.148: [2024-04-24 17:31:07,044] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_29-model_02-model_states.pt. 
+100.83.134.148: [2024-04-24 17:31:07,049] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/mp_rank_06_model_states.pt... +100.83.134.148: [2024-04-24 17:31:07,058] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/mp_rank_06_model_states.pt. +100.83.134.148: [2024-04-24 17:31:07,061] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_1_mp_rank_06_optim_states.pt... +100.83.134.148: [2024-04-24 17:31:07,063] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_0_mp_rank_06_optim_states.pt... +100.83.134.158: [2024-04-24 17:31:07,081] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/layer_14-model_00-model_states.pt. +100.83.134.158: [2024-04-24 17:31:07,084] [INFO] [logging.py:96:log_dist] [Rank 0] Saving model checkpoint: /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/mp_rank_00_model_states.pt +100.83.134.158: [2024-04-24 17:31:07,084] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/mp_rank_00_model_states.pt... +100.83.134.158: [2024-04-24 17:31:07,093] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/mp_rank_00_model_states.pt. +100.83.134.158: [2024-04-24 17:31:07,096] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt... +100.83.134.158: [2024-04-24 17:31:07,097] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt... +100.83.134.158: [2024-04-24 17:31:08,214] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_1_mp_rank_03_optim_states.pt. +100.83.134.158: [2024-04-24 17:31:08,214] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_1_mp_rank_03_optim_states.pt... +100.83.134.158: [2024-04-24 17:31:08,258] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_0_mp_rank_03_optim_states.pt. +100.83.134.158: [2024-04-24 17:31:08,258] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_0_mp_rank_03_optim_states.pt... +100.83.134.158: [2024-04-24 17:31:08,313] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_1_mp_rank_01_optim_states.pt. 
+100.83.134.158: [2024-04-24 17:31:08,314] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_1_mp_rank_01_optim_states.pt... +100.83.134.148: [2024-04-24 17:31:08,374] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_1_mp_rank_04_optim_states.pt. +100.83.134.148: [2024-04-24 17:31:08,374] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_1_mp_rank_04_optim_states.pt... +100.83.134.158: [2024-04-24 17:31:08,401] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_1_mp_rank_02_optim_states.pt. +100.83.134.158: [2024-04-24 17:31:08,402] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_1_mp_rank_02_optim_states.pt... +100.83.134.158: [2024-04-24 17:31:08,403] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_0_mp_rank_02_optim_states.pt. +100.83.134.158: [2024-04-24 17:31:08,404] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_0_mp_rank_02_optim_states.pt... +100.83.134.158: [2024-04-24 17:31:08,407] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_0_mp_rank_01_optim_states.pt. +100.83.134.158: [2024-04-24 17:31:08,407] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_0_mp_rank_01_optim_states.pt... +100.83.134.148: [2024-04-24 17:31:08,493] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_0_mp_rank_07_optim_states.pt. +100.83.134.148: [2024-04-24 17:31:08,493] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_0_mp_rank_07_optim_states.pt... +100.83.134.148: [2024-04-24 17:31:08,498] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_0_mp_rank_04_optim_states.pt. +100.83.134.148: [2024-04-24 17:31:08,499] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_0_mp_rank_04_optim_states.pt... +100.83.134.158: [2024-04-24 17:31:08,564] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt. +100.83.134.158: [2024-04-24 17:31:08,565] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt... 
+100.83.134.148: [2024-04-24 17:31:08,597] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_0_mp_rank_06_optim_states.pt. +100.83.134.148: [2024-04-24 17:31:08,597] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_0_mp_rank_06_optim_states.pt... +100.83.134.148: [2024-04-24 17:31:08,603] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_1_mp_rank_06_optim_states.pt. +100.83.134.148: [2024-04-24 17:31:08,603] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_1_mp_rank_06_optim_states.pt... +100.83.134.148: [2024-04-24 17:31:08,612] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_0_mp_rank_05_optim_states.pt. +100.83.134.148: [2024-04-24 17:31:08,612] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_0_mp_rank_05_optim_states.pt... +100.83.134.148: [2024-04-24 17:31:08,639] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_1_mp_rank_07_optim_states.pt. +100.83.134.148: [2024-04-24 17:31:08,640] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_1_mp_rank_07_optim_states.pt... +100.83.134.148: [2024-04-24 17:31:08,670] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_1_mp_rank_05_optim_states.pt. +100.83.134.148: [2024-04-24 17:31:08,670] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_1_mp_rank_05_optim_states.pt... +100.83.134.158: [2024-04-24 17:31:08,708] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt. +100.83.134.158: [2024-04-24 17:31:08,709] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt... +100.83.134.158: [2024-04-24 17:31:09,951] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_0_mp_rank_01_optim_states.pt. +100.83.134.158: [2024-04-24 17:31:09,951] [INFO] [engine.py:3481:_save_zero_checkpoint] bf16_zero checkpoint saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_0_mp_rank_01_optim_states.pt +100.83.134.158: [2024-04-24 17:31:09,951] [INFO] [torch_checkpoint_engine.py:34:commit] [Torch] Checkpoint global_step20 is ready now! 
+100.83.134.158: [2024-04-24 17:31:10,008] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_1_mp_rank_03_optim_states.pt. +100.83.134.158: [2024-04-24 17:31:10,009] [INFO] [engine.py:3481:_save_zero_checkpoint] bf16_zero checkpoint saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_1_mp_rank_03_optim_states.pt +100.83.134.158: [2024-04-24 17:31:10,009] [INFO] [torch_checkpoint_engine.py:34:commit] [Torch] Checkpoint global_step20 is ready now! +100.83.134.158: [2024-04-24 17:31:10,101] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_0_mp_rank_03_optim_states.pt. +100.83.134.158: [2024-04-24 17:31:10,101] [INFO] [engine.py:3481:_save_zero_checkpoint] bf16_zero checkpoint saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_0_mp_rank_03_optim_states.pt +100.83.134.158: [2024-04-24 17:31:10,102] [INFO] [torch_checkpoint_engine.py:34:commit] [Torch] Checkpoint global_step20 is ready now! +100.83.134.148: [2024-04-24 17:31:10,190] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_1_mp_rank_07_optim_states.pt. +100.83.134.148: [2024-04-24 17:31:10,190] [INFO] [engine.py:3481:_save_zero_checkpoint] bf16_zero checkpoint saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_1_mp_rank_07_optim_states.pt +100.83.134.148: [2024-04-24 17:31:10,190] [INFO] [torch_checkpoint_engine.py:34:commit] [Torch] Checkpoint global_step20 is ready now! +100.83.134.148: [2024-04-24 17:31:10,290] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_0_mp_rank_06_optim_states.pt. +100.83.134.148: [2024-04-24 17:31:10,290] [INFO] [engine.py:3481:_save_zero_checkpoint] bf16_zero checkpoint saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_0_mp_rank_06_optim_states.pt +100.83.134.148: [2024-04-24 17:31:10,291] [INFO] [torch_checkpoint_engine.py:34:commit] [Torch] Checkpoint global_step20 is ready now! +100.83.134.158: [2024-04-24 17:31:10,423] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_1_mp_rank_01_optim_states.pt. +100.83.134.158: [2024-04-24 17:31:10,423] [INFO] [engine.py:3481:_save_zero_checkpoint] bf16_zero checkpoint saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_1_mp_rank_01_optim_states.pt +100.83.134.158: [2024-04-24 17:31:10,423] [INFO] [torch_checkpoint_engine.py:34:commit] [Torch] Checkpoint global_step20 is ready now! +100.83.134.158: [2024-04-24 17:31:10,459] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt. 
+100.83.134.158: [2024-04-24 17:31:10,459] [INFO] [engine.py:3481:_save_zero_checkpoint] bf16_zero checkpoint saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt +100.83.134.158: [2024-04-24 17:31:10,459] [INFO] [torch_checkpoint_engine.py:34:commit] [Torch] Checkpoint global_step20 is ready now! +100.83.134.158: [2024-04-24 17:31:10,590] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_0_mp_rank_02_optim_states.pt. +100.83.134.158: [2024-04-24 17:31:10,591] [INFO] [engine.py:3481:_save_zero_checkpoint] bf16_zero checkpoint saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_0_mp_rank_02_optim_states.pt +100.83.134.158: [2024-04-24 17:31:10,591] [INFO] [torch_checkpoint_engine.py:34:commit] [Torch] Checkpoint global_step20 is ready now! +100.83.134.148: [2024-04-24 17:31:10,636] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_1_mp_rank_04_optim_states.pt. +100.83.134.148: [2024-04-24 17:31:10,637] [INFO] [engine.py:3481:_save_zero_checkpoint] bf16_zero checkpoint saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_1_mp_rank_04_optim_states.pt +100.83.134.148: [2024-04-24 17:31:10,637] [INFO] [torch_checkpoint_engine.py:34:commit] [Torch] Checkpoint global_step20 is ready now! +100.83.134.158: [2024-04-24 17:31:10,673] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_1_mp_rank_02_optim_states.pt. +100.83.134.158: [2024-04-24 17:31:10,673] [INFO] [engine.py:3481:_save_zero_checkpoint] bf16_zero checkpoint saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_1_mp_rank_02_optim_states.pt +100.83.134.158: [2024-04-24 17:31:10,674] [INFO] [torch_checkpoint_engine.py:34:commit] [Torch] Checkpoint global_step20 is ready now! +100.83.134.158: [2024-04-24 17:31:11,171] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt. +100.83.134.158: [2024-04-24 17:31:11,173] [INFO] [engine.py:3481:_save_zero_checkpoint] bf16_zero checkpoint saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt +100.83.134.158: [2024-04-24 17:31:11,174] [INFO] [torch_checkpoint_engine.py:34:commit] [Torch] Checkpoint global_step20 is ready now! +100.83.134.148: [2024-04-24 17:31:11,828] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_0_mp_rank_07_optim_states.pt. +100.83.134.148: [2024-04-24 17:31:11,828] [INFO] [engine.py:3481:_save_zero_checkpoint] bf16_zero checkpoint saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_0_mp_rank_07_optim_states.pt +100.83.134.148: [2024-04-24 17:31:11,828] [INFO] [torch_checkpoint_engine.py:34:commit] [Torch] Checkpoint global_step20 is ready now! 
+100.83.134.148: [2024-04-24 17:31:11,838] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_0_mp_rank_04_optim_states.pt. +100.83.134.148: [2024-04-24 17:31:11,838] [INFO] [engine.py:3481:_save_zero_checkpoint] bf16_zero checkpoint saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_0_mp_rank_04_optim_states.pt +100.83.134.148: [2024-04-24 17:31:11,838] [INFO] [torch_checkpoint_engine.py:34:commit] [Torch] Checkpoint global_step20 is ready now! +100.83.134.148: [2024-04-24 17:31:12,558] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_1_mp_rank_05_optim_states.pt. +100.83.134.148: [2024-04-24 17:31:12,558] [INFO] [engine.py:3481:_save_zero_checkpoint] bf16_zero checkpoint saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_1_mp_rank_05_optim_states.pt +100.83.134.148: [2024-04-24 17:31:12,558] [INFO] [torch_checkpoint_engine.py:34:commit] [Torch] Checkpoint global_step20 is ready now! +100.83.134.148: [2024-04-24 17:31:12,616] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_1_mp_rank_06_optim_states.pt. +100.83.134.148: [2024-04-24 17:31:12,617] [INFO] [engine.py:3481:_save_zero_checkpoint] bf16_zero checkpoint saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_1_mp_rank_06_optim_states.pt +100.83.134.148: [2024-04-24 17:31:12,617] [INFO] [torch_checkpoint_engine.py:34:commit] [Torch] Checkpoint global_step20 is ready now! +100.83.134.148: [2024-04-24 17:31:12,625] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_0_mp_rank_05_optim_states.pt. +100.83.134.148: [2024-04-24 17:31:12,625] [INFO] [engine.py:3481:_save_zero_checkpoint] bf16_zero checkpoint saved /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20/bf16_zero_pp_rank_0_mp_rank_05_optim_states.pt +100.83.134.148: [2024-04-24 17:31:12,625] [INFO] [torch_checkpoint_engine.py:34:commit] [Torch] Checkpoint global_step20 is ready now! 
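The save sequence above shows the shard layout a DeepSpeed ZeRO stage-2 checkpoint produces under the 3D configuration reported below (DP=2 TP=4 PP=2): one layer_XX-model_YY-model_states.pt per pipeline layer and tensor-parallel rank, one mp_rank_ZZ_model_states.pt per model-parallel rank, and one bf16_zero_pp_rank_D_mp_rank_ZZ_optim_states.pt per (data-parallel, model-parallel) pair, each write followed by a commit. A minimal sketch of the kind of presence check the "Verify" pass below performs — the function names, layer range, and parallelism sizes are assumptions read off this log, not a DeepSpeed API:

import os

def expected_shards(dp=2, tp=4, pp=2, layers=range(3, 29)):
    # Shard names follow the pattern visible in the log above; the layer
    # range (3..28) and DP/TP/PP sizes are assumptions for this run.
    names = []
    for layer in layers:
        for tp_rank in range(tp):
            names.append(f"layer_{layer:02d}-model_{tp_rank:02d}-model_states.pt")
    for mp_rank in range(tp * pp):  # mp_rank_00 .. mp_rank_07
        names.append(f"mp_rank_{mp_rank:02d}_model_states.pt")
    for dp_rank in range(dp):       # one bf16 ZeRO optimizer shard per (dp, mp) pair
        for mp_rank in range(tp * pp):
            names.append(f"bf16_zero_pp_rank_{dp_rank}_mp_rank_{mp_rank:02d}_optim_states.pt")
    return names

def check_checkpoint(step_dir):
    # Report shard files that are absent from a global_stepN directory.
    missing = [n for n in expected_shards()
               if not os.path.isfile(os.path.join(step_dir, n))]
    print("all shards present" if not missing
          else f"{len(missing)} shards missing, e.g. {missing[:3]}")

Run against /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2/global_step20, such a check would flag any shard a rank failed to write before its commit message.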
+100.83.134.158: successfully saved checkpoint at iteration 20 to /data/output/llama13b_600M/24-04-2024-10:37:59/checkpoints_zero_stage_2 +100.83.134.158: 3D configuration: DP=2 TP=4 PP=2 +100.83.134.158: +100.83.134.158: Verify ** layer_ ** files +100.83.134.158: +100.83.134.158: Checking pp_stage=0 +100.83.134.158: 3.input_layernorm.weight: OK [n=4] +100.83.134.158: 3.post_attention_layernorm.weight: OK [n=4] +100.83.134.158: 4.input_layernorm.weight: OK [n=4] +100.83.134.158: 4.post_attention_layernorm.weight: OK [n=4] +100.83.134.158: 5.input_layernorm.weight: OK [n=4] +100.83.134.158: 5.post_attention_layernorm.weight: OK [n=4] +100.83.134.158: 6.input_layernorm.weight: OK [n=4] +100.83.134.158: 6.post_attention_layernorm.weight: OK [n=4] +100.83.134.158: 7.input_layernorm.weight: OK [n=4] +100.83.134.158: 7.post_attention_layernorm.weight: OK [n=4] +100.83.134.158: 8.input_layernorm.weight: OK [n=4] +100.83.134.158: 8.post_attention_layernorm.weight: OK [n=4] +100.83.134.158: 9.input_layernorm.weight: OK [n=4] +100.83.134.158: 9.post_attention_layernorm.weight: OK [n=4] +100.83.134.158: 10.input_layernorm.weight: OK [n=4] +100.83.134.158: 10.post_attention_layernorm.weight: OK [n=4] +100.83.134.158: 11.input_layernorm.weight: OK [n=4] +100.83.134.158: 11.post_attention_layernorm.weight: OK [n=4] +100.83.134.158: 12.input_layernorm.weight: OK [n=4] +100.83.134.158: 12.post_attention_layernorm.weight: OK [n=4] +100.83.134.158: 13.input_layernorm.weight: OK [n=4] +100.83.134.158: 13.post_attention_layernorm.weight: OK [n=4] +100.83.134.158: 14.input_layernorm.weight: OK [n=4] +100.83.134.158: 14.post_attention_layernorm.weight: OK [n=4] +100.83.134.158: +100.83.134.158: Checking pp_stage=1 +100.83.134.158: 15.input_layernorm.weight: OK [n=4] +100.83.134.158: 15.post_attention_layernorm.weight: OK [n=4] +100.83.134.158: 16.input_layernorm.weight: OK [n=4] +100.83.134.158: 16.post_attention_layernorm.weight: OK [n=4] +100.83.134.158: 17.input_layernorm.weight: OK [n=4] +100.83.134.158: 17.post_attention_layernorm.weight: OK [n=4] +100.83.134.158: 18.input_layernorm.weight: OK [n=4] +100.83.134.158: 18.post_attention_layernorm.weight: OK [n=4] +100.83.134.158: 19.input_layernorm.weight: OK [n=4] +100.83.134.158: 19.post_attention_layernorm.weight: OK [n=4] +100.83.134.158: 20.input_layernorm.weight: OK [n=4] +100.83.134.158: 20.post_attention_layernorm.weight: OK [n=4] +100.83.134.158: 21.input_layernorm.weight: OK [n=4] +100.83.134.158: 21.post_attention_layernorm.weight: OK [n=4] +100.83.134.158: 22.input_layernorm.weight: OK [n=4] +100.83.134.158: 22.post_attention_layernorm.weight: OK [n=4] +100.83.134.158: 23.input_layernorm.weight: OK [n=4] +100.83.134.158: 23.post_attention_layernorm.weight: OK [n=4] +100.83.134.158: 24.input_layernorm.weight: OK [n=4] +100.83.134.158: 24.post_attention_layernorm.weight: OK [n=4] +100.83.134.158: 25.input_layernorm.weight: OK [n=4] +100.83.134.158: 25.post_attention_layernorm.weight: OK [n=4] +100.83.134.158: 26.input_layernorm.weight: OK [n=4] +100.83.134.158: 26.post_attention_layernorm.weight: OK [n=4] +100.83.134.158: 28.final_rmsnorm.weight: OK [n=4] +100.83.134.158: +100.83.134.158: Checkpoint layer files OK +100.83.134.158: +100.83.134.158: Verify ** bf16_zero_ ** files +100.83.134.158: +100.83.134.158: Checking pp_stage=0 dp_stage=0 +100.83.134.158: bf16 zero files: 0%| | 0/4 [00:00, +100.83.134.158: [2024-04-26 09:36:18,643] [INFO] [launch.py:163:main] global_rank_mapping=defaultdict(<class 'list'>, {'100.83.134.158': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.134.148': [8, 9, 10, 11, 12, 13, 14, 15]}) +100.83.134.158: [2024-04-26 09:36:18,643] [INFO]
[launch.py:164:main] dist_world_size=16 +100.83.134.158: [2024-04-26 09:36:18,643] [INFO] [launch.py:166:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +100.83.134.158: * Starting OpenBSD Secure Shell server sshd +100.83.134.158: * Starting OpenBSD Secure Shell server sshd +100.83.134.158: * Starting OpenBSD Secure Shell server sshd +100.83.134.158: * Starting OpenBSD Secure Shell server sshd +100.83.134.158: * Starting OpenBSD Secure Shell server sshd +100.83.134.158: ...done. +100.83.134.158: ...done. +100.83.134.158: ...done. +100.83.134.158: ...done. +100.83.134.158: ...done. +100.83.134.158: * Starting OpenBSD Secure Shell server sshd +100.83.134.158: * Starting OpenBSD Secure Shell server sshd +100.83.134.158: * Starting OpenBSD Secure Shell server sshd +100.83.134.158: ...done. +100.83.134.158: ...done. +100.83.134.158: ...done. +100.83.134.148: [2024-04-26 09:36:18,877] [INFO] [launch.py:146:main] WORLD INFO DICT: {'100.83.134.158': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.134.148': [0, 1, 2, 3, 4, 5, 6, 7]} +100.83.134.148: [2024-04-26 09:36:18,877] [INFO] [launch.py:152:main] nnodes=2, num_local_procs=8, node_rank=1 +100.83.134.148: [2024-04-26 09:36:18,877] [INFO] [launch.py:163:main] global_rank_mapping=defaultdict(<class 'list'>, {'100.83.134.158': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.134.148': [8, 9, 10, 11, 12, 13, 14, 15]}) +100.83.134.148: [2024-04-26 09:36:18,877] [INFO] [launch.py:164:main] dist_world_size=16 +100.83.134.148: [2024-04-26 09:36:18,877] [INFO] [launch.py:166:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +100.83.134.148: * Starting OpenBSD Secure Shell server sshd +100.83.134.148: * Starting OpenBSD Secure Shell server sshd +100.83.134.148: * Starting OpenBSD Secure Shell server sshd +100.83.134.148: * Starting OpenBSD Secure Shell server sshd +100.83.134.148: * Starting OpenBSD Secure Shell server sshd +100.83.134.148: ...done. +100.83.134.148: ...done. +100.83.134.148: ...done. +100.83.134.148: ...done. +100.83.134.148: * Starting OpenBSD Secure Shell server sshd +100.83.134.148: ...done. +100.83.134.148: * Starting OpenBSD Secure Shell server sshd +100.83.134.148: * Starting OpenBSD Secure Shell server sshd +100.83.134.148: ...done. +100.83.134.148: ...done. +100.83.134.148: ...done. +100.83.134.158: [2024-04-26 09:36:20,372] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.134.158: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.134.158: warnings.warn( +100.83.134.158: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.134.158: warnings.warn( +100.83.134.158: [2024-04-26 09:36:20,378] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.134.158: [2024-04-26 09:36:20,395] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.134.158: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+100.83.134.158: warnings.warn( +100.83.134.158: [2024-04-26 09:36:20,399] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.134.158: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.134.158: warnings.warn( +100.83.134.158: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.134.158: warnings.warn( +100.83.134.158: [2024-04-26 09:36:20,406] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.134.158: [2024-04-26 09:36:20,417] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.134.158: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.134.158: warnings.warn( +100.83.134.158: [2024-04-26 09:36:20,588] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.134.158: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.134.158: warnings.warn( +100.83.134.148: [2024-04-26 09:36:20,720] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.134.148: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.134.148: warnings.warn( +100.83.134.148: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.134.148: warnings.warn( +100.83.134.148: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.134.148: warnings.warn( +100.83.134.148: [2024-04-26 09:36:20,722] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.134.148: [2024-04-26 09:36:20,722] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.134.148: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.134.148: warnings.warn( +100.83.134.148: [2024-04-26 09:36:20,722] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.134.148: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. 
Please use torch.use_deterministic_algorithms instead. +100.83.134.148: warnings.warn( +100.83.134.148: [2024-04-26 09:36:20,752] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.134.148: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.134.148: warnings.warn( +100.83.134.148: [2024-04-26 09:36:20,753] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.134.148: [2024-04-26 09:36:20,754] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.134.148: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.134.148: warnings.warn( +100.83.134.148: [2024-04-26 09:36:20,796] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.134.148: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.134.148: warnings.warn( +100.83.134.158: [2024-04-26 09:36:20,834] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.134.158: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.134.158: warnings.warn( +100.83.134.158: -------------------------------------------------- +100.83.134.158: DeepSpeed C++/CUDA extension op report +100.83.134.158: -------------------------------------------------- +100.83.134.158: NOTE: Ops not installed will be just-in-time (JIT) compiled at +100.83.134.158: runtime if needed. Op compatibility means that your system +100.83.134.158: meet the required dependencies to JIT install the op. +100.83.134.158: -------------------------------------------------- +100.83.134.158: JIT compiled ops requires ninja +100.83.134.158: ninja .................. [OKAY] +100.83.134.158: -------------------------------------------------- +100.83.134.158: op name ................ installed .. compatible +100.83.134.158: -------------------------------------------------- +100.83.134.158: cpu_adam ............... [NO] ....... [OKAY] +100.83.134.158: fused_adam ............. [NO] ....... [OKAY] +100.83.134.158: deepspeed_not_implemented [NO] ....... [OKAY] +100.83.134.158: transformer_inference .. [NO] ....... [OKAY] +100.83.134.158: -------------------------------------------------- +100.83.134.158: DeepSpeed general environment info: +100.83.134.158: torch install path ............... ['/usr/local/lib/python3.10/dist-packages/torch'] +100.83.134.158: torch version .................... 2.1.1a0+gitb51c9f6 +100.83.134.158: deepspeed install path ........... ['/usr/local/lib/python3.10/dist-packages/deepspeed'] +100.83.134.158: deepspeed info ................... 0.12.4+hpu.synapse.v1.14.0, fad45b2, 1.14.0 +100.83.134.158: deepspeed wheel compiled w. ...... torch 2.1 +100.83.134.158: shared memory (/dev/shm) size .... 
503.72 GB +100.83.134.158: fatal: not a git repository (or any parent up to mount point /) +100.83.134.158: Stopping at filesystem boundary (GIT_DISCOVERY_ACROSS_FILESYSTEM not set). +100.83.134.158: **** Git info for Megatron: git_hash=unknown git_branch=unknown **** +100.83.134.158: _initialize_distributed: Initializing with below params: +100.83.134.158: args.local_rank: 7 +100.83.134.158: args.world_size: 16 +100.83.134.158: args.rank: 7 +100.83.134.158: args.distributed_backend: hccl +100.83.134.158: -------------------------------------------------- +100.83.134.158: DeepSpeed C++/CUDA extension op report +100.83.134.158: -------------------------------------------------- +100.83.134.158: NOTE: Ops not installed will be just-in-time (JIT) compiled at +100.83.134.158: runtime if needed. Op compatibility means that your system +100.83.134.158: meet the required dependencies to JIT install the op. +100.83.134.158: -------------------------------------------------- +100.83.134.158: JIT compiled ops requires ninja +100.83.134.158: ninja .................. [OKAY] +100.83.134.158: -------------------------------------------------- +100.83.134.158: op name ................ installed .. compatible +100.83.134.158: -------------------------------------------------- +100.83.134.158: -------------------------------------------------- +100.83.134.158: DeepSpeed C++/CUDA extension op report +100.83.134.158: cpu_adam ............... [NO] ....... [OKAY] +100.83.134.158: -------------------------------------------------- +100.83.134.158: NOTE: Ops not installed will be just-in-time (JIT) compiled at +100.83.134.158: runtime if needed. Op compatibility means that your system +100.83.134.158: meet the required dependencies to JIT install the op. +100.83.134.158: +100.83.134.158: -------------------------------------------------- +100.83.134.158: fused_adam ............. [NO] ....... [OKAY] +100.83.134.158: JIT compiled ops requires ninja +100.83.134.158: deepspeed_not_implemented [NO] ....... [OKAY] +100.83.134.158: transformer_inference .. [NO] ....... [OKAY] +100.83.134.158: -------------------------------------------------- +100.83.134.158: DeepSpeed general environment info: +100.83.134.158: torch install path ............... ['/usr/local/lib/python3.10/dist-packages/torch'] +100.83.134.158: torch version .................... 2.1.1a0+gitb51c9f6 +100.83.134.158: deepspeed install path ........... ['/usr/local/lib/python3.10/dist-packages/deepspeed'] +100.83.134.158: deepspeed info ................... 0.12.4+hpu.synapse.v1.14.0, fad45b2, 1.14.0 +100.83.134.158: deepspeed wheel compiled w. ...... torch 2.1 +100.83.134.158: shared memory (/dev/shm) size .... 503.72 GB +100.83.134.158: -------------------------------------------------- +100.83.134.158: DeepSpeed C++/CUDA extension op report +100.83.134.158: -------------------------------------------------- +100.83.134.158: NOTE: Ops not installed will be just-in-time (JIT) compiled at +100.83.134.158: runtime if needed. Op compatibility means that your system +100.83.134.158: meet the required dependencies to JIT install the op. +100.83.134.158: -------------------------------------------------- +100.83.134.158: JIT compiled ops requires ninja +100.83.134.158: ninja .................. [OKAY] +100.83.134.158: -------------------------------------------------- +100.83.134.158: op name ................ installed .. compatible +100.83.134.158: -------------------------------------------------- +100.83.134.158: cpu_adam ............... [NO] .......
[OKAY] +100.83.134.158: fused_adam ............. [NO] ....... [OKAY] +100.83.134.158: deepspeed_not_implemented [NO] ....... [OKAY] +100.83.134.158: transformer_inference .. [NO] ....... [OKAY] +100.83.134.158: -------------------------------------------------- +100.83.134.158: DeepSpeed general environment info: +100.83.134.158: torch install path ............... ['/usr/local/lib/python3.10/dist-packages/torch'] +100.83.134.158: torch version .................... 2.1.1a0+gitb51c9f6 +100.83.134.158: deepspeed install path ........... ['/usr/local/lib/python3.10/dist-packages/deepspeed'] +100.83.134.158: deepspeed info ................... 0.12.4+hpu.synapse.v1.14.0, fad45b2, 1.14.0 +100.83.134.158: deepspeed wheel compiled w. ...... torch 2.1 +100.83.134.158: shared memory (/dev/shm) size .... 503.72 GB +100.83.134.158: ninja .................. [OKAY] +100.83.134.158: -------------------------------------------------- +100.83.134.158: op name ................ installed .. compatible +100.83.134.158: -------------------------------------------------- +100.83.134.158: cpu_adam ............... [NO] ....... [OKAY] +100.83.134.158: fused_adam ............. [NO] ....... [OKAY] +100.83.134.158: deepspeed_not_implemented [NO] ....... [OKAY] +100.83.134.158: transformer_inference .. [NO] ....... [OKAY] +100.83.134.158: -------------------------------------------------- +100.83.134.158: DeepSpeed general environment info: +100.83.134.158: torch install path ............... ['/usr/local/lib/python3.10/dist-packages/torch'] +100.83.134.158: torch version .................... 2.1.1a0+gitb51c9f6 +100.83.134.158: deepspeed install path ........... ['/usr/local/lib/python3.10/dist-packages/deepspeed'] +100.83.134.158: deepspeed info ................... 0.12.4+hpu.synapse.v1.14.0, fad45b2, 1.14.0 +100.83.134.158: deepspeed wheel compiled w. ...... torch 2.1 +100.83.134.158: shared memory (/dev/shm) size .... 503.72 GB +100.83.134.158: fatal: not a git repository (or any parent up to mount point /) +100.83.134.158: Stopping at filesystem boundary (GIT_DISCOVERY_ACROSS_FILESYSTEM not set). +100.83.134.158: **** Git info for Megatron: git_hash=unknown git_branch=unknown **** +100.83.134.158: fatal: not a git repository (or any parent up to mount point /) +100.83.134.158: Stopping at filesystem boundary (GIT_DISCOVERY_ACROSS_FILESYSTEM not set). +100.83.134.158: fatal: not a git repository (or any parent up to mount point /) +100.83.134.158: Stopping at filesystem boundary (GIT_DISCOVERY_ACROSS_FILESYSTEM not set). +100.83.134.158: **** Git info for Megatron: git_hash=unknown git_branch=unknown **** +100.83.134.158: **** Git info for Megatron: git_hash=unknown git_branch=unknown **** +100.83.134.158: -------------------------------------------------- +100.83.134.158: DeepSpeed C++/CUDA extension op report +100.83.134.158: -------------------------------------------------- +100.83.134.158: NOTE: Ops not installed will be just-in-time (JIT) compiled at +100.83.134.158: runtime if needed. Op compatibility means that your system +100.83.134.158: meet the required dependencies to JIT install the op. +100.83.134.158: -------------------------------------------------- +100.83.134.158: JIT compiled ops requires ninja +100.83.134.158: ninja .................. [OKAY] +100.83.134.158: -------------------------------------------------- +100.83.134.158: op name ................ installed .. compatible +100.83.134.158: -------------------------------------------------- +100.83.134.158: cpu_adam ............... [NO] ....... 
+100.83.134.158: using world size: 16, data-parallel-size: 16, tensor-model-parallel size: 1, pipeline-model-parallel size: 1
+100.83.134.158: accumulate and all-reduce gradients in fp32 for bfloat16 data type.
+100.83.134.158: using torch.bfloat16 for parameters ...
+100.83.134.158: ------------------------ arguments ------------------------
+100.83.134.158: accumulate_allreduce_grads_in_fp32 .............. True
+100.83.134.158: activation_func_type ............................ swiglu
+100.83.134.158: adam_beta1 ...................................... 0.9
+100.83.134.158: adam_beta2 ...................................... 0.95
+100.83.134.158: adam_eps ........................................ 1e-06
+100.83.134.158: adlr_autoresume ................................. False
+100.83.134.158: adlr_autoresume_interval ........................ 1000
+100.83.134.158: aml_data_download_path .......................... None
+100.83.134.158: apply_layernorm_weight_plus_one ................. False
+100.83.134.158: apply_query_key_layer_scaling ................... True
+100.83.134.158: apply_residual_connection_post_layernorm ........ False
+100.83.134.158: attention_dropout ............................... 0.1
+100.83.134.158: attention_softmax_in_fp32 ....................... False
+100.83.134.158: bert_binary_head ................................ True
+100.83.134.158: bert_load ....................................... None
+100.83.134.158: bf16 ............................................ True
+100.83.134.158: bias_dropout_fusion ............................. False
+100.83.134.158: bias_gelu_fusion ................................ False
+100.83.134.158: biencoder_projection_dim ........................ 0
+100.83.134.158: biencoder_shared_query_context_model ............ False
+100.83.134.158: block_data_path ................................. None
+100.83.134.158: cache_fp8_weight ................................ False
+100.83.134.158: cache_fp8_weight_fwd ............................ True
+100.83.134.158: checkpoint_activations .......................... False
+100.83.134.158: checkpoint_activations_granularity .............. full
+100.83.134.158: checkpoint_in_cpu ............................... False
+100.83.134.158: checkpoint_num_layers ........................... 1
+100.83.134.158: clearml_config_path ............................. None
+100.83.134.158: clearml_continue_exp ............................ False
+100.83.134.158: clearml_exp_name ................................ None
+100.83.134.158: clip_grad ....................................... 1.0
+100.83.134.158: compression_training ............................ False
+100.83.134.158: consumed_train_samples .......................... 0
+100.83.134.158: consumed_train_tokens ........................... 0
+100.83.134.158: consumed_valid_samples .......................... 0
+100.83.134.158: contigious_checkpointing ........................ False
+100.83.134.158: cpu_optimizer ................................... False
+100.83.134.158: cpu_torch_adam .................................. False
+100.83.134.158: create_moe_param_group .......................... False
+100.83.134.158: curriculum_learning ............................. False
+100.83.134.158: data_idx_path ................................... None
+100.83.134.158: data_impl ....................................... infer
+100.83.134.158: data_parallel_size .............................. 16
+100.83.134.158: data_path ....................................... ['/data/arxiv//tokenized_text_document']
+100.83.134.158: data_sharding ................................... True
+100.83.134.158: dataloader_type ................................. single
+100.83.134.158: DDP_impl ........................................ local
+100.83.134.158: decoder_seq_length .............................. None
+100.83.134.158: deepscale ....................................... False
+100.83.134.158: deepscale_config ................................ None
+100.83.134.158: deepspeed ....................................... True
+100.83.134.158: deepspeed_activation_checkpointing .............. False
+100.83.134.158: deepspeed_config ................................ /data/output/llama13b_600M/26-04-2024-09:36:12/ds_config.json
+100.83.134.158: deepspeed_mpi ................................... False
+100.83.134.158: distribute_checkpointed_activations ............. False
+100.83.134.158: distributed_backend ............................. hccl
+100.83.134.158: do_layernorm_bias_weight_decay .................. False
+100.83.134.158: do_pretrain_validation .......................... False
+100.83.134.158: ds_inference .................................... False
+100.83.134.158: ds_pipeline_enabled ............................. True
+100.83.134.158: embed_layernorm ................................. False
+100.83.134.158: embedding_path .................................. None
+100.83.134.158: enable_expert_tensor_parallelism ................ False
+100.83.134.158: encoder_seq_length .............................. 2048
+100.83.134.158: eod_mask_loss ................................... False
+100.83.134.158: eval_interval ................................... 500
+100.83.134.158: eval_iters ...................................... 10
+100.83.134.158: eval_loss_exit_value ............................ None
+100.83.134.158: eval_micro_batch_size ........................... 1
+100.83.134.158: evidence_data_path .............................. None
+100.83.134.158: exit_duration_in_mins ........................... None
+100.83.134.158: exit_interval ................................... 0
+100.83.134.158: expert_interval ................................. 2
+100.83.134.158: ffn_hidden_coeff ................................ 2.6666666666666665
+100.83.134.158: ffn_hidden_size ................................. 4096
+100.83.134.158: finetune ........................................ False
+100.83.134.158: fix_position_emb_redundant_alloc ................ False
+100.83.134.158: flatten_linear_operands ......................... False
+100.83.134.158: fp16 ............................................ False
+100.83.134.158: fp16_lm_cross_entropy ........................... False
+100.83.134.158: fp32_residual_connection ........................ False
+100.83.134.158: global_batch_size ............................... 256
+100.83.134.158: hf_save ......................................... /data/output/llama13b_600M/26-04-2024-09:36:12/hf_ckpt
+100.83.134.158: hidden_dropout .................................. 0.1
+100.83.134.158: hidden_size ..................................... 2048
+100.83.134.158: hidden_size_teacher ............................. None
+100.83.134.158: hpu_deterministic ............................... True
+100.83.134.158: hpu_fp8_format .................................. e5m2
+100.83.134.158: hpu_fp8_measure_interval ........................ 10
+100.83.134.158: hysteresis ...................................... 2
+100.83.134.158: ict_head_size ................................... None
+100.83.134.158: ict_load ........................................ None
+100.83.134.158: img_dim ......................................... 224
+100.83.134.158: indexer_batch_size .............................. 128
+100.83.134.158: indexer_log_interval ............................ 1000
+100.83.134.158: inference ....................................... False
+100.83.134.158: init_method_std ................................. 0.02
+100.83.134.158: init_method_xavier_uniform ...................... False
+100.83.134.158: initial_loss_scale .............................. 4294967296
+100.83.134.158: kd .............................................. False
+100.83.134.158: kd_alpha_ce ..................................... 1
+100.83.134.158: kd_beta_ce ...................................... 1
+100.83.134.158: kd_temp ......................................... 1.0
+100.83.134.158: kill_switch_path ................................ None
+100.83.134.158: kv_channels ..................................... 64
+100.83.134.158: layernorm_epsilon ............................... 1e-06
+100.83.134.158: layernorm_type .................................. rmsnorm
+100.83.134.158: lazy_mpu_init ................................... None
+100.83.134.158: load ............................................ /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2
+100.83.134.158: load_teacher .................................... None
+100.83.134.158: local_rank ...................................... 0
+100.83.134.158: log_batch_size_to_tensorboard ................... True
+100.83.134.158: log_bwd_grads ................................... False
+100.83.134.158: log_fwd_activations ............................. False
+100.83.134.158: log_interval .................................... 10
+100.83.134.158: log_learning_rate_to_tensorboard ................ True
+100.83.134.158: log_loss_scale_to_tensorboard ................... True
+100.83.134.158: log_model_inputs ................................ False
+100.83.134.158: log_num_zeros_in_grad ........................... False
+100.83.134.158: log_optimizer_states_to_tensorboard ............. False
+100.83.134.158: log_params_norm ................................. False
+100.83.134.158: log_timers_to_tensorboard ....................... True
+100.83.134.158: log_validation_ppl_to_tensorboard ............... True
+100.83.134.158: loss_scale ...................................... None
+100.83.134.158: loss_scale_window ............................... 1000
+100.83.134.158: lr .............................................. 0.0003
+100.83.134.158: lr_decay_iters .................................. None
+100.83.134.158: lr_decay_samples ................................ None
+100.83.134.158: lr_decay_style .................................. cosine
+100.83.134.158: lr_decay_tokens ................................. None
+100.83.134.158: lr_warmup_fraction .............................. None
+100.83.134.158: lr_warmup_iters ................................. 2000
+100.83.134.158: lr_warmup_samples ............................... 0
+100.83.134.158: lr_warmup_tokens ................................ None
+100.83.134.158: make_vocab_size_divisible_by .................... 128
+100.83.134.158: mask_prob ....................................... 0.15
+100.83.134.158: mask_tensor_adding .............................. False
+100.83.134.158: masked_softmax_fusion ........................... False
+100.83.134.158: max_position_embeddings ......................... None
+100.83.134.158: memory_centric_tiled_linear ..................... False
+100.83.134.158: merge_file ...................................... /data/arxiv//gpt2-merges.txt
+100.83.134.158: micro_batch_size ................................ 1
+100.83.134.158: min_loss_scale .................................. 1.0
+100.83.134.158: min_lr .......................................... 0.0
+100.83.134.158: mlp_type ........................................ standard
+100.83.134.158: mmap_warmup ..................................... False
+100.83.134.158: moe_eval_capacity_factor ........................ 1.0
+100.83.134.158: moe_expert_parallel_size ........................ 1
+100.83.134.158: moe_loss_coeff .................................. 0.1
+100.83.134.158: moe_min_capacity ................................ 4
+100.83.134.158: moe_token_dropping .............................. True
+100.83.134.158: moe_train_capacity_factor ....................... 1.0
+100.83.134.158: mos ............................................. False
+100.83.134.158: no_bias ......................................... True
+100.83.134.158: no_cuda ......................................... False
+100.83.134.158: no_load_lr_state ................................ False
+100.83.134.158: no_load_optim ................................... None
+100.83.134.158: no_load_rng ..................................... None
+100.83.134.158: no_pipeline_parallel ............................ False
+100.83.134.158: no_save_optim ................................... None
+100.83.134.158: no_save_rng ..................................... None
+100.83.134.158: no_scaled_init .................................. False
+100.83.134.158: num_attention_heads ............................. 32
+100.83.134.158: num_attention_heads_teacher ..................... None
+100.83.134.158: num_channels .................................... 3
+100.83.134.158: num_classes ..................................... 1000
+100.83.134.158: num_experts ..................................... [1]
+100.83.134.158: num_experts_teacher ............................. [1]
+100.83.134.158: num_key_value_heads ............................. 32
+100.83.134.158: num_layers ...................................... 24
+100.83.134.158: num_layers_per_virtual_pipeline_stage ........... None
+100.83.134.158: num_layers_teacher .............................. None
+100.83.134.158: num_workers ..................................... 2
+100.83.134.158: onnx_safe ....................................... None
+100.83.134.158: openai_gelu ..................................... False
+100.83.134.158: optimizer ....................................... adamw
+100.83.134.158: override_lr_scheduler ........................... False
+100.83.134.158: params_dtype .................................... torch.bfloat16
+100.83.134.158: partition_activations ........................... False
+100.83.134.158: patch_dim ....................................... 16
+100.83.134.158: pipeline_model_parallel_size .................... 1
+100.83.134.158: position_embedding_type ......................... PositionEmbeddingType.rotary
+100.83.134.158: profile ......................................... None
+100.83.134.158: profile_backward ................................ False
+100.83.134.158: profile_steps ................................... 2,3
+100.83.134.158: query_in_block_prob ............................. 0.1
+100.83.134.158: rampup_batch_size ............................... None
+100.83.134.158: rank ............................................ 0
+100.83.134.158: remote_device ................................... none
+100.83.134.158: reset_attention_mask ............................ False
+100.83.134.158: reset_iteration ................................. False
+100.83.134.158: reset_position_ids .............................. False
+100.83.134.158: retriever_report_topk_accuracies ................ []
+100.83.134.158: retriever_score_scaling ......................... False
+100.83.134.158: retriever_seq_length ............................ 256
+100.83.134.158: sample_rate ..................................... 1.0
+100.83.134.158: save ............................................ /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2
+100.83.134.158: save_interval ................................... 500
+100.83.134.158: scatter_gather_tensors_in_pipeline .............. True
+100.83.134.158: scattered_embeddings ............................ False
+100.83.134.158: seed ............................................ 1234
+100.83.134.158: seq_length ...................................... 2048
+100.83.134.158: sequence_parallel ............................... False
+100.83.134.158: sgd_momentum .................................... 0.9
+100.83.134.158: short_seq_prob .................................. 0.1
+100.83.134.158: skip_train ...................................... False
+100.83.134.158: split ........................................... 969, 30, 1
+100.83.134.158: split_transformers .............................. False
+100.83.134.158: synchronize_each_layer .......................... False
+100.83.134.158: tensor_logger_max_iter .......................... 0
+100.83.134.158: tensor_logger_path .............................. None
+100.83.134.158: tensor_model_parallel_size ...................... 1
+100.83.134.158: tensorboard_dir ................................. /data/output/llama13b_600M/26-04-2024-09:36:12/tensorboard
+100.83.134.158: tensorboard_log_interval ........................ 1
+100.83.134.158: tensorboard_queue_size .......................... 1000
+100.83.134.158: test_data_path .................................. None
+100.83.134.158: tile_factor ..................................... 1
+100.83.134.158: titles_data_path ................................ None
+100.83.134.158: tokenizer_eod_id ................................ None
+100.83.134.158: tokenizer_model_file ............................ None
+100.83.134.158: tokenizer_type .................................. GPT2BPETokenizer
+100.83.134.158: topk ............................................ 1
+100.83.134.158: train_data_path ................................. None
+100.83.134.158: train_iters ..................................... 10000
+100.83.134.158: train_samples ................................... None
+100.83.134.158: train_tokens .................................... None
+100.83.134.158: universal_checkpoint ............................ False
+100.83.134.158: use_checkpoint_lr_scheduler ..................... False
+100.83.134.158: use_contiguous_buffers_in_ddp ................... True
+100.83.134.158: use_cpu_initialization .......................... None
+100.83.134.158: use_fused_sdpa .................................. True
+100.83.134.158: use_fused_sdpa_with_recompute ................... False
+100.83.134.158: use_hpu ......................................... True
+100.83.134.158: use_hpu_fp8_transformer_engine .................. False
+100.83.134.158: use_hpu_graphs .................................. False
+100.83.134.158: use_one_sent_docs ............................... False
+100.83.134.158: use_pin_memory .................................. False
+100.83.134.158: use_rotary_v2 ................................... False
+100.83.134.158: use_seq_len_plus_one_tokens ..................... True
+100.83.134.158: use_torch_compile ............................... False
+100.83.134.158: use_tutel ....................................... False
+100.83.134.158: valid_data_path ................................. None
+100.83.134.158: verify_checkpoint ............................... True
+100.83.134.158: verify_checkpoint_model_type .................... LLAMA
+100.83.134.158: verify_tp_workers ............................... False
+100.83.134.158: verify_tp_workers_hash .......................... False
+100.83.134.158: virtual_pipeline_model_parallel_size ............ None
+100.83.134.158: vocab_extra_ids ................................. 0
+100.83.134.158: vocab_file ...................................... /data/arxiv//gpt2-vocab.json
+100.83.134.158: weight_decay .................................... 0.1
+100.83.134.158: world_size ...................................... 16
+100.83.134.158: zero_allgather_bucket_size ...................... 0.0
+100.83.134.158: zero_contigious_gradients ....................... False
+100.83.134.158: zero_reduce_bucket_size ......................... 0.0
+100.83.134.158: zero_reduce_scatter ............................. False
+100.83.134.158: zero_stage ...................................... 0
+100.83.134.158: -------------------- end of arguments ---------------------
+100.83.134.158: setting number of micro-batches to constant 16
+100.83.134.158: setting number of micro-batches to constant 16
+100.83.134.158: > building GPT2BPETokenizer tokenizer ...
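The `setting number of micro-batches to constant 16` lines follow directly from the arguments above: with `global_batch_size` 256, `micro_batch_size` 1 and `data_parallel_size` 16, each data-parallel rank must accumulate 256 / (1 × 16) = 16 micro-batches per step. A minimal sketch of that arithmetic (the helper name is illustrative, not Megatron-DeepSpeed's actual API):

```python
# Sketch: how the constant micro-batch count falls out of the batch arguments
# logged above. The function name is illustrative, not Megatron-DeepSpeed's API.
def micro_batches_per_step(global_batch_size: int,
                           micro_batch_size: int,
                           data_parallel_size: int) -> int:
    per_step = micro_batch_size * data_parallel_size
    assert global_batch_size % per_step == 0, "global batch must split evenly"
    return global_batch_size // per_step

print(micro_batches_per_step(256, 1, 16))  # -> 16, matching the log
```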
+100.83.134.158: _initialize_distributed: Initializing with below params:
+100.83.134.158: args.local_rank: 3
+100.83.134.158: args.world_size: 16
+100.83.134.158: args.rank: 3
+100.83.134.158: args.distributed_backend: hccl
+100.83.134.158: _initialize_distributed: Initializing with below params:
+100.83.134.158: args.local_rank: 2
+100.83.134.158: args.world_size: 16
+100.83.134.158: args.rank: 2
+100.83.134.158: args.distributed_backend: hccl
+100.83.134.158: _initialize_distributed: Initializing with below params:
+100.83.134.158: args.local_rank: 1
+100.83.134.158: args.world_size: 16
+100.83.134.158: args.rank: 1
+100.83.134.158: args.distributed_backend: hccl
+100.83.134.158: > padded vocab (size: 50257) with 47 dummy tokens (new size: 50304)
+100.83.134.158: _initialize_distributed: Initializing with below params:
+100.83.134.158: args.local_rank: 0
+100.83.134.158: args.world_size: 16
+100.83.134.158: args.rank: 0
+100.83.134.158: args.distributed_backend: hccl
+100.83.134.158: _initialize_distributed: Initializing with below params:
+100.83.134.158: args.local_rank: 5
+100.83.134.158: args.world_size: 16
+100.83.134.158: args.rank: 5
+100.83.134.158: args.distributed_backend: hccl
+100.83.134.158: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+100.83.134.158: warnings.warn(
+100.83.134.158: hccl device_count: 8
+100.83.134.158: [2024-04-26 09:36:22,649] [WARNING] [comm.py:163:init_deepspeed_backend] HCCL backend in DeepSpeed not yet implemented
+100.83.134.158: [2024-04-26 09:36:22,650] [INFO] [comm.py:637:init_distributed] cdb=None
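The `> padded vocab (size: 50257) with 47 dummy tokens (new size: 50304)` line above is the usual Megatron-style padding of the GPT-2 vocabulary up to a multiple of `make_vocab_size_divisible_by` (128 in the argument dump) times the tensor-parallel size (1 here). A sketch of the arithmetic, with an illustrative helper name:

```python
# Sketch: Megatron-style vocab padding. 50257 is the GPT-2 BPE vocab size;
# 128 and 1 come from make_vocab_size_divisible_by and
# tensor_model_parallel_size in the argument dump above.
def pad_vocab_size(orig_vocab_size: int, divisible_by: int, tp_size: int) -> int:
    multiple = divisible_by * tp_size
    return ((orig_vocab_size + multiple - 1) // multiple) * multiple

padded = pad_vocab_size(50257, 128, 1)
print(padded, padded - 50257)  # -> 50304 47, matching the log
```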
+100.83.134.158: **** Git info for Megatron: git_hash=unknown git_branch=unknown ****
+100.83.134.158: fatal: not a git repository (or any parent up to mount point /)
+100.83.134.158: Stopping at filesystem boundary (GIT_DISCOVERY_ACROSS_FILESYSTEM not set).
+100.83.134.158: hccl device_count: 8
+100.83.134.158: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+100.83.134.158: warnings.warn(
+100.83.134.158: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+100.83.134.158: warnings.warn(
+100.83.134.158: hccl device_count: 8
+100.83.134.158: [2024-04-26 09:36:22,711] [WARNING] [comm.py:163:init_deepspeed_backend] HCCL backend in DeepSpeed not yet implemented
+100.83.134.158: [2024-04-26 09:36:22,711] [INFO] [comm.py:637:init_distributed] cdb=None
+100.83.134.158: [2024-04-26 09:36:22,711] [WARNING] [comm.py:163:init_deepspeed_backend] HCCL backend in DeepSpeed not yet implemented
+100.83.134.158: [2024-04-26 09:36:22,711] [INFO] [comm.py:637:init_distributed] cdb=None
+100.83.134.158: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+100.83.134.158: warnings.warn(
+100.83.134.158: hccl device_count: 8
+100.83.134.158: [2024-04-26 09:36:22,712] [WARNING] [comm.py:163:init_deepspeed_backend] HCCL backend in DeepSpeed not yet implemented
+100.83.134.158: [2024-04-26 09:36:22,712] [INFO] [comm.py:637:init_distributed] cdb=None
+100.83.134.158: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+100.83.134.158: warnings.warn(
+100.83.134.158: hccl device_count: 8
+100.83.134.158: > initializing torch distributed ...
+100.83.134.158: [2024-04-26 09:36:22,722] [WARNING] [comm.py:163:init_deepspeed_backend] HCCL backend in DeepSpeed not yet implemented
+100.83.134.158: [2024-04-26 09:36:22,722] [INFO] [comm.py:637:init_distributed] cdb=None
+100.83.134.158: [2024-04-26 09:36:22,722] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend hccl
+100.83.134.158: _initialize_distributed: Initializing with below params:
+100.83.134.158: args.local_rank: 6
+100.83.134.158: args.world_size: 16
+100.83.134.158: args.rank: 6
+100.83.134.158: args.distributed_backend: hccl
+100.83.134.158: hccl device_count: 8
+100.83.134.158: [2024-04-26 09:36:22,736] [WARNING] [comm.py:163:init_deepspeed_backend] HCCL backend in DeepSpeed not yet implemented
+100.83.134.158: [2024-04-26 09:36:22,736] [INFO] [comm.py:637:init_distributed] cdb=None
+100.83.134.158: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+100.83.134.158: warnings.warn(
+100.83.134.158: hccl device_count: 8
+100.83.134.158: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+100.83.134.158: warnings.warn(
+100.83.134.158: [2024-04-26 09:36:22,791] [WARNING] [comm.py:163:init_deepspeed_backend] HCCL backend in DeepSpeed not yet implemented
+100.83.134.158: [2024-04-26 09:36:22,791] [INFO] [comm.py:637:init_distributed] cdb=None
+100.83.134.158: fatal: not a git repository (or any parent up to mount point /)
+100.83.134.158: Stopping at filesystem boundary (GIT_DISCOVERY_ACROSS_FILESYSTEM not set).
+100.83.134.158: **** Git info for Megatron: git_hash=unknown git_branch=unknown ****
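The `Initializing TorchBackend in DeepSpeed with backend hccl` line above reflects the standard Gaudi bring-up: importing Habana's hccl module registers the backend, after which `torch.distributed` is used as usual. A minimal stand-alone sketch, assuming torchrun-style `RANK`/`WORLD_SIZE` environment variables (the launcher used in this run sets its own):

```python
# Minimal sketch of initializing torch.distributed with the HCCL backend on
# Gaudi. The habana_frameworks import registers "hccl" as a process-group backend.
import os
import torch.distributed as dist
import habana_frameworks.torch.distributed.hccl  # noqa: F401

dist.init_process_group(
    backend="hccl",
    rank=int(os.environ["RANK"]),
    world_size=int(os.environ["WORLD_SIZE"]),
)
print(f"rank {dist.get_rank()}/{dist.get_world_size()} initialized")
```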
+100.83.134.158: _initialize_distributed: Initializing with below params:
+100.83.134.158: args.local_rank: 4
+100.83.134.158: args.world_size: 16
+100.83.134.158: args.rank: 4
+100.83.134.158: args.distributed_backend: hccl
+100.83.134.148: --------------------------------------------------
+100.83.134.148: DeepSpeed C++/CUDA extension op report
+100.83.134.148: --------------------------------------------------
+100.83.134.148: NOTE: Ops not installed will be just-in-time (JIT) compiled at
+100.83.134.148: runtime if needed. Op compatibility means that your system
+100.83.134.148: meet the required dependencies to JIT install the op.
+100.83.134.148: --------------------------------------------------
+100.83.134.148: JIT compiled ops requires ninja
+100.83.134.148: ninja .................. [OKAY]
+100.83.134.148: --------------------------------------------------
+100.83.134.148: op name ................ installed .. compatible
+100.83.134.148: --------------------------------------------------
+100.83.134.148: cpu_adam ............... [NO] ....... [OKAY]
+100.83.134.148: fused_adam ............. [NO] ....... [OKAY]
+100.83.134.148: deepspeed_not_implemented [NO] ....... [OKAY]
+100.83.134.148: transformer_inference .. [NO] ....... [OKAY]
+100.83.134.148: --------------------------------------------------
+100.83.134.148: DeepSpeed general environment info:
+100.83.134.148: torch install path ............... ['/usr/local/lib/python3.10/dist-packages/torch']
+100.83.134.148: torch version .................... 2.1.1a0+gitb51c9f6
+100.83.134.148: deepspeed install path ........... ['/usr/local/lib/python3.10/dist-packages/deepspeed']
+100.83.134.148: deepspeed info ................... 0.12.4+hpu.synapse.v1.14.0, fad45b2, 1.14.0
+100.83.134.148: deepspeed wheel compiled w. ...... torch 2.1
+100.83.134.148: shared memory (/dev/shm) size .... 503.72 GB
+100.83.134.148: fatal: not a git repository (or any parent up to mount point /)
+100.83.134.148: Stopping at filesystem boundary (GIT_DISCOVERY_ACROSS_FILESYSTEM not set).
+100.83.134.148: **** Git info for Megatron: git_hash=unknown git_branch=unknown ****
+100.83.134.158: hccl device_count: 8
+100.83.134.158: [2024-04-26 09:36:23,084] [WARNING] [comm.py:163:init_deepspeed_backend] HCCL backend in DeepSpeed not yet implemented
+100.83.134.158: [2024-04-26 09:36:23,084] [INFO] [comm.py:637:init_distributed] cdb=None
+100.83.134.158: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+100.83.134.158: warnings.warn(
+100.83.134.148: fatal: not a git repository (or any parent up to mount point /)
+100.83.134.148: Stopping at filesystem boundary (GIT_DISCOVERY_ACROSS_FILESYSTEM not set).
+100.83.134.148: **** Git info for Megatron: git_hash=unknown git_branch=unknown ****
+100.83.134.148: _initialize_distributed: Initializing with below params:
+100.83.134.148: args.local_rank: 0
+100.83.134.148: args.world_size: 16
+100.83.134.148: args.rank: 8
+100.83.134.148: args.distributed_backend: hccl
+100.83.134.148: _initialize_distributed: Initializing with below params:
+100.83.134.148: args.local_rank: 5
+100.83.134.148: args.world_size: 16
+100.83.134.148: args.rank: 13
+100.83.134.148: args.distributed_backend: hccl
+100.83.134.148: _initialize_distributed: Initializing with below params:
+100.83.134.148: args.local_rank: 4
+100.83.134.148: args.world_size: 16
+100.83.134.148: args.rank: 12
+100.83.134.148: args.distributed_backend: hccl
+100.83.134.148: _initialize_distributed: Initializing with below params:
+100.83.134.148: args.local_rank: 6
+100.83.134.148: args.world_size: 16
+100.83.134.148: args.rank: 14
+100.83.134.148: args.distributed_backend: hccl
+100.83.134.148: _initialize_distributed: Initializing with below params:
+100.83.134.148: args.local_rank: 1
+100.83.134.148: args.world_size: 16
+100.83.134.148: args.rank: 9
+100.83.134.148: args.distributed_backend: hccl
+100.83.134.148: _initialize_distributed: Initializing with below params:
+100.83.134.148: args.local_rank: 3
+100.83.134.148: args.world_size: 16
+100.83.134.148: args.rank: 11
+100.83.134.148: args.distributed_backend: hccl
+100.83.134.148: _initialize_distributed: Initializing with below params:
+100.83.134.148: args.local_rank: 2
+100.83.134.148: args.world_size: 16
+100.83.134.148: args.rank: 10
+100.83.134.148: args.distributed_backend: hccl
+100.83.134.148: > setting tensorboard ...
+100.83.134.148: _initialize_distributed: Initializing with below params:
+100.83.134.148: args.local_rank: 7
+100.83.134.148: args.world_size: 16
+100.83.134.148: args.rank: 15
+100.83.134.148: args.distributed_backend: hccl
+100.83.134.148: hccl device_count: 8
+100.83.134.148: hccl device_count: 8
+100.83.134.148: [2024-04-26 09:36:23,214] [WARNING] [comm.py:163:init_deepspeed_backend] HCCL backend in DeepSpeed not yet implemented
+100.83.134.148: [2024-04-26 09:36:23,214] [INFO] [comm.py:637:init_distributed] cdb=None
+100.83.134.148: [2024-04-26 09:36:23,214] [WARNING] [comm.py:163:init_deepspeed_backend] HCCL backend in DeepSpeed not yet implemented
+100.83.134.148: [2024-04-26 09:36:23,214] [INFO] [comm.py:637:init_distributed] cdb=None
+100.83.134.148: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+100.83.134.148: warnings.warn(
+100.83.134.148: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+100.83.134.148: warnings.warn(
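Note the rank layout across the two hosts: 100.83.134.158 holds global ranks 0-7 and 100.83.134.148 holds 8-15, i.e. the standard `rank = node_index * devices_per_node + local_rank` mapping with the 8 devices per host reported by `hccl device_count: 8`. A quick sanity check of that mapping (the node indices are our labeling of the two hosts, not something the logs print):

```python
# Sketch: the global-rank layout implied by the init lines above
# (2 hosts x 8 HPUs; treating 100.83.134.158 as node 0 and 100.83.134.148 as node 1).
DEVICES_PER_NODE = 8

def global_rank(node_index: int, local_rank: int) -> int:
    return node_index * DEVICES_PER_NODE + local_rank

assert global_rank(0, 7) == 7    # .158: args.local_rank: 7 -> args.rank: 7
assert global_rank(1, 0) == 8    # .148: args.local_rank: 0 -> args.rank: 8
assert global_rank(1, 7) == 15   # .148: args.local_rank: 7 -> args.rank: 15
```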
+100.83.134.148: hccl device_count: 8
+100.83.134.148: [2024-04-26 09:36:23,215] [WARNING] [comm.py:163:init_deepspeed_backend] HCCL backend in DeepSpeed not yet implemented
+100.83.134.148: [2024-04-26 09:36:23,215] [INFO] [comm.py:637:init_distributed] cdb=None
+100.83.134.148: hccl device_count: 8
+100.83.134.148: hccl device_count: 8
+100.83.134.148: [2024-04-26 09:36:23,215] [WARNING] [comm.py:163:init_deepspeed_backend] HCCL backend in DeepSpeed not yet implemented
+100.83.134.148: [2024-04-26 09:36:23,215] [INFO] [comm.py:637:init_distributed] cdb=None
+100.83.134.148: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+100.83.134.148: warnings.warn(
+100.83.134.148: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+100.83.134.148: warnings.warn(
+100.83.134.148: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+100.83.134.148: warnings.warn(
+100.83.134.148: [2024-04-26 09:36:23,215] [WARNING] [comm.py:163:init_deepspeed_backend] HCCL backend in DeepSpeed not yet implemented
+100.83.134.148: [2024-04-26 09:36:23,215] [INFO] [comm.py:637:init_distributed] cdb=None
+100.83.134.148: hccl device_count: 8
+100.83.134.148: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+100.83.134.148: warnings.warn(
+100.83.134.148: [2024-04-26 09:36:23,217] [WARNING] [comm.py:163:init_deepspeed_backend] HCCL backend in DeepSpeed not yet implemented
+100.83.134.148: [2024-04-26 09:36:23,218] [INFO] [comm.py:637:init_distributed] cdb=None
+100.83.134.148: hccl device_count: 8
+100.83.134.148: [2024-04-26 09:36:23,258] [WARNING] [comm.py:163:init_deepspeed_backend] HCCL backend in DeepSpeed not yet implemented
+100.83.134.148: [2024-04-26 09:36:23,258] [INFO] [comm.py:637:init_distributed] cdb=None
+100.83.134.148: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+100.83.134.148: warnings.warn(
+100.83.134.148: hccl device_count: 8
+100.83.134.148: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+100.83.134.148: warnings.warn(
+100.83.134.148: [2024-04-26 09:36:23,262] [WARNING] [comm.py:163:init_deepspeed_backend] HCCL backend in DeepSpeed not yet implemented
+100.83.134.148: [2024-04-26 09:36:23,262] [INFO] [comm.py:637:init_distributed] cdb=None
+100.83.134.158: > initializing tensor model parallel with size 1
+100.83.134.158: > initializing pipeline model parallel with size 1
+100.83.134.158: > setting random seeds to 1234 ...
+100.83.134.158: > initializing model parallel cuda seeds on global rank 0, model parallel rank 0, and data parallel rank 0 with model parallel seed: 3952 and data parallel seed: 1234
+100.83.134.158: ============================= HABANA PT BRIDGE CONFIGURATION ===========================
+100.83.134.158: PT_HPU_LAZY_MODE = 1
+100.83.134.158: PT_RECIPE_CACHE_PATH =
+100.83.134.158: PT_CACHE_FOLDER_DELETE = 0
+100.83.134.158: PT_HPU_RECIPE_CACHE_CONFIG =
+100.83.134.158: PT_HPU_MAX_COMPOUND_OP_SIZE = 9223372036854775807
+100.83.134.158: PT_HPU_LAZY_ACC_PAR_MODE = 0
+100.83.134.158: PT_HPU_ENABLE_REFINE_DYNAMIC_SHAPES = 0
+100.83.134.158: ---------------------------: System Configuration :---------------------------
+100.83.134.158: Num CPU Cores : 160
+100.83.134.158: CPU RAM : 1056375308 KB
+100.83.134.158: ------------------------------------------------------------------------------
+100.83.134.158: > compiling dataset index builder ...
+100.83.134.158: make: Entering directory '/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/data'
+100.83.134.158: make: Nothing to be done for 'default'.
+100.83.134.158: make: Leaving directory '/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/data'
+100.83.134.158: >>> done with dataset index builder. Compilation time: 0.021 seconds
+100.83.134.158: WARNING: constraints for invoking optimized fused softmax kernel are not met. We default back to unfused kernel invocations.
+100.83.134.158: > compiling and loading fused kernels ...
+100.83.134.148: > compiling dataset index builder ...
+100.83.134.148: make: Entering directory '/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/data'
+100.83.134.148: make: Nothing to be done for 'default'.
+100.83.134.148: make: Leaving directory '/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/data'
+100.83.134.148: >>> done with dataset index builder. Compilation time: 0.020 seconds
+100.83.134.158: >>> done with compiling and loading fused kernels. Compilation time: 0.553 seconds
+100.83.134.148: wandb: Currently logged in as: advaiddeepak0602 (bharatgpt). Use `wandb login --relogin` to force relogin
+100.83.134.148: wandb: Appending key for api.wandb.ai to your netrc file: /root/.netrc
+100.83.134.158: wandb: Currently logged in as: advaiddeepak0602 (bharatgpt). Use `wandb login --relogin` to force relogin
+100.83.134.158: wandb: Appending key for api.wandb.ai to your netrc file: /root/.netrc
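The `HABANA PT BRIDGE CONFIGURATION` banner above echoes environment variables that the PyTorch-HPU bridge reads at start-up (`PT_HPU_LAZY_MODE = 1` selects lazy execution). A hedged sketch of pinning those knobs before the bridge is imported; which settings exist and their defaults depend on the SynapseAI release:

```python
# Sketch: set bridge configuration (as echoed in the banner above) before any
# habana_frameworks import, since the bridge reads these at initialization.
import os

os.environ.setdefault("PT_HPU_LAZY_MODE", "1")                    # lazy execution mode
os.environ.setdefault("PT_HPU_LAZY_ACC_PAR_MODE", "0")
os.environ.setdefault("PT_HPU_ENABLE_REFINE_DYNAMIC_SHAPES", "0")

import habana_frameworks.torch.core as htcore  # noqa: F401  # bridge init happens here
```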
Use `wandb login --relogin` to force relogin +100.83.134.158: wandb: Appending key for api.wandb.ai to your netrc file: /root/.netrc +100.83.134.158: wandb: Appending key for api.wandb.ai to your netrc file: /root/.netrc +100.83.134.158: wandb: Currently logged in as: advaiddeepak0602 (bharatgpt). Use `wandb login --relogin` to force relogin +100.83.134.158: wandb: Appending key for api.wandb.ai to your netrc file: /root/.netrc +100.83.134.148: wandb: Currently logged in as: advaiddeepak0602 (bharatgpt). Use `wandb login --relogin` to force relogin +100.83.134.148: wandb: Currently logged in as: advaiddeepak0602 (bharatgpt). Use `wandb login --relogin` to force relogin +100.83.134.148: wandb: Appending key for api.wandb.ai to your netrc file: /root/.netrc +100.83.134.148: wandb: Appending key for api.wandb.ai to your netrc file: /root/.netrc +100.83.134.158: wandb: Currently logged in as: advaiddeepak0602 (bharatgpt). Use `wandb login --relogin` to force relogin +100.83.134.158: wandb: Currently logged in as: advaiddeepak0602 (bharatgpt). Use `wandb login --relogin` to force relogin +100.83.134.158: wandb: Appending key for api.wandb.ai to your netrc file: /root/.netrc +100.83.134.148: wandb: Currently logged in as: advaiddeepak0602 (bharatgpt). Use `wandb login --relogin` to force relogin +100.83.134.158: wandb: Appending key for api.wandb.ai to your netrc file: /root/.netrc +100.83.134.148: wandb: Currently logged in as: advaiddeepak0602 (bharatgpt). Use `wandb login --relogin` to force relogin +100.83.134.148: wandb: Appending key for api.wandb.ai to your netrc file: /root/.netrc +100.83.134.148: wandb: Appending key for api.wandb.ai to your netrc file: /root/.netrc +100.83.134.148: wandb: Currently logged in as: advaiddeepak0602 (bharatgpt). Use `wandb login --relogin` to force relogin +100.83.134.148: wandb: Appending key for api.wandb.ai to your netrc file: /root/.netrc +100.83.134.148: wandb: Currently logged in as: advaiddeepak0602 (bharatgpt). Use `wandb login --relogin` to force relogin +100.83.134.148: wandb: Appending key for api.wandb.ai to your netrc file: /root/.netrc +100.83.134.148: wandb: Tracking run with wandb version 0.16.6 +100.83.134.148: wandb: Run data is saved locally in /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/wandb/run-20240426_093629-ivvk4z8l +100.83.134.148: wandb: Run `wandb offline` to turn off syncing. +100.83.134.148: wandb: Syncing run wild-lion-2196 +100.83.134.148: wandb: ⭐️ View project at https://wandb.ai/bharatgpt/llama_runs +100.83.134.148: wandb: 🚀 View run at https://wandb.ai/bharatgpt/llama_runs/runs/ivvk4z8l +100.83.134.158: wandb: Tracking run with wandb version 0.16.6 +100.83.134.158: wandb: Run data is saved locally in /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/wandb/run-20240426_093629-lniem2qj +100.83.134.158: wandb: Run `wandb offline` to turn off syncing. +100.83.134.158: wandb: Tracking run with wandb version 0.16.6 +100.83.134.158: wandb: Run data is saved locally in /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/wandb/run-20240426_093629-u2bze58s +100.83.134.158: wandb: Run `wandb offline` to turn off syncing. +100.83.134.158: wandb: Tracking run with wandb version 0.16.6 +100.83.134.158: wandb: Run data is saved locally in /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/wandb/run-20240426_093629-loemx40t +100.83.134.158: wandb: Run `wandb offline` to turn off syncing. 
+100.83.134.158: wandb: Syncing run dazzling-snowball-2196 +100.83.134.158: wandb: ⭐️ View project at https://wandb.ai/bharatgpt/llama_runs +100.83.134.158: wandb: 🚀 View run at https://wandb.ai/bharatgpt/llama_runs/runs/lniem2qj +100.83.134.158: wandb: Tracking run with wandb version 0.16.6 +100.83.134.158: wandb: Run data is saved locally in /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/wandb/run-20240426_093629-he15rnbr +100.83.134.158: wandb: Run `wandb offline` to turn off syncing. +100.83.134.158: wandb: Syncing run tough-silence-2196 +100.83.134.158: wandb: ⭐️ View project at https://wandb.ai/bharatgpt/llama_runs +100.83.134.158: wandb: 🚀 View run at https://wandb.ai/bharatgpt/llama_runs/runs/u2bze58s +100.83.134.158: wandb: Syncing run graceful-darkness-2196 +100.83.134.158: wandb: ⭐️ View project at https://wandb.ai/bharatgpt/llama_runs +100.83.134.158: wandb: 🚀 View run at https://wandb.ai/bharatgpt/llama_runs/runs/loemx40t +100.83.134.158: wandb: Syncing run likely-eon-2206 +100.83.134.158: wandb: ⭐️ View project at https://wandb.ai/bharatgpt/llama_runs +100.83.134.158: wandb: 🚀 View run at https://wandb.ai/bharatgpt/llama_runs/runs/he15rnbr +100.83.134.158: wandb: Tracking run with wandb version 0.16.6 +100.83.134.158: wandb: Run data is saved locally in /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/wandb/run-20240426_093629-1oyi2cwb +100.83.134.158: wandb: Run `wandb offline` to turn off syncing. +100.83.134.158: wandb: Tracking run with wandb version 0.16.6 +100.83.134.158: wandb: Run data is saved locally in /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/wandb/run-20240426_093629-y9lmhtq3 +100.83.134.158: wandb: Run `wandb offline` to turn off syncing. +100.83.134.158: wandb: Syncing run fiery-microwave-2196 +100.83.134.158: wandb: ⭐️ View project at https://wandb.ai/bharatgpt/llama_runs +100.83.134.158: wandb: 🚀 View run at https://wandb.ai/bharatgpt/llama_runs/runs/1oyi2cwb +100.83.134.158: wandb: Syncing run feasible-night-2196 +100.83.134.158: wandb: ⭐️ View project at https://wandb.ai/bharatgpt/llama_runs +100.83.134.158: wandb: 🚀 View run at https://wandb.ai/bharatgpt/llama_runs/runs/y9lmhtq3 +100.83.134.148: wandb: Tracking run with wandb version 0.16.6 +100.83.134.148: wandb: Run data is saved locally in /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/wandb/run-20240426_093629-771q2vtr +100.83.134.148: wandb: Run `wandb offline` to turn off syncing. +100.83.134.148: wandb: Syncing run young-plasma-2196 +100.83.134.148: wandb: ⭐️ View project at https://wandb.ai/bharatgpt/llama_runs +100.83.134.148: wandb: 🚀 View run at https://wandb.ai/bharatgpt/llama_runs/runs/771q2vtr +100.83.134.148: wandb: Tracking run with wandb version 0.16.6 +100.83.134.148: wandb: Run data is saved locally in /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/wandb/run-20240426_093629-imr059bz +100.83.134.148: wandb: Run `wandb offline` to turn off syncing. +100.83.134.148: wandb: Syncing run noble-monkey-2196 +100.83.134.148: wandb: ⭐️ View project at https://wandb.ai/bharatgpt/llama_runs +100.83.134.148: wandb: 🚀 View run at https://wandb.ai/bharatgpt/llama_runs/runs/imr059bz +100.83.134.148: wandb: Tracking run with wandb version 0.16.6 +100.83.134.148: wandb: Run data is saved locally in /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/wandb/run-20240426_093629-lalt8myr +100.83.134.148: wandb: Run `wandb offline` to turn off syncing. 
+100.83.134.148: wandb: Syncing run azure-brook-2196 +100.83.134.148: wandb: ⭐️ View project at https://wandb.ai/bharatgpt/llama_runs +100.83.134.148: wandb: 🚀 View run at https://wandb.ai/bharatgpt/llama_runs/runs/lalt8myr +100.83.134.158: wandb: Tracking run with wandb version 0.16.6 +100.83.134.158: wandb: Run data is saved locally in /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/wandb/run-20240426_093629-ssef3hd5 +100.83.134.158: wandb: Run `wandb offline` to turn off syncing. +100.83.134.158: wandb: Tracking run with wandb version 0.16.6 +100.83.134.158: wandb: Run data is saved locally in /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/wandb/run-20240426_093629-08src8k6 +100.83.134.158: wandb: Run `wandb offline` to turn off syncing. +100.83.134.158: wandb: Syncing run rose-haze-2196 +100.83.134.158: wandb: ⭐️ View project at https://wandb.ai/bharatgpt/llama_runs +100.83.134.158: wandb: 🚀 View run at https://wandb.ai/bharatgpt/llama_runs/runs/ssef3hd5 +100.83.134.158: wandb: Syncing run copper-durian-2203 +100.83.134.158: wandb: ⭐️ View project at https://wandb.ai/bharatgpt/llama_runs +100.83.134.158: wandb: 🚀 View run at https://wandb.ai/bharatgpt/llama_runs/runs/08src8k6 +100.83.134.148: wandb: Tracking run with wandb version 0.16.6 +100.83.134.148: wandb: Run data is saved locally in /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/wandb/run-20240426_093630-l6kkb93d +100.83.134.148: wandb: Run `wandb offline` to turn off syncing. +100.83.134.148: wandb: Tracking run with wandb version 0.16.6 +100.83.134.148: wandb: Run data is saved locally in /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/wandb/run-20240426_093629-64p615c9 +100.83.134.148: wandb: Run `wandb offline` to turn off syncing. +100.83.134.148: wandb: Syncing run jolly-armadillo-2210 +100.83.134.148: wandb: ⭐️ View project at https://wandb.ai/bharatgpt/llama_runs +100.83.134.148: wandb: 🚀 View run at https://wandb.ai/bharatgpt/llama_runs/runs/l6kkb93d +100.83.134.148: wandb: Syncing run silvery-dream-2196 +100.83.134.148: wandb: ⭐️ View project at https://wandb.ai/bharatgpt/llama_runs +100.83.134.148: wandb: 🚀 View run at https://wandb.ai/bharatgpt/llama_runs/runs/64p615c9 +100.83.134.148: wandb: Tracking run with wandb version 0.16.6 +100.83.134.148: wandb: Run data is saved locally in /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/wandb/run-20240426_093629-yilg26ml +100.83.134.148: wandb: Run `wandb offline` to turn off syncing. +100.83.134.148: wandb: Syncing run ancient-smoke-2205 +100.83.134.148: wandb: ⭐️ View project at https://wandb.ai/bharatgpt/llama_runs +100.83.134.148: wandb: 🚀 View run at https://wandb.ai/bharatgpt/llama_runs/runs/yilg26ml +100.83.134.148: wandb: Tracking run with wandb version 0.16.6 +100.83.134.148: wandb: Run data is saved locally in /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/wandb/run-20240426_093630-npncm469 +100.83.134.148: wandb: Run `wandb offline` to turn off syncing. +100.83.134.148: wandb: Syncing run young-sound-2211 +100.83.134.148: wandb: ⭐️ View project at https://wandb.ai/bharatgpt/llama_runs +100.83.134.148: wandb: 🚀 View run at https://wandb.ai/bharatgpt/llama_runs/runs/npncm469 +100.83.134.158: time to initialize megatron (seconds): 31.290 +100.83.134.158: [after megatron is initialized] datetime: 2024-04-26 09:36:31 +100.83.134.158: building LLaMA model ... 
+100.83.134.148: *************** Using FusedSDPA ******************
+100.83.134.158: *************** Using FusedSDPA ******************
+100.83.134.158: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/core/weight_sharing.py:53: UserWarning: "hpu:X" notation is not supported by Gaudi PyTorch intergration bridge. Please change to "hpu" without index (Triggered internally at /npu-stack/pytorch-integration/pytorch_helpers/lazy_to_backend.cpp:53.)
+100.83.134.158: return super().__torch_function__(func, types, new_args, kwargs)
+100.83.134.148: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/core/weight_sharing.py:53: UserWarning: "hpu:X" notation is not supported by Gaudi PyTorch intergration bridge. Please change to "hpu" without index (Triggered internally at /npu-stack/pytorch-integration/pytorch_helpers/lazy_to_backend.cpp:53.)
+100.83.134.148: return super().__torch_function__(func, types, new_args, kwargs)
+100.83.134.158: [2024-04-26 09:36:31,377] [INFO] [utils.py:824:see_memory_usage] Before Building Model
+100.83.134.158: [2024-04-26 09:36:31,381] [INFO] [utils.py:825:see_memory_usage] MA 0.01 GB Max_MA 0.01 GB CA 0.0 GB Max_CA 0 GB
+100.83.134.158: [2024-04-26 09:36:31,381] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 438.34 GB, percent = 43.5%
+100.83.134.158: SEED_LAYERS=False BASE_SEED=1234 SEED_FN=None
+100.83.134.158: Using topology: {ProcessCoord(pipe=0, data=0, model=0): 0, ProcessCoord(pipe=0, data=1, model=0): 1, ProcessCoord(pipe=0, data=2, model=0): 2, ProcessCoord(pipe=0, data=3, model=0): 3, ProcessCoord(pipe=0, data=4, model=0): 4, ProcessCoord(pipe=0, data=5, model=0): 5, ProcessCoord(pipe=0, data=6, model=0): 6, ProcessCoord(pipe=0, data=7, model=0): 7, ProcessCoord(pipe=0, data=8, model=0): 8, ProcessCoord(pipe=0, data=9, model=0): 9, ProcessCoord(pipe=0, data=10, model=0): 10, ProcessCoord(pipe=0, data=11, model=0): 11, ProcessCoord(pipe=0, data=12, model=0): 12, ProcessCoord(pipe=0, data=13, model=0): 13, ProcessCoord(pipe=0, data=14, model=0): 14, ProcessCoord(pipe=0, data=15, model=0): 15}
+100.83.134.158: [2024-04-26 09:36:31,384] [INFO] [module.py:375:_partition_layers] Partitioning pipeline stages with method type:transformer
+100.83.134.158: stage=0 layers=32
+100.83.134.158: 0: _to_float16
+100.83.134.158: 1: EmbeddingPipe
+100.83.134.158: 2:
+100.83.134.158: 3: ParallelTransformerLayerPipe
+100.83.134.158: 4: ParallelTransformerLayerPipe
+100.83.134.158: 5: ParallelTransformerLayerPipe
+100.83.134.158: 6: ParallelTransformerLayerPipe
+100.83.134.158: 7: ParallelTransformerLayerPipe
+100.83.134.158: 8: ParallelTransformerLayerPipe
+100.83.134.158: 9: ParallelTransformerLayerPipe
+100.83.134.158: 10: ParallelTransformerLayerPipe
+100.83.134.158: 11: ParallelTransformerLayerPipe
+100.83.134.158: 12: ParallelTransformerLayerPipe
+100.83.134.158: 13: ParallelTransformerLayerPipe
+100.83.134.158: 14: ParallelTransformerLayerPipe
+100.83.134.158: 15: ParallelTransformerLayerPipe
+100.83.134.158: 16: ParallelTransformerLayerPipe
+100.83.134.158: 17: ParallelTransformerLayerPipe
+100.83.134.158: 18: ParallelTransformerLayerPipe
+100.83.134.158: 19: ParallelTransformerLayerPipe
+100.83.134.158: 20: ParallelTransformerLayerPipe
+100.83.134.158: 21: ParallelTransformerLayerPipe
+100.83.134.158: 22: ParallelTransformerLayerPipe
+100.83.134.158: 23: ParallelTransformerLayerPipe
+100.83.134.158: 24: ParallelTransformerLayerPipe
+100.83.134.158: 25: ParallelTransformerLayerPipe
+100.83.134.158: 26: ParallelTransformerLayerPipe
+100.83.134.158: 27:
+100.83.134.158: 28: WrapName
+100.83.134.158: 29: WrapName
+100.83.134.158: 30:
+100.83.134.158: 31: float16_to_fp32
+100.83.134.158: loss: CrossEntropy
+100.83.134.158: *************** Using FusedSDPA ******************
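The topology map above places all sixteen ranks at pipe=0, model=0, i.e. pure data parallelism (tensor- and pipeline-parallel size 1, as logged earlier). Together with the engine configuration printed further below (train_micro_batch_size_per_gpu 1, gradient_accumulation_steps 16), the global batch size works out as a quick check:

    # Worked check of the batch math implied by this log (all values from the dump).
    micro_batch_per_rank = 1     # train_micro_batch_size_per_gpu
    grad_accum_steps     = 16    # gradient_accumulation_steps
    data_parallel_size   = 16    # 16 ProcessCoord entries, all pipe=0, model=0
    assert micro_batch_per_rank * grad_accum_steps * data_parallel_size == 256  # train_batch_size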
+100.83.134.158: [2024-04-26 09:36:31,550] [INFO] [utils.py:824:see_memory_usage] After Building Model
+100.83.134.158: [2024-04-26 09:36:31,553] [INFO] [utils.py:825:see_memory_usage] MA 0.01 GB Max_MA 0.0 GB CA 0.0 GB Max_CA 0 GB
+100.83.134.158: [2024-04-26 09:36:31,553] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 438.47 GB, percent = 43.5%
+100.83.134.158: > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 1212778496
+100.83.134.158: > learning rate decay style: cosine
+100.83.134.158: DeepSpeed is enabled.
+100.83.134.158: [2024-04-26 09:36:31,558] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed info: version=0.12.4+hpu.synapse.v1.14.0, git-hash=fad45b2, git-branch=1.14.0
+100.83.134.158: [2024-04-26 09:36:32,462] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed Flops Profiler Enabled: False
+100.83.134.158: [2024-04-26 09:36:32,464] [INFO] [logging.py:96:log_dist] [Rank 0] Using client Optimizer as basic optimizer
+100.83.134.158: [2024-04-26 09:36:32,464] [INFO] [logging.py:96:log_dist] [Rank 0] Removing param_group that has no 'params' in the basic Optimizer
+100.83.134.158: [2024-04-26 09:36:32,468] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed Basic Optimizer = AdamW
+100.83.134.158: [2024-04-26 09:36:32,468] [INFO] [logging.py:96:log_dist] [Rank 0] Creating BF16 optimizer
+100.83.134.158: [2024-04-26 09:36:32,561] [INFO] [utils.py:824:see_memory_usage] begin bf16_optimizer
+100.83.134.158: [2024-04-26 09:36:32,565] [INFO] [utils.py:825:see_memory_usage] MA 2.27 GB Max_MA 2.29 GB CA 0.0 GB Max_CA 0 GB
+100.83.134.158: [2024-04-26 09:36:32,565] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 438.78 GB, percent = 43.6%
+100.83.134.158: [2024-04-26 09:36:32,651] [INFO] [utils.py:824:see_memory_usage] before initializing group 0
+100.83.134.158: [2024-04-26 09:36:32,654] [INFO] [utils.py:825:see_memory_usage] MA 2.27 GB Max_MA 0.0 GB CA 0.0 GB Max_CA 0 GB
+100.83.134.158: [2024-04-26 09:36:32,655] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 438.79 GB, percent = 43.6%
+100.83.134.158: [2024-04-26 09:36:32,694] [INFO] [engine.py:139:__init__] is_pipe_partitioned= False is_grad_partitioned= False
+100.83.134.158: [2024-04-26 09:36:32,697] [INFO] [engine.py:139:__init__] is_pipe_partitioned= False is_grad_partitioned= False
+100.83.134.158: [2024-04-26 09:36:32,706] [INFO] [engine.py:139:__init__] is_pipe_partitioned= False is_grad_partitioned= False
+100.83.134.158: [2024-04-26 09:36:32,708] [INFO] [engine.py:139:__init__] is_pipe_partitioned= False is_grad_partitioned= False
+100.83.134.158: [2024-04-26 09:36:32,709] [INFO] [engine.py:139:__init__] is_pipe_partitioned= False is_grad_partitioned= False
+100.83.134.158: [2024-04-26 09:36:32,717] [INFO] [engine.py:139:__init__] is_pipe_partitioned= False is_grad_partitioned= False
+100.83.134.158: [2024-04-26 09:36:32,722] [INFO] [engine.py:139:__init__] is_pipe_partitioned= False is_grad_partitioned= False
+100.83.134.148: [2024-04-26 09:36:32,729] [INFO] [engine.py:139:__init__] is_pipe_partitioned= False is_grad_partitioned= False
+100.83.134.148: [2024-04-26 09:36:32,730] [INFO] [engine.py:139:__init__] is_pipe_partitioned= False is_grad_partitioned= False
+100.83.134.148: [2024-04-26 09:36:32,732] [INFO] [engine.py:139:__init__] is_pipe_partitioned= False is_grad_partitioned= False
+100.83.134.148: [2024-04-26 09:36:32,742] [INFO] [engine.py:139:__init__] is_pipe_partitioned= False is_grad_partitioned= False
+100.83.134.148: [2024-04-26 09:36:32,743] [INFO] [engine.py:139:__init__] is_pipe_partitioned= False is_grad_partitioned= False
+100.83.134.148: [2024-04-26 09:36:32,755] [INFO] [engine.py:139:__init__] is_pipe_partitioned= False is_grad_partitioned= False
+100.83.134.148: [2024-04-26 09:36:32,772] [INFO] [engine.py:139:__init__] is_pipe_partitioned= False is_grad_partitioned= False
+100.83.134.148: [2024-04-26 09:36:32,785] [INFO] [engine.py:139:__init__] is_pipe_partitioned= False is_grad_partitioned= False
+100.83.134.158: [2024-04-26 09:36:32,982] [INFO] [utils.py:824:see_memory_usage] after initializing group 0
+100.83.134.158: [2024-04-26 09:36:32,986] [INFO] [utils.py:825:see_memory_usage] MA 2.27 GB Max_MA 4.53 GB CA 0.0 GB Max_CA 0 GB
+100.83.134.158: [2024-04-26 09:36:32,986] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 438.88 GB, percent = 43.6%
+100.83.134.158: [2024-04-26 09:36:33,047] [INFO] [utils.py:824:see_memory_usage] before initializing group 1
+100.83.134.158: [2024-04-26 09:36:33,050] [INFO] [utils.py:825:see_memory_usage] MA 2.27 GB Max_MA 0.0 GB CA 0.0 GB Max_CA 0 GB
+100.83.134.158: [2024-04-26 09:36:33,050] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 438.89 GB, percent = 43.6%
+100.83.134.158: [2024-04-26 09:36:33,129] [INFO] [utils.py:824:see_memory_usage] after initializing group 1
+100.83.134.158: [2024-04-26 09:36:33,132] [INFO] [utils.py:825:see_memory_usage] MA 7.07 GB Max_MA 7.07 GB CA 0.0 GB Max_CA 0 GB
+100.83.134.158: [2024-04-26 09:36:33,133] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 438.89 GB, percent = 43.6%
+100.83.134.158: [2024-04-26 09:36:33,188] [INFO] [utils.py:824:see_memory_usage] before initialize_optimizer
+100.83.134.158: [2024-04-26 09:36:33,191] [INFO] [utils.py:825:see_memory_usage] MA 7.07 GB Max_MA 0.0 GB CA 0.0 GB Max_CA 0 GB
+100.83.134.158: [2024-04-26 09:36:33,192] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 438.89 GB, percent = 43.6%
+100.83.134.158: [2024-04-26 09:36:33,250] [INFO] [utils.py:824:see_memory_usage] end initialize_optimizer
+100.83.134.158: [2024-04-26 09:36:33,254] [INFO] [utils.py:825:see_memory_usage] MA 7.07 GB Max_MA 0.0 GB CA 0.0 GB Max_CA 0 GB
+100.83.134.158: [2024-04-26 09:36:33,254] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 438.89 GB, percent = 43.6%
+100.83.134.158: [2024-04-26 09:36:33,319] [INFO] [utils.py:824:see_memory_usage] end bf16_optimizer
+100.83.134.158: [2024-04-26 09:36:33,323] [INFO] [utils.py:825:see_memory_usage] MA 7.07 GB Max_MA 0.0 GB CA 0.0 GB Max_CA 0 GB
+100.83.134.158: [2024-04-26 09:36:33,323] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 438.89 GB, percent = 43.6%
+100.83.134.158: [2024-04-26 09:36:33,324] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed Final Optimizer = BF16_Optimizer
+100.83.134.158: [2024-04-26 09:36:33,324] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed using client LR scheduler
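For reference, the engine configuration dump that follows is consistent with a DeepSpeed config along these lines. This is a reconstructed sketch from the printed values, not the run's actual ds_config file:

    # Hypothetical ds_config reconstructed from the dump below (bf16, no ZeRO).
    ds_config = {
        "train_batch_size": 256,
        "train_micro_batch_size_per_gpu": 1,
        "gradient_accumulation_steps": 16,
        "gradient_clipping": 1.0,
        "steps_per_print": 10,
        "bf16": {"enabled": True},
        "zero_optimization": {"stage": 0},
    }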
+100.83.134.158: [2024-04-26 09:36:33,324] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed LR Scheduler = +100.83.134.158: [2024-04-26 09:36:33,324] [INFO] [logging.py:96:log_dist] [Rank 0] step=0, skipped=0, lr=[0.0, 0.0], mom=[(0.9, 0.95), (0.9, 0.95)] +100.83.134.158: [2024-04-26 09:36:33,325] [INFO] [config.py:992:print] DeepSpeedEngine configuration: +100.83.134.158: [2024-04-26 09:36:33,326] [INFO] [config.py:996:print] activation_checkpointing_config { +100.83.134.158: "partition_activations": false, +100.83.134.158: "contiguous_memory_optimization": false, +100.83.134.158: "cpu_checkpointing": false, +100.83.134.158: "number_checkpoints": null, +100.83.134.158: "synchronize_checkpoint_boundary": false, +100.83.134.158: "profile": false +100.83.134.158: } +100.83.134.158: [2024-04-26 09:36:33,326] [INFO] [config.py:996:print] aio_config ................... {'block_size': 1048576, 'queue_depth': 8, 'thread_count': 1, 'single_submit': False, 'overlap_events': True} +100.83.134.158: [2024-04-26 09:36:33,326] [INFO] [config.py:996:print] amp_enabled .................. False +100.83.134.158: [2024-04-26 09:36:33,326] [INFO] [config.py:996:print] amp_params ................... False +100.83.134.158: [2024-04-26 09:36:33,326] [INFO] [config.py:996:print] autotuning_config ............ { +100.83.134.158: "enabled": false, +100.83.134.158: "start_step": null, +100.83.134.158: "end_step": null, +100.83.134.158: "metric_path": null, +100.83.134.158: "arg_mappings": null, +100.83.134.158: "metric": "throughput", +100.83.134.158: "model_info": null, +100.83.134.158: "results_dir": "autotuning_results", +100.83.134.158: "exps_dir": "autotuning_exps", +100.83.134.158: "overwrite": true, +100.83.134.158: "fast": true, +100.83.134.158: "start_profile_step": 3, +100.83.134.158: "end_profile_step": 5, +100.83.134.158: "tuner_type": "gridsearch", +100.83.134.158: "tuner_early_stopping": 5, +100.83.134.158: "tuner_num_trials": 50, +100.83.134.158: "model_info_path": null, +100.83.134.158: "mp_size": 1, +100.83.134.158: "max_train_batch_size": null, +100.83.134.158: "min_train_batch_size": 1, +100.83.134.158: "max_train_micro_batch_size_per_gpu": 1.024000e+03, +100.83.134.158: "min_train_micro_batch_size_per_gpu": 1, +100.83.134.158: "num_tuning_micro_batch_sizes": 3 +100.83.134.158: } +100.83.134.158: [2024-04-26 09:36:33,326] [INFO] [config.py:996:print] bfloat16_accumulate_grads_via_hooks True +100.83.134.158: [2024-04-26 09:36:33,326] [INFO] [config.py:996:print] bfloat16_enabled ............. True +100.83.134.158: [2024-04-26 09:36:33,326] [INFO] [config.py:996:print] checkpoint_parallel_write_pipeline False +100.83.134.158: [2024-04-26 09:36:33,326] [INFO] [config.py:996:print] checkpoint_tag_validation_enabled True +100.83.134.158: [2024-04-26 09:36:33,326] [INFO] [config.py:996:print] checkpoint_tag_validation_fail False +100.83.134.158: [2024-04-26 09:36:33,327] [INFO] [config.py:996:print] comms_config ................. +100.83.134.158: [2024-04-26 09:36:33,327] [INFO] [config.py:996:print] communication_data_type ...... None +100.83.134.158: [2024-04-26 09:36:33,327] [INFO] [config.py:996:print] compression_config ........... 
{'weight_quantization': {'shared_parameters': {'enabled': False, 'quantizer_kernel': False, 'schedule_offset': 0, 'quantize_groups': 1, 'quantize_verbose': False, 'quantization_type': 'symmetric', 'quantize_weight_in_forward': False, 'rounding': 'nearest', 'fp16_mixed_quantize': False, 'quantize_change_ratio': 0.001}, 'different_groups': {}}, 'activation_quantization': {'shared_parameters': {'enabled': False, 'quantization_type': 'symmetric', 'range_calibration': 'dynamic', 'schedule_offset': 1000}, 'different_groups': {}}, 'sparse_pruning': {'shared_parameters': {'enabled': False, 'method': 'l1', 'schedule_offset': 1000}, 'different_groups': {}}, 'row_pruning': {'shared_parameters': {'enabled': False, 'method': 'l1', 'schedule_offset': 1000}, 'different_groups': {}}, 'head_pruning': {'shared_parameters': {'enabled': False, 'method': 'topk', 'schedule_offset': 1000}, 'different_groups': {}}, 'channel_pruning': {'shared_parameters': {'enabled': False, 'method': 'l1', 'schedule_offset': 1000}, 'different_groups': {}}, 'layer_reduction': {'enabled': False}} +100.83.134.158: [2024-04-26 09:36:33,327] [INFO] [config.py:996:print] curriculum_enabled_legacy .... False +100.83.134.158: [2024-04-26 09:36:33,327] [INFO] [config.py:996:print] curriculum_params_legacy ..... False +100.83.134.158: [2024-04-26 09:36:33,327] [INFO] [config.py:996:print] data_efficiency_config ....... {'enabled': False, 'seed': 1234, 'data_sampling': {'enabled': False, 'num_epochs': 1000, 'num_workers': 0, 'curriculum_learning': {'enabled': False}}, 'data_routing': {'enabled': False, 'random_ltd': {'enabled': False, 'layer_token_lr_schedule': {'enabled': False}}}} +100.83.134.158: [2024-04-26 09:36:33,327] [INFO] [config.py:996:print] data_efficiency_enabled ...... False +100.83.134.158: [2024-04-26 09:36:33,327] [INFO] [config.py:996:print] dataloader_drop_last ......... False +100.83.134.158: [2024-04-26 09:36:33,327] [INFO] [config.py:996:print] disable_allgather ............ False +100.83.134.158: [2024-04-26 09:36:33,327] [INFO] [config.py:996:print] dump_state ................... False +100.83.134.158: [2024-04-26 09:36:33,327] [INFO] [config.py:996:print] dynamic_loss_scale_args ...... None +100.83.134.158: [2024-04-26 09:36:33,327] [INFO] [config.py:996:print] eigenvalue_enabled ........... False +100.83.134.158: [2024-04-26 09:36:33,327] [INFO] [config.py:996:print] eigenvalue_gas_boundary_resolution 1 +100.83.134.158: [2024-04-26 09:36:33,327] [INFO] [config.py:996:print] eigenvalue_layer_name ........ bert.encoder.layer +100.83.134.158: [2024-04-26 09:36:33,327] [INFO] [config.py:996:print] eigenvalue_layer_num ......... 0 +100.83.134.158: [2024-04-26 09:36:33,327] [INFO] [config.py:996:print] eigenvalue_max_iter .......... 100 +100.83.134.158: [2024-04-26 09:36:33,327] [INFO] [config.py:996:print] eigenvalue_stability ......... 1e-06 +100.83.134.158: [2024-04-26 09:36:33,327] [INFO] [config.py:996:print] eigenvalue_tol ............... 0.01 +100.83.134.158: [2024-04-26 09:36:33,328] [INFO] [config.py:996:print] eigenvalue_verbose ........... False +100.83.134.158: [2024-04-26 09:36:33,328] [INFO] [config.py:996:print] elasticity_enabled ........... False +100.83.134.158: [2024-04-26 09:36:33,328] [INFO] [config.py:996:print] flops_profiler_config ........ 
{ +100.83.134.158: "enabled": false, +100.83.134.158: "recompute_fwd_factor": 0.0, +100.83.134.158: "profile_step": 1, +100.83.134.158: "module_depth": -1, +100.83.134.158: "top_modules": 1, +100.83.134.158: "detailed": true, +100.83.134.158: "output_file": null +100.83.134.158: } +100.83.134.158: [2024-04-26 09:36:33,328] [INFO] [config.py:996:print] fp16_auto_cast ............... None +100.83.134.158: [2024-04-26 09:36:33,328] [INFO] [config.py:996:print] fp16_enabled ................. False +100.83.134.158: [2024-04-26 09:36:33,328] [INFO] [config.py:996:print] fp16_master_weights_and_gradients False +100.83.134.158: [2024-04-26 09:36:33,328] [INFO] [config.py:996:print] global_rank .................. 0 +100.83.134.158: [2024-04-26 09:36:33,328] [INFO] [config.py:996:print] grad_accum_dtype ............. None +100.83.134.158: [2024-04-26 09:36:33,328] [INFO] [config.py:996:print] gradient_accumulation_steps .. 16 +100.83.134.158: [2024-04-26 09:36:33,328] [INFO] [config.py:996:print] gradient_clipping ............ 1.0 +100.83.134.158: [2024-04-26 09:36:33,328] [INFO] [config.py:996:print] gradient_predivide_factor .... 1.0 +100.83.134.158: [2024-04-26 09:36:33,328] [INFO] [config.py:996:print] hybrid_engine ................ enabled=False max_out_tokens=512 inference_tp_size=1 release_inference_cache=False pin_parameters=True tp_gather_partition_size=8 +100.83.134.158: [2024-04-26 09:36:33,328] [INFO] [config.py:996:print] initial_dynamic_scale ........ 1 +100.83.134.158: [2024-04-26 09:36:33,328] [INFO] [config.py:996:print] load_universal_checkpoint .... False +100.83.134.158: [2024-04-26 09:36:33,328] [INFO] [config.py:996:print] loss_scale ................... 1.0 +100.83.134.158: [2024-04-26 09:36:33,328] [INFO] [config.py:996:print] memory_breakdown ............. False +100.83.134.158: [2024-04-26 09:36:33,328] [INFO] [config.py:996:print] mics_hierarchial_params_gather False +100.83.134.158: [2024-04-26 09:36:33,329] [INFO] [config.py:996:print] mics_shard_size .............. -1 +100.83.134.158: [2024-04-26 09:36:33,329] [INFO] [config.py:996:print] monitor_config ............... tensorboard=TensorBoardConfig(enabled=False, output_path='', job_name='DeepSpeedJobName') wandb=WandbConfig(enabled=False, group=None, team=None, project='deepspeed') csv_monitor=CSVConfig(enabled=False, output_path='', job_name='DeepSpeedJobName') enabled=False +100.83.134.158: [2024-04-26 09:36:33,329] [INFO] [config.py:996:print] nebula_config ................ { +100.83.134.158: "enabled": false, +100.83.134.158: "persistent_storage_path": null, +100.83.134.158: "persistent_time_interval": 100, +100.83.134.158: "num_of_version_in_retention": 2, +100.83.134.158: "enable_nebula_load": true, +100.83.134.158: "load_path": null +100.83.134.158: } +100.83.134.158: [2024-04-26 09:36:33,329] [INFO] [config.py:996:print] optimizer_legacy_fusion ...... False +100.83.134.158: [2024-04-26 09:36:33,329] [INFO] [config.py:996:print] optimizer_name ............... None +100.83.134.158: [2024-04-26 09:36:33,329] [INFO] [config.py:996:print] optimizer_params ............. None +100.83.134.158: [2024-04-26 09:36:33,329] [INFO] [config.py:996:print] pipeline ..................... {'stages': 'auto', 'partition': 'best', 'seed_layers': False, 'activation_checkpoint_interval': 0, 'pipe_partitioned': False, 'grad_partitioned': False} +100.83.134.158: [2024-04-26 09:36:33,329] [INFO] [config.py:996:print] pld_enabled .................. 
False +100.83.134.158: [2024-04-26 09:36:33,329] [INFO] [config.py:996:print] pld_params ................... False +100.83.134.158: [2024-04-26 09:36:33,329] [INFO] [config.py:996:print] prescale_gradients ........... False +100.83.134.158: [2024-04-26 09:36:33,329] [INFO] [config.py:996:print] scheduler_name ............... None +100.83.134.158: [2024-04-26 09:36:33,329] [INFO] [config.py:996:print] scheduler_params ............. None +100.83.134.158: [2024-04-26 09:36:33,329] [INFO] [config.py:996:print] seq_parallel_communication_data_type torch.float32 +100.83.134.158: [2024-04-26 09:36:33,329] [INFO] [config.py:996:print] sparse_attention ............. None +100.83.134.158: [2024-04-26 09:36:33,329] [INFO] [config.py:996:print] sparse_gradients_enabled ..... False +100.83.134.158: [2024-04-26 09:36:33,329] [INFO] [config.py:996:print] steps_per_print .............. 10 +100.83.134.158: [2024-04-26 09:36:33,329] [INFO] [config.py:996:print] train_batch_size ............. 256 +100.83.134.158: [2024-04-26 09:36:33,330] [INFO] [config.py:996:print] train_micro_batch_size_per_gpu 1 +100.83.134.158: [2024-04-26 09:36:33,330] [INFO] [config.py:996:print] use_data_before_expert_parallel_ False +100.83.134.158: [2024-04-26 09:36:33,330] [INFO] [config.py:996:print] use_node_local_storage ....... False +100.83.134.158: [2024-04-26 09:36:33,330] [INFO] [config.py:996:print] wall_clock_breakdown ......... False +100.83.134.158: [2024-04-26 09:36:33,330] [INFO] [config.py:996:print] weight_quantization_config ... None +100.83.134.158: [2024-04-26 09:36:33,330] [INFO] [config.py:996:print] world_size ................... 16 +100.83.134.158: [2024-04-26 09:36:33,330] [INFO] [config.py:996:print] zero_allow_comm_data_type_fp32 False +100.83.134.158: [2024-04-26 09:36:33,330] [INFO] [config.py:996:print] zero_allow_untested_optimizer False +100.83.134.158: [2024-04-26 09:36:33,330] [INFO] [config.py:996:print] zero_config .................. stage=0 contiguous_gradients=True reduce_scatter=False reduce_bucket_size=500,000,000 use_multi_rank_bucket_allreduce=True allgather_partitions=True allgather_bucket_size=500,000,000 overlap_comm=False load_from_fp32_weights=True elastic_checkpoint=False offload_param=None offload_optimizer=None sub_group_size=1,000,000,000 cpu_offload_param=None cpu_offload_use_pin_memory=None cpu_offload=None prefetch_bucket_size=50,000,000 param_persistence_threshold=100,000 model_persistence_threshold=sys.maxsize max_live_parameters=1,000,000,000 max_reuse_distance=1,000,000,000 gather_16bit_weights_on_model_save=False stage3_gather_fp16_weights_on_model_save=False use_all_reduce_for_fetch_params=False ignore_unused_parameters=True legacy_stage1=False round_robin_gradients=False zero_hpz_partition_size=1 zero_quantized_weights=False zero_quantized_nontrainable_weights=False zero_quantized_gradients=False mics_shard_size=-1 mics_hierarchical_params_gather=False memory_efficient_linear=True pipeline_loading_checkpoint=False override_module_apply=True +100.83.134.158: [2024-04-26 09:36:33,330] [INFO] [config.py:996:print] zero_enabled ................. False +100.83.134.158: [2024-04-26 09:36:33,330] [INFO] [config.py:996:print] zero_force_ds_cpu_optimizer .. True +100.83.134.158: [2024-04-26 09:36:33,330] [INFO] [config.py:996:print] zero_optimization_stage ...... 
0
+100.83.134.158: [2024-04-26 09:36:33,330] [INFO] [config.py:982:print_user_config] json = {
+100.83.134.158:     "train_batch_size": 256,
+100.83.134.158:     "train_micro_batch_size_per_gpu": 1,
+100.83.134.158:     "steps_per_print": 10,
+100.83.134.158:     "gradient_clipping": 1.0,
+100.83.134.158:     "zero_optimization": {
+100.83.134.158:         "stage": 0
+100.83.134.158:     },
+100.83.134.158:     "bf16": {
+100.83.134.158:         "enabled": true,
+100.83.134.158:         "accumulate_grads_via_hooks": true
+100.83.134.158:     },
+100.83.134.158:     "fp16": {
+100.83.134.158:         "enabled": false
+100.83.134.158:     },
+100.83.134.158:     "wall_clock_breakdown": false,
+100.83.134.158:     "pipeline": {
+100.83.134.158:         "pipe_partitioned": false,
+100.83.134.158:         "grad_partitioned": false
+100.83.134.158:     }
+100.83.134.158: }
+100.83.134.158: [2024-04-26 09:36:33,330] [INFO] [engine.py:99:__init__] CONFIG: micro_batches=16 micro_batch_size=1
+100.83.134.158: [2024-04-26 09:36:33,330] [INFO] [engine.py:139:__init__] is_pipe_partitioned= False is_grad_partitioned= False
+100.83.134.158: [2024-04-26 09:36:33,598] [INFO] [engine.py:180:__init__] RANK=0 STAGE=0 LAYERS=32 [0, 32) STAGE_PARAMS=1212778496 (1212.778M) TOTAL_PARAMS=1212778496 (1212.778M) UNIQUE_PARAMS=1212778496 (1212.778M)
+100.83.134.158: [2024-04-26 09:36:33,600] [WARNING] [engine.py:2763:load_checkpoint] Unable to find latest file at /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/latest, if trying to load latest checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint.
+100.83.134.148: [2024-04-26 09:36:33,601] [WARNING] [engine.py:2763:load_checkpoint] Unable to find latest file at /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/latest, if trying to load latest checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint.
+100.83.134.158: WARNING: could not find the metadata file /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2
+100.83.134.158: will not load any checkpoints and will start from random
+100.83.134.148: time (ms) | load-checkpoint: 2.86
+100.83.134.158: [after model, optimizer, and learning rate scheduler are built] datetime: 2024-04-26 09:36:33
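For reference, the effective batch size in the configuration above can be reproduced with a few lines of arithmetic. This is a minimal sketch under the assumption that all 16 ranks are data-parallel (the single 32-layer pipeline stage reported for RANK=0 suggests no pipeline or tensor partitioning); the variable names are illustrative and are not taken from the training scripts.

    # Effective batch size implied by the DeepSpeed config printed above.
    # Numeric values are copied from the log; the names are illustrative only.
    micro_batch_per_gpu = 1     # "train_micro_batch_size_per_gpu"
    grad_accum_steps = 16       # micro_batches per optimizer step
    data_parallel_size = 16     # assumption: equals world_size (see note above)

    global_batch = micro_batch_per_gpu * grad_accum_steps * data_parallel_size
    assert global_batch == 256  # matches "train_batch_size" in the config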
+100.83.134.158: > building train, validation, and test datasets ...
+100.83.134.158: > datasets target sizes (minimum size):
+100.83.134.158:     train:      2560000
+100.83.134.158:     validation: 53760
+100.83.134.158:     test:       2560
+100.83.134.158: > building train, validation, and test datasets for GPT ...
+100.83.134.158: Single data path provided for train, valid & test
+100.83.134.158: > building dataset index ...
+100.83.134.158:     reading sizes...
+100.83.134.158:     reading pointers...
+100.83.134.158:     reading document index...
+100.83.134.158:     creating numpy buffer of mmap...
+100.83.134.158:     creating memory view of numpy buffer...
+100.83.134.158: > finished creating indexed dataset in 0.000598 seconds
+100.83.134.158:     number of documents: 1558306
+100.83.134.158: > dataset split:
+100.83.134.158:     train:      document indices in [0, 1509999) total of 1509999 documents
+100.83.134.158:     validation: document indices in [1509999, 1556748) total of 46749 documents
+100.83.134.158:     test:       document indices in [1556748, 1558306) total of 1558 documents
+100.83.134.158: Loading dataset index file from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_doc_idx.npy
+100.83.134.158: Loading dataset index file from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_sample_idx.npy
+100.83.134.158: Loading dataset index file from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_shuffle_idx.npy
+100.83.134.148: Loading dataset index file from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_doc_idx.npy
+100.83.134.148: Loading dataset index file from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_sample_idx.npy
+100.83.134.148: Loading dataset index file from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_shuffle_idx.npy
+100.83.134.158: > loaded doc-idx mapping from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_doc_idx.npy
+100.83.134.158: > loaded sample-idx mapping from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_sample_idx.npy
+100.83.134.158: > loaded shuffle-idx mapping from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_shuffle_idx.npy
+100.83.134.158:     loaded indexed file in 0.002 seconds
+100.83.134.158:     total number of samples: 15244235
+100.83.134.158:     total number of epochs: 1
+100.83.134.158: > WARNING: could not find index map files, building the indices on rank 0 ...
+100.83.134.158: > only one epoch required, setting separate_last_epoch to False
+100.83.134.158: Saving dataset index file to /data/arxiv//tokenized_text_document_valid_indexmap_53760ns_2048sl_1234s_doc_idx.npy
+100.83.134.158: > saved doc-idx mapping to /data/arxiv//tokenized_text_document_valid_indexmap_53760ns_2048sl_1234s_doc_idx.npy
+100.83.134.158: > elapsed time to build and save doc-idx mapping (seconds): 0.003330
+100.83.134.158:     using:
+100.83.134.158:      number of documents:     46749
+100.83.134.158:      number of epochs:        1
+100.83.134.158:      sequence length:         2048
+100.83.134.158:      total number of samples: 481161
+100.83.134.158: Saving dataset index file to /data/arxiv//tokenized_text_document_valid_indexmap_53760ns_2048sl_1234s_sample_idx.npy
+100.83.134.158: > saved sample-idx mapping to /data/arxiv//tokenized_text_document_valid_indexmap_53760ns_2048sl_1234s_sample_idx.npy
+100.83.134.158: > elapsed time to build and save sample-idx mapping (seconds): 0.005666
+100.83.134.158: > building shuffle index with split [0, 481161) and [481161, 481161) ...
+100.83.134.158: Saving dataset index file to /data/arxiv//tokenized_text_document_valid_indexmap_53760ns_2048sl_1234s_shuffle_idx.npy
+100.83.134.158: > saved shuffle-idx mapping to /data/arxiv//tokenized_text_document_valid_indexmap_53760ns_2048sl_1234s_shuffle_idx.npy
+100.83.134.158: > elapsed time to build and save shuffle-idx mapping (seconds): 0.012002
+100.83.134.158: Loading dataset index file from /data/arxiv//tokenized_text_document_valid_indexmap_53760ns_2048sl_1234s_doc_idx.npy
+100.83.134.158: Loading dataset index file from /data/arxiv//tokenized_text_document_valid_indexmap_53760ns_2048sl_1234s_sample_idx.npy
+100.83.134.158: Loading dataset index file from /data/arxiv//tokenized_text_document_valid_indexmap_53760ns_2048sl_1234s_shuffle_idx.npy
+100.83.134.148: Loading dataset index file from /data/arxiv//tokenized_text_document_valid_indexmap_53760ns_2048sl_1234s_doc_idx.npy
+100.83.134.148: Loading dataset index file from /data/arxiv//tokenized_text_document_valid_indexmap_53760ns_2048sl_1234s_sample_idx.npy
+100.83.134.148: Loading dataset index file from /data/arxiv//tokenized_text_document_valid_indexmap_53760ns_2048sl_1234s_shuffle_idx.npy
+100.83.134.158: > loaded doc-idx mapping from /data/arxiv//tokenized_text_document_valid_indexmap_53760ns_2048sl_1234s_doc_idx.npy
+100.83.134.158: > loaded sample-idx mapping from /data/arxiv//tokenized_text_document_valid_indexmap_53760ns_2048sl_1234s_sample_idx.npy
+100.83.134.158: > loaded shuffle-idx mapping from /data/arxiv//tokenized_text_document_valid_indexmap_53760ns_2048sl_1234s_shuffle_idx.npy
+100.83.134.158:     loaded indexed file in 0.002 seconds
+100.83.134.158:     total number of samples: 481162
+100.83.134.158:     total number of epochs: 1
+100.83.134.158: Loading dataset index file from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_doc_idx.npy
+100.83.134.158: Loading dataset index file from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_sample_idx.npy
+100.83.134.158: Loading dataset index file from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_shuffle_idx.npy
+100.83.134.148: Loading dataset index file from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_doc_idx.npy
+100.83.134.148: Loading dataset index file from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_sample_idx.npy
+100.83.134.148: Loading dataset index file from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_shuffle_idx.npy
+100.83.134.158: > loaded doc-idx mapping from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_doc_idx.npy
+100.83.134.158: > loaded sample-idx mapping from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_sample_idx.npy
+100.83.134.158: > loaded shuffle-idx mapping from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_shuffle_idx.npy
+100.83.134.158:     loaded indexed file in 0.001 seconds
+100.83.134.158:     total number of samples: 16581
+100.83.134.158:     total number of epochs: 1
+100.83.134.158: > finished creating GPT datasets ...
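The "datasets target sizes" requested at the start of this section are the training schedule multiplied out, and the index-map filenames encode the parameters used to build them; the sketch below reproduces both for the train split. The validation and test targets come from the evaluation schedule in the same way, but those schedule values are not printed in this log, so they are not reconstructed here. Illustrative Python only, not code from the training scripts.

    # "train: 2560000" is simply iterations x global batch size.
    train_iters = 10_000    # "iteration .../10000" in the step logs below
    global_batch = 256
    train_samples = train_iters * global_batch
    assert train_samples == 2_560_000

    # Index maps are keyed by sample count (ns), sequence length (sl) and seed (s).
    seq_len, seed = 2048, 1234
    name = (f"tokenized_text_document_train_indexmap_"
            f"{train_samples}ns_{seq_len}sl_{seed}s_doc_idx.npy")
    assert name == "tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_doc_idx.npy"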
+100.83.134.158: [after dataloaders are built] datetime: 2024-04-26 09:36:35
+100.83.134.158: done with setup ...
+100.83.134.158: training ...
+100.83.134.148: time (ms) | model-and-optimizer-setup: 2312.04 | train/valid/test-data-iterators-setup: 1593.44
+100.83.134.158: [before the start of training step] datetime: 2024-04-26 09:36:35
+100.83.134.158: [2024-04-26 09:37:41,702] [INFO] [logging.py:96:log_dist] [Rank 0] step=10, skipped=0, lr=[1.4999999999999998e-06, 1.4999999999999998e-06], mom=[(0.9, 0.95), (0.9, 0.95)]
+100.83.134.158: steps: 10 loss: 10.9390 iter time (s): 6.749 samples/sec: 37.930
+100.83.134.158: [Rank 0] (after 10 iterations) memory (MB) | allocated: 0.0 | max allocated: 0.0 | reserved: 0.0 | max reserved: 0.0
+100.83.134.148: iteration 10/ 10000 | consumed samples: 2560 | consumed tokens: 5242880 | elapsed time per iteration (ms): 6645.2 | learning rate: 1.500E-06 | global batch size: 256 | loss scale: 0.0 | grad norm: 1.000 | num zeros: 1.0 | number of skipped iterations: 10 | number of nan iterations: 0 | samples per second: 38.524 | TFLOPs: 43.98 |
+100.83.134.158: [2024-04-26 09:38:18,241] [INFO] [logging.py:96:log_dist] [Rank 0] step=20, skipped=0, lr=[2.9999999999999997e-06, 2.9999999999999997e-06], mom=[(0.9, 0.95), (0.9, 0.95)]
+100.83.134.158: steps: 20 loss: 9.1166 iter time (s): 3.654 samples/sec: 70.054
+100.83.134.148: iteration 20/ 10000 | consumed samples: 5120 | consumed tokens: 10485760 | elapsed time per iteration (ms): 3653.9 | learning rate: 3.000E-06 | global batch size: 256 | loss scale: 0.0 | grad norm: 1.000 | num zeros: 1.0 | number of skipped iterations: 10 | number of nan iterations: 0 | samples per second: 70.062 | TFLOPs: 79.99 |
+100.83.134.158: [2024-04-26 09:38:54,394] [INFO] [logging.py:96:log_dist] [Rank 0] step=30, skipped=0, lr=[4.499999999999999e-06, 4.499999999999999e-06], mom=[(0.9, 0.95), (0.9, 0.95)]
+100.83.134.158: steps: 30 loss: 8.2314 iter time (s): 3.615 samples/sec: 70.813
+100.83.134.148: iteration 30/ 10000 | consumed samples: 7680 | consumed tokens: 15728640 | elapsed time per iteration (ms): 3615.2 | learning rate: 4.500E-06 | global batch size: 256 | loss scale: 0.0 | grad norm: 1.000 | num zeros: 1.0 | number of skipped iterations: 10 | number of nan iterations: 0 | samples per second: 70.812 | TFLOPs: 80.84 |
+100.83.134.158: [2024-04-26 09:39:31,044] [INFO] [logging.py:96:log_dist] [Rank 0] step=40, skipped=0, lr=[5.999999999999999e-06, 5.999999999999999e-06], mom=[(0.9, 0.95), (0.9, 0.95)]
+100.83.134.158: steps: 40 loss: 7.9237 iter time (s): 3.665 samples/sec: 69.849
+100.83.134.148: iteration 40/ 10000 | consumed samples: 10240 | consumed tokens: 20971520 | elapsed time per iteration (ms): 3665.2 | learning rate: 6.000E-06 | global batch size: 256 | loss scale: 0.0 | grad norm: 1.000 | num zeros: 1.0 | number of skipped iterations: 10 | number of nan iterations: 0 | samples per second: 69.846 | TFLOPs: 79.74 |
+100.83.134.158: [2024-04-26 09:40:07,861] [INFO] [logging.py:96:log_dist] [Rank 0] step=50, skipped=0, lr=[7.499999999999999e-06, 7.499999999999999e-06], mom=[(0.9, 0.95), (0.9, 0.95)]
+100.83.134.158: steps: 50
loss: 7.5499 iter time (s): 3.681 samples/sec: 69.537 +100.83.134.148: iteration 50/ 10000 | consumed samples: 12800 | consumed tokens: 26214400 | elapsed time per iteration (ms): 3681.4 | learning rate: 7.500E-06 | global batch size: 256 | loss scale: 0.0 | grad norm: 1.000 | num zeros: 1.0 | number of skipped iterations: 10 | number of nan iterations: 0 | samples per second: 69.538 | TFLOPs: 79.39 | +100.83.134.158: [2024-04-26 09:40:44,864] [INFO] [logging.py:96:log_dist] [Rank 0] step=60, skipped=0, lr=[8.999999999999999e-06, 8.999999999999999e-06], mom=[(0.9, 0.95), (0.9, 0.95)] +100.83.134.158: steps: 60 loss: 6.8255 iter time (s): 3.700 samples/sec: 69.184 +100.83.134.148: iteration 60/ 10000 | consumed samples: 15360 | consumed tokens: 31457280 | elapsed time per iteration (ms): 3700.3 | learning rate: 9.000E-06 | global batch size: 256 | loss scale: 0.0 | grad norm: 1.000 | num zeros: 1.0 | number of skipped iterations: 10 | number of nan iterations: 0 | samples per second: 69.183 | TFLOPs: 78.98 | +100.83.134.158: [2024-04-26 09:41:22,441] [INFO] [logging.py:96:log_dist] [Rank 0] step=70, skipped=0, lr=[1.05e-05, 1.05e-05], mom=[(0.9, 0.95), (0.9, 0.95)] +100.83.134.158: steps: 70 loss: 6.3908 iter time (s): 3.758 samples/sec: 68.119 +100.83.134.148: iteration 70/ 10000 | consumed samples: 17920 | consumed tokens: 36700160 | elapsed time per iteration (ms): 3757.4 | learning rate: 1.050E-05 | global batch size: 256 | loss scale: 0.0 | grad norm: 1.000 | num zeros: 1.0 | number of skipped iterations: 10 | number of nan iterations: 0 | samples per second: 68.132 | TFLOPs: 77.78 | +100.83.134.158: [2024-04-26 09:42:00,152] [INFO] [logging.py:96:log_dist] [Rank 0] step=80, skipped=0, lr=[1.1999999999999999e-05, 1.1999999999999999e-05], mom=[(0.9, 0.95), (0.9, 0.95)] +100.83.134.158: steps: 80 loss: 6.1744 iter time (s): 3.771 samples/sec: 67.881 +100.83.134.148: iteration 80/ 10000 | consumed samples: 20480 | consumed tokens: 41943040 | elapsed time per iteration (ms): 3771.5 | learning rate: 1.200E-05 | global batch size: 256 | loss scale: 0.0 | grad norm: 1.000 | num zeros: 1.0 | number of skipped iterations: 10 | number of nan iterations: 0 | samples per second: 67.878 | TFLOPs: 77.49 | +100.83.134.158: [2024-04-26 09:42:37,432] [INFO] [logging.py:96:log_dist] [Rank 0] step=90, skipped=0, lr=[1.3499999999999998e-05, 1.3499999999999998e-05], mom=[(0.9, 0.95), (0.9, 0.95)] +100.83.134.158: steps: 90 loss: 5.9417 iter time (s): 3.728 samples/sec: 68.666 +100.83.134.148: iteration 90/ 10000 | consumed samples: 23040 | consumed tokens: 47185920 | elapsed time per iteration (ms): 3728.1 | learning rate: 1.350E-05 | global batch size: 256 | loss scale: 0.0 | grad norm: 1.000 | num zeros: 1.0 | number of skipped iterations: 10 | number of nan iterations: 0 | samples per second: 68.668 | TFLOPs: 78.40 | +100.83.134.158: [2024-04-26 09:43:15,289] [INFO] [logging.py:96:log_dist] [Rank 0] step=100, skipped=0, lr=[1.4999999999999999e-05, 1.4999999999999999e-05], mom=[(0.9, 0.95), (0.9, 0.95)] +100.83.134.158: steps: 100 loss: 5.7495 iter time (s): 3.786 samples/sec: 67.623 +100.83.134.148: iteration 100/ 10000 | consumed samples: 25600 | consumed tokens: 52428800 | elapsed time per iteration (ms): 3785.7 | learning rate: 1.500E-05 | global batch size: 256 | loss scale: 0.0 | grad norm: 1.000 | num zeros: 1.0 | number of skipped iterations: 10 | number of nan iterations: 0 | samples per second: 67.623 | TFLOPs: 77.20 | +100.83.134.158: [2024-04-26 09:43:53,034] [INFO] [logging.py:96:log_dist] 
[Rank 0] step=110, skipped=0, lr=[1.6499999999999998e-05, 1.6499999999999998e-05], mom=[(0.9, 0.95), (0.9, 0.95)] +100.83.134.158: steps: 110 loss: 5.4953 iter time (s): 3.774 samples/sec: 67.827 +100.83.134.148: iteration 110/ 10000 | consumed samples: 28160 | consumed tokens: 57671680 | elapsed time per iteration (ms): 3774.5 | learning rate: 1.650E-05 | global batch size: 256 | loss scale: 0.0 | grad norm: 1.000 | num zeros: 1.0 | number of skipped iterations: 10 | number of nan iterations: 0 | samples per second: 67.824 | TFLOPs: 77.43 | +100.83.134.158: [2024-04-26 09:44:30,413] [INFO] [logging.py:96:log_dist] [Rank 0] step=120, skipped=0, lr=[1.7999999999999997e-05, 1.7999999999999997e-05], mom=[(0.9, 0.95), (0.9, 0.95)] +100.83.134.158: steps: 120 loss: 5.3656 iter time (s): 3.738 samples/sec: 68.487 +100.83.134.148: iteration 120/ 10000 | consumed samples: 30720 | consumed tokens: 62914560 | elapsed time per iteration (ms): 3737.7 | learning rate: 1.800E-05 | global batch size: 256 | loss scale: 0.0 | grad norm: 1.000 | num zeros: 1.0 | number of skipped iterations: 10 | number of nan iterations: 0 | samples per second: 68.491 | TFLOPs: 78.19 | +100.83.134.158: [2024-04-26 09:45:07,678] [INFO] [logging.py:96:log_dist] [Rank 0] step=130, skipped=0, lr=[1.95e-05, 1.95e-05], mom=[(0.9, 0.95), (0.9, 0.95)] +100.83.134.158: steps: 130 loss: 5.2566 iter time (s): 3.727 samples/sec: 68.687 +100.83.134.148: iteration 130/ 10000 | consumed samples: 33280 | consumed tokens: 68157440 | elapsed time per iteration (ms): 3726.8 | learning rate: 1.950E-05 | global batch size: 256 | loss scale: 0.0 | grad norm: 1.000 | num zeros: 1.0 | number of skipped iterations: 10 | number of nan iterations: 0 | samples per second: 68.691 | TFLOPs: 78.42 | +100.83.134.158: [2024-04-26 09:45:45,286] [INFO] [logging.py:96:log_dist] [Rank 0] step=140, skipped=0, lr=[2.1e-05, 2.1e-05], mom=[(0.9, 0.95), (0.9, 0.95)] +100.83.134.158: steps: 140 loss: 5.0603 iter time (s): 3.762 samples/sec: 68.055 +100.83.134.148: iteration 140/ 10000 | consumed samples: 35840 | consumed tokens: 73400320 | elapsed time per iteration (ms): 3760.6 | learning rate: 2.100E-05 | global batch size: 256 | loss scale: 0.0 | grad norm: 1.000 | num zeros: 1.0 | number of skipped iterations: 10 | number of nan iterations: 0 | samples per second: 68.074 | TFLOPs: 77.72 | +100.83.134.158: [2024-04-26 09:46:22,490] [INFO] [logging.py:96:log_dist] [Rank 0] step=150, skipped=0, lr=[2.2499999999999998e-05, 2.2499999999999998e-05], mom=[(0.9, 0.95), (0.9, 0.95)] +100.83.134.158: steps: 150 loss: 4.8199 iter time (s): 3.721 samples/sec: 68.805 +100.83.134.148: iteration 150/ 10000 | consumed samples: 38400 | consumed tokens: 78643200 | elapsed time per iteration (ms): 3720.5 | learning rate: 2.250E-05 | global batch size: 256 | loss scale: 0.0 | grad norm: 1.000 | num zeros: 1.0 | number of skipped iterations: 10 | number of nan iterations: 0 | samples per second: 68.807 | TFLOPs: 78.56 | +100.83.134.158: [2024-04-26 09:46:59,516] [INFO] [logging.py:96:log_dist] [Rank 0] step=160, skipped=0, lr=[2.3999999999999997e-05, 2.3999999999999997e-05], mom=[(0.9, 0.95), (0.9, 0.95)] +100.83.134.158: steps: 160 loss: 4.8251 iter time (s): 3.704 samples/sec: 69.122 +100.83.134.148: iteration 160/ 10000 | consumed samples: 40960 | consumed tokens: 83886080 | elapsed time per iteration (ms): 3702.5 | learning rate: 2.400E-05 | global batch size: 256 | loss scale: 0.0 | grad norm: 1.000 | num zeros: 1.0 | number of skipped iterations: 10 | number of nan 
iterations: 0 | samples per second: 69.143 | TFLOPs: 78.94 | +100.83.134.158: [2024-04-26 09:47:37,058] [INFO] [logging.py:96:log_dist] [Rank 0] step=170, skipped=0, lr=[2.55e-05, 2.55e-05], mom=[(0.9, 0.95), (0.9, 0.95)] +100.83.134.158: steps: 170 loss: 4.6819 iter time (s): 3.755 samples/sec: 68.182 +100.83.134.148: iteration 170/ 10000 | consumed samples: 43520 | consumed tokens: 89128960 | elapsed time per iteration (ms): 3754.2 | learning rate: 2.550E-05 | global batch size: 256 | loss scale: 0.0 | grad norm: 1.000 | num zeros: 1.0 | number of skipped iterations: 10 | number of nan iterations: 0 | samples per second: 68.190 | TFLOPs: 77.85 | +100.83.134.158: wandb: ERROR Error while calling W&B API: context deadline exceeded () +100.83.134.148: wandb: ERROR Error while calling W&B API: context deadline exceeded () +100.83.134.158: [2024-04-26 09:48:14,441] [INFO] [logging.py:96:log_dist] [Rank 0] step=180, skipped=0, lr=[2.6999999999999996e-05, 2.6999999999999996e-05], mom=[(0.9, 0.95), (0.9, 0.95)] +100.83.134.158: steps: 180 loss: 4.4659 iter time (s): 3.738 samples/sec: 68.483 +100.83.134.148: iteration 180/ 10000 | consumed samples: 46080 | consumed tokens: 94371840 | elapsed time per iteration (ms): 3738.4 | learning rate: 2.700E-05 | global batch size: 256 | loss scale: 0.0 | grad norm: 1.000 | num zeros: 1.0 | number of skipped iterations: 10 | number of nan iterations: 0 | samples per second: 68.479 | TFLOPs: 78.18 | +100.83.134.158: [2024-04-26 09:48:51,706] [INFO] [logging.py:96:log_dist] [Rank 0] step=190, skipped=0, lr=[2.8499999999999998e-05, 2.8499999999999998e-05], mom=[(0.9, 0.95), (0.9, 0.95)] +100.83.134.158: steps: 190 loss: 4.4005 iter time (s): 3.726 samples/sec: 68.698 +100.83.134.148: iteration 190/ 10000 | consumed samples: 48640 | consumed tokens: 99614720 | elapsed time per iteration (ms): 3726.4 | learning rate: 2.850E-05 | global batch size: 256 | loss scale: 0.0 | grad norm: 1.000 | num zeros: 1.0 | number of skipped iterations: 10 | number of nan iterations: 0 | samples per second: 68.698 | TFLOPs: 78.43 | +100.83.134.158: [2024-04-26 09:49:29,334] [INFO] [logging.py:96:log_dist] [Rank 0] step=200, skipped=0, lr=[2.9999999999999997e-05, 2.9999999999999997e-05], mom=[(0.9, 0.95), (0.9, 0.95)] +100.83.134.158: steps: 200 loss: 4.2920 iter time (s): 3.763 samples/sec: 68.030 +100.83.134.148: iteration 200/ 10000 | consumed samples: 51200 | consumed tokens: 104857600 | elapsed time per iteration (ms): 3762.7 | learning rate: 3.000E-05 | global batch size: 256 | loss scale: 0.0 | grad norm: 1.000 | num zeros: 1.0 | number of skipped iterations: 10 | number of nan iterations: 0 | samples per second: 68.035 | TFLOPs: 77.67 | +100.83.134.158: [2024-04-26 09:50:06,539] [INFO] [logging.py:96:log_dist] [Rank 0] step=210, skipped=0, lr=[3.15e-05, 3.15e-05], mom=[(0.9, 0.95), (0.9, 0.95)] +100.83.134.158: steps: 210 loss: 4.2676 iter time (s): 3.721 samples/sec: 68.807 +100.83.134.148: iteration 210/ 10000 | consumed samples: 53760 | consumed tokens: 110100480 | elapsed time per iteration (ms): 3720.4 | learning rate: 3.150E-05 | global batch size: 256 | loss scale: 0.0 | grad norm: 1.000 | num zeros: 1.0 | number of skipped iterations: 10 | number of nan iterations: 0 | samples per second: 68.810 | TFLOPs: 78.56 | +100.83.134.158: [2024-04-26 09:50:44,074] [INFO] [logging.py:96:log_dist] [Rank 0] step=220, skipped=0, lr=[3.2999999999999996e-05, 3.2999999999999996e-05], mom=[(0.9, 0.95), (0.9, 0.95)] +100.83.134.158: steps: 220 loss: 4.1078 iter time (s): 3.754 
samples/sec: 68.196 +100.83.134.148: iteration 220/ 10000 | consumed samples: 56320 | consumed tokens: 115343360 | elapsed time per iteration (ms): 3753.7 | learning rate: 3.300E-05 | global batch size: 256 | loss scale: 0.0 | grad norm: 1.000 | num zeros: 1.0 | number of skipped iterations: 10 | number of nan iterations: 0 | samples per second: 68.199 | TFLOPs: 77.86 | +100.83.134.158: [2024-04-26 09:51:21,473] [INFO] [logging.py:96:log_dist] [Rank 0] step=230, skipped=0, lr=[3.45e-05, 3.45e-05], mom=[(0.9, 0.95), (0.9, 0.95)] +100.83.134.158: steps: 230 loss: 4.1238 iter time (s): 3.740 samples/sec: 68.453 +100.83.134.148: iteration 230/ 10000 | consumed samples: 58880 | consumed tokens: 120586240 | elapsed time per iteration (ms): 3740.0 | learning rate: 3.450E-05 | global batch size: 256 | loss scale: 0.0 | grad norm: 1.000 | num zeros: 1.0 | number of skipped iterations: 10 | number of nan iterations: 0 | samples per second: 68.449 | TFLOPs: 78.15 | +100.83.134.158: [2024-04-26 09:51:59,139] [INFO] [logging.py:96:log_dist] [Rank 0] step=240, skipped=0, lr=[3.5999999999999994e-05, 3.5999999999999994e-05], mom=[(0.9, 0.95), (0.9, 0.95)] +100.83.134.158: steps: 240 loss: 3.9959 iter time (s): 3.767 samples/sec: 67.964 +100.83.134.148: iteration 240/ 10000 | consumed samples: 61440 | consumed tokens: 125829120 | elapsed time per iteration (ms): 3766.9 | learning rate: 3.600E-05 | global batch size: 256 | loss scale: 0.0 | grad norm: 1.000 | num zeros: 1.0 | number of skipped iterations: 10 | number of nan iterations: 0 | samples per second: 67.960 | TFLOPs: 77.59 | +100.83.134.158: [2024-04-26 09:52:37,040] [INFO] [logging.py:96:log_dist] [Rank 0] step=250, skipped=0, lr=[3.75e-05, 3.75e-05], mom=[(0.9, 0.95), (0.9, 0.95)] +100.83.134.158: steps: 250 loss: 4.0875 iter time (s): 3.790 samples/sec: 67.550 +100.83.134.148: iteration 250/ 10000 | consumed samples: 64000 | consumed tokens: 131072000 | elapsed time per iteration (ms): 3789.4 | learning rate: 3.750E-05 | global batch size: 256 | loss scale: 0.0 | grad norm: 1.000 | num zeros: 1.0 | number of skipped iterations: 10 | number of nan iterations: 0 | samples per second: 67.556 | TFLOPs: 77.13 | +100.83.134.158: [2024-04-26 09:53:14,523] [INFO] [logging.py:96:log_dist] [Rank 0] step=260, skipped=0, lr=[3.9e-05, 3.9e-05], mom=[(0.9, 0.95), (0.9, 0.95)] +100.83.134.158: steps: 260 loss: 3.9880 iter time (s): 3.748 samples/sec: 68.297 +100.83.134.148: iteration 260/ 10000 | consumed samples: 66560 | consumed tokens: 136314880 | elapsed time per iteration (ms): 3748.4 | learning rate: 3.900E-05 | global batch size: 256 | loss scale: 0.0 | grad norm: 1.000 | num zeros: 1.0 | number of skipped iterations: 10 | number of nan iterations: 0 | samples per second: 68.296 | TFLOPs: 77.97 | +100.83.134.158: [2024-04-26 09:53:51,329] [INFO] [logging.py:96:log_dist] [Rank 0] step=270, skipped=0, lr=[4.0499999999999995e-05, 4.0499999999999995e-05], mom=[(0.9, 0.95), (0.9, 0.95)] +100.83.134.158: steps: 270 loss: 3.9714 iter time (s): 3.681 samples/sec: 69.553 +100.83.134.148: iteration 270/ 10000 | consumed samples: 69120 | consumed tokens: 141557760 | elapsed time per iteration (ms): 3680.5 | learning rate: 4.050E-05 | global batch size: 256 | loss scale: 0.0 | grad norm: 1.000 | num zeros: 1.0 | number of skipped iterations: 10 | number of nan iterations: 0 | samples per second: 69.556 | TFLOPs: 79.41 | +100.83.134.158: [2024-04-26 09:54:28,522] [INFO] [logging.py:96:log_dist] [Rank 0] step=280, skipped=0, lr=[4.2e-05, 4.2e-05], mom=[(0.9, 0.95), 
(0.9, 0.95)] +100.83.134.158: steps: 280 loss: 3.7347 iter time (s): 3.720 samples/sec: 68.819 +100.83.134.148: iteration 280/ 10000 | consumed samples: 71680 | consumed tokens: 146800640 | elapsed time per iteration (ms): 3719.4 | learning rate: 4.200E-05 | global batch size: 256 | loss scale: 0.0 | grad norm: 1.000 | num zeros: 1.0 | number of skipped iterations: 10 | number of nan iterations: 0 | samples per second: 68.828 | TFLOPs: 78.58 | +100.83.134.158: [2024-04-26 09:55:05,968] [INFO] [logging.py:96:log_dist] [Rank 0] step=290, skipped=0, lr=[4.35e-05, 4.35e-05], mom=[(0.9, 0.95), (0.9, 0.95)] +100.83.134.158: steps: 290 loss: 3.8177 iter time (s): 3.745 samples/sec: 68.360 +100.83.134.148: iteration 290/ 10000 | consumed samples: 74240 | consumed tokens: 152043520 | elapsed time per iteration (ms): 3744.7 | learning rate: 4.350E-05 | global batch size: 256 | loss scale: 0.0 | grad norm: 1.000 | num zeros: 1.0 | number of skipped iterations: 10 | number of nan iterations: 0 | samples per second: 68.363 | TFLOPs: 78.05 | +100.83.134.158: [2024-04-26 09:55:43,866] [INFO] [logging.py:96:log_dist] [Rank 0] step=300, skipped=0, lr=[4.4999999999999996e-05, 4.4999999999999996e-05], mom=[(0.9, 0.95), (0.9, 0.95)] +100.83.134.158: steps: 300 loss: 3.8041 iter time (s): 3.790 samples/sec: 67.548 +100.83.134.148: iteration 300/ 10000 | consumed samples: 76800 | consumed tokens: 157286400 | elapsed time per iteration (ms): 3789.7 | learning rate: 4.500E-05 | global batch size: 256 | loss scale: 0.0 | grad norm: 1.000 | num zeros: 1.0 | number of skipped iterations: 10 | number of nan iterations: 0 | samples per second: 67.552 | TFLOPs: 77.12 | +100.83.134.158: [2024-04-26 09:56:20,561] [INFO] [logging.py:96:log_dist] [Rank 0] step=310, skipped=0, lr=[4.649999999999999e-05, 4.649999999999999e-05], mom=[(0.9, 0.95), (0.9, 0.95)] +100.83.134.158: steps: 310 loss: 3.5969 iter time (s): 3.669 samples/sec: 69.766 +100.83.134.148: iteration 310/ 10000 | consumed samples: 79360 | consumed tokens: 162529280 | elapsed time per iteration (ms): 3669.7 | learning rate: 4.650E-05 | global batch size: 256 | loss scale: 0.0 | grad norm: 1.000 | num zeros: 1.0 | number of skipped iterations: 10 | number of nan iterations: 0 | samples per second: 69.761 | TFLOPs: 79.64 | +100.83.134.158: [2024-04-26 09:56:57,618] [INFO] [logging.py:96:log_dist] [Rank 0] step=320, skipped=0, lr=[4.7999999999999994e-05, 4.7999999999999994e-05], mom=[(0.9, 0.95), (0.9, 0.95)] +100.83.134.158: steps: 320 loss: 3.6136 iter time (s): 3.706 samples/sec: 69.084 +100.83.134.148: iteration 320/ 10000 | consumed samples: 81920 | consumed tokens: 167772160 | elapsed time per iteration (ms): 3705.6 | learning rate: 4.800E-05 | global batch size: 256 | loss scale: 0.0 | grad norm: 1.000 | num zeros: 1.0 | number of skipped iterations: 10 | number of nan iterations: 0 | samples per second: 69.084 | TFLOPs: 78.87 | +100.83.134.158: [2024-04-26 09:57:35,820] [INFO] [logging.py:96:log_dist] [Rank 0] step=330, skipped=0, lr=[4.95e-05, 4.95e-05], mom=[(0.9, 0.95), (0.9, 0.95)] +100.83.134.158: steps: 330 loss: 3.5642 iter time (s): 3.820 samples/sec: 67.013 +100.83.134.148: iteration 330/ 10000 | consumed samples: 84480 | consumed tokens: 173015040 | elapsed time per iteration (ms): 3820.0 | learning rate: 4.950E-05 | global batch size: 256 | loss scale: 0.0 | grad norm: 1.000 | num zeros: 1.0 | number of skipped iterations: 10 | number of nan iterations: 0 | samples per second: 67.015 | TFLOPs: 76.51 | +100.83.134.158: [2024-04-26 09:58:13,408] 
[INFO] [logging.py:96:log_dist] [Rank 0] step=340, skipped=0, lr=[5.1e-05, 5.1e-05], mom=[(0.9, 0.95), (0.9, 0.95)] +100.83.134.158: steps: 340 loss: 3.5243 iter time (s): 3.759 samples/sec: 68.104 +100.83.134.148: iteration 340/ 10000 | consumed samples: 87040 | consumed tokens: 178257920 | elapsed time per iteration (ms): 3759.0 | learning rate: 5.100E-05 | global batch size: 256 | loss scale: 0.0 | grad norm: 1.000 | num zeros: 1.0 | number of skipped iterations: 10 | number of nan iterations: 0 | samples per second: 68.103 | TFLOPs: 77.75 | +100.83.134.158: [2024-04-26 09:58:49,797] [INFO] [logging.py:96:log_dist] [Rank 0] step=350, skipped=0, lr=[5.2499999999999995e-05, 5.2499999999999995e-05], mom=[(0.9, 0.95), (0.9, 0.95)] +100.83.134.158: steps: 350 loss: 3.4946 iter time (s): 3.639 samples/sec: 70.356 +100.83.134.148: iteration 350/ 10000 | consumed samples: 89600 | consumed tokens: 183500800 | elapsed time per iteration (ms): 3638.8 | learning rate: 5.250E-05 | global batch size: 256 | loss scale: 0.0 | grad norm: 1.000 | num zeros: 1.0 | number of skipped iterations: 10 | number of nan iterations: 0 | samples per second: 70.353 | TFLOPs: 80.32 | +100.83.134.158: [2024-04-26 09:59:27,041] [INFO] [logging.py:96:log_dist] [Rank 0] step=360, skipped=0, lr=[5.399999999999999e-05, 5.399999999999999e-05], mom=[(0.9, 0.95), (0.9, 0.95)] +100.83.134.158: steps: 360 loss: 3.3915 iter time (s): 3.725 samples/sec: 68.728 +100.83.134.148: iteration 360/ 10000 | consumed samples: 92160 | consumed tokens: 188743680 | elapsed time per iteration (ms): 3724.6 | learning rate: 5.400E-05 | global batch size: 256 | loss scale: 0.0 | grad norm: 1.000 | num zeros: 1.0 | number of skipped iterations: 10 | number of nan iterations: 0 | samples per second: 68.731 | TFLOPs: 78.47 | +100.83.134.158: [2024-04-26 10:00:03,828] [INFO] [logging.py:96:log_dist] [Rank 0] step=370, skipped=0, lr=[5.5499999999999994e-05, 5.5499999999999994e-05], mom=[(0.9, 0.95), (0.9, 0.95)] +100.83.134.158: steps: 370 loss: 3.3911 iter time (s): 3.678 samples/sec: 69.599 +100.83.134.148: iteration 370/ 10000 | consumed samples: 94720 | consumed tokens: 193986560 | elapsed time per iteration (ms): 3678.3 | learning rate: 5.550E-05 | global batch size: 256 | loss scale: 0.0 | grad norm: 1.000 | num zeros: 1.0 | number of skipped iterations: 10 | number of nan iterations: 0 | samples per second: 69.597 | TFLOPs: 79.46 | +100.83.134.158: [2024-04-26 10:00:40,753] [INFO] [logging.py:96:log_dist] [Rank 0] step=380, skipped=0, lr=[5.6999999999999996e-05, 5.6999999999999996e-05], mom=[(0.9, 0.95), (0.9, 0.95)] +100.83.134.158: steps: 380 loss: 3.3740 iter time (s): 3.693 samples/sec: 69.329 +100.83.134.148: iteration 380/ 10000 | consumed samples: 97280 | consumed tokens: 199229440 | elapsed time per iteration (ms): 3692.6 | learning rate: 5.700E-05 | global batch size: 256 | loss scale: 0.0 | grad norm: 1.000 | num zeros: 1.0 | number of skipped iterations: 10 | number of nan iterations: 0 | samples per second: 69.327 | TFLOPs: 79.15 | +100.83.134.158: [2024-04-26 10:01:18,538] [INFO] [logging.py:96:log_dist] [Rank 0] step=390, skipped=0, lr=[5.85e-05, 5.85e-05], mom=[(0.9, 0.95), (0.9, 0.95)] +100.83.134.158: steps: 390 loss: 3.4332 iter time (s): 3.779 samples/sec: 67.743 +100.83.134.148: iteration 390/ 10000 | consumed samples: 99840 | consumed tokens: 204472320 | elapsed time per iteration (ms): 3778.6 | learning rate: 5.850E-05 | global batch size: 256 | loss scale: 0.0 | grad norm: 1.000 | num zeros: 1.0 | number of skipped 
iterations: 10 | number of nan iterations: 0 | samples per second: 67.749 | TFLOPs: 77.35 | +100.83.134.158: [2024-04-26 10:01:56,717] [INFO] [logging.py:96:log_dist] [Rank 0] step=400, skipped=0, lr=[5.9999999999999995e-05, 5.9999999999999995e-05], mom=[(0.9, 0.95), (0.9, 0.95)] +100.83.134.158: steps: 400 loss: 3.3010 iter time (s): 3.818 samples/sec: 67.051 +100.83.134.148: iteration 400/ 10000 | consumed samples: 102400 | consumed tokens: 209715200 | elapsed time per iteration (ms): 3817.8 | learning rate: 6.000E-05 | global batch size: 256 | loss scale: 0.0 | grad norm: 1.000 | num zeros: 1.0 | number of skipped iterations: 10 | number of nan iterations: 0 | samples per second: 67.054 | TFLOPs: 76.55 | +100.83.134.158: [2024-04-26 10:02:35,658] [INFO] [logging.py:96:log_dist] [Rank 0] step=410, skipped=0, lr=[6.149999999999999e-05, 6.149999999999999e-05], mom=[(0.9, 0.95), (0.9, 0.95)] +100.83.134.158: steps: 410 loss: 3.1951 iter time (s): 3.894 samples/sec: 65.737 +100.83.134.148: iteration 410/ 10000 | consumed samples: 104960 | consumed tokens: 214958080 | elapsed time per iteration (ms): 3894.2 | learning rate: 6.150E-05 | global batch size: 256 | loss scale: 0.0 | grad norm: 1.000 | num zeros: 1.0 | number of skipped iterations: 10 | number of nan iterations: 0 | samples per second: 65.739 | TFLOPs: 75.05 | +100.83.134.158: [2024-04-26 10:03:12,256] [INFO] [logging.py:96:log_dist] [Rank 0] step=420, skipped=0, lr=[6.3e-05, 6.3e-05], mom=[(0.9, 0.95), (0.9, 0.95)] +100.83.134.158: steps: 420 loss: 3.2765 iter time (s): 3.659 samples/sec: 69.959 +100.83.134.148: iteration 420/ 10000 | consumed samples: 107520 | consumed tokens: 220200960 | elapsed time per iteration (ms): 3659.6 | learning rate: 6.300E-05 | global batch size: 256 | loss scale: 0.0 | grad norm: 1.000 | num zeros: 1.0 | number of skipped iterations: 10 | number of nan iterations: 0 | samples per second: 69.954 | TFLOPs: 79.86 | +100.83.134.158: [2024-04-26 10:03:49,475] [INFO] [logging.py:96:log_dist] [Rank 0] step=430, skipped=0, lr=[6.449999999999998e-05, 6.449999999999998e-05], mom=[(0.9, 0.95), (0.9, 0.95)] +100.83.134.158: steps: 430 loss: 3.2809 iter time (s): 3.723 samples/sec: 68.767 +100.83.134.148: iteration 430/ 10000 | consumed samples: 110080 | consumed tokens: 225443840 | elapsed time per iteration (ms): 3722.1 | learning rate: 6.450E-05 | global batch size: 256 | loss scale: 0.0 | grad norm: 1.000 | num zeros: 1.0 | number of skipped iterations: 10 | number of nan iterations: 0 | samples per second: 68.779 | TFLOPs: 78.52 | +100.83.134.158: [2024-04-26 10:04:27,203] [INFO] [logging.py:96:log_dist] [Rank 0] step=440, skipped=0, lr=[6.599999999999999e-05, 6.599999999999999e-05], mom=[(0.9, 0.95), (0.9, 0.95)] +100.83.134.158: steps: 440 loss: 3.1279 iter time (s): 3.773 samples/sec: 67.850 +100.83.134.148: iteration 440/ 10000 | consumed samples: 112640 | consumed tokens: 230686720 | elapsed time per iteration (ms): 3772.5 | learning rate: 6.600E-05 | global batch size: 256 | loss scale: 0.0 | grad norm: 1.000 | num zeros: 1.0 | number of skipped iterations: 10 | number of nan iterations: 0 | samples per second: 67.859 | TFLOPs: 77.47 | +100.83.134.158: [2024-04-26 10:05:04,392] [INFO] [logging.py:96:log_dist] [Rank 0] step=450, skipped=0, lr=[6.749999999999999e-05, 6.749999999999999e-05], mom=[(0.9, 0.95), (0.9, 0.95)] +100.83.134.158: steps: 450 loss: 3.1501 iter time (s): 3.719 samples/sec: 68.828 +100.83.134.148: iteration 450/ 10000 | consumed samples: 115200 | consumed tokens: 235929600 | elapsed 
time per iteration (ms): 3719.1 | learning rate: 6.750E-05 | global batch size: 256 | loss scale: 0.0 | grad norm: 1.000 | num zeros: 1.0 | number of skipped iterations: 10 | number of nan iterations: 0 | samples per second: 68.833 | TFLOPs: 78.58 | +100.83.134.158: [2024-04-26 10:05:41,981] [INFO] [logging.py:96:log_dist] [Rank 0] step=460, skipped=0, lr=[6.9e-05, 6.9e-05], mom=[(0.9, 0.95), (0.9, 0.95)] +100.83.134.158: steps: 460 loss: 2.8835 iter time (s): 3.759 samples/sec: 68.099 +100.83.134.148: iteration 460/ 10000 | consumed samples: 117760 | consumed tokens: 241172480 | elapsed time per iteration (ms): 3758.9 | learning rate: 6.900E-05 | global batch size: 256 | loss scale: 0.0 | grad norm: 1.000 | num zeros: 1.0 | number of skipped iterations: 10 | number of nan iterations: 0 | samples per second: 68.105 | TFLOPs: 77.75 | +100.83.134.158: [2024-04-26 10:06:20,635] [INFO] [logging.py:96:log_dist] [Rank 0] step=470, skipped=0, lr=[7.049999999999999e-05, 7.049999999999999e-05], mom=[(0.9, 0.95), (0.9, 0.95)] +100.83.134.158: steps: 470 loss: 3.0736 iter time (s): 3.865 samples/sec: 66.230 +100.83.134.148: iteration 470/ 10000 | consumed samples: 120320 | consumed tokens: 246415360 | elapsed time per iteration (ms): 3865.2 | learning rate: 7.050E-05 | global batch size: 256 | loss scale: 0.0 | grad norm: 1.000 | num zeros: 1.0 | number of skipped iterations: 10 | number of nan iterations: 0 | samples per second: 66.231 | TFLOPs: 75.61 | +100.83.134.158: [2024-04-26 10:06:58,166] [INFO] [logging.py:96:log_dist] [Rank 0] step=480, skipped=0, lr=[7.199999999999999e-05, 7.199999999999999e-05], mom=[(0.9, 0.95), (0.9, 0.95)] +100.83.134.158: steps: 480 loss: 2.8947 iter time (s): 3.753 samples/sec: 68.204 +100.83.134.148: iteration 480/ 10000 | consumed samples: 122880 | consumed tokens: 251658240 | elapsed time per iteration (ms): 3753.1 | learning rate: 7.200E-05 | global batch size: 256 | loss scale: 0.0 | grad norm: 1.000 | num zeros: 1.0 | number of skipped iterations: 10 | number of nan iterations: 0 | samples per second: 68.210 | TFLOPs: 77.87 | +100.83.134.158: [2024-04-26 10:07:35,531] [INFO] [logging.py:96:log_dist] [Rank 0] step=490, skipped=0, lr=[7.35e-05, 7.35e-05], mom=[(0.9, 0.95), (0.9, 0.95)] +100.83.134.158: steps: 490 loss: 2.9935 iter time (s): 3.736 samples/sec: 68.516 +100.83.134.148: iteration 490/ 10000 | consumed samples: 125440 | consumed tokens: 256901120 | elapsed time per iteration (ms): 3736.5 | learning rate: 7.350E-05 | global batch size: 256 | loss scale: 0.0 | grad norm: 1.000 | num zeros: 1.0 | number of skipped iterations: 10 | number of nan iterations: 0 | samples per second: 68.513 | TFLOPs: 78.22 | +100.83.134.158: [2024-04-26 10:08:13,180] [INFO] [logging.py:96:log_dist] [Rank 0] step=500, skipped=0, lr=[7.5e-05, 7.5e-05], mom=[(0.9, 0.95), (0.9, 0.95)] +100.83.134.158: steps: 500 loss: 2.8190 iter time (s): 3.765 samples/sec: 67.997 +100.83.134.148: iteration 500/ 10000 | consumed samples: 128000 | consumed tokens: 262144000 | elapsed time per iteration (ms): 3764.9 | learning rate: 7.500E-05 | global batch size: 256 | loss scale: 0.0 | grad norm: 1.000 | num zeros: 1.0 | number of skipped iterations: 10 | number of nan iterations: 0 | samples per second: 67.997 | TFLOPs: 77.63 | +100.83.134.148: 2024-04-26 10:08:13 Start last rank evaluation +100.83.134.158: Evaluating iter 10/10 +100.83.134.148: --------------------------------------------------------------------------------------------------------------------- +100.83.134.148: 2024-04-26 
10:08:25 | validation loss at iteration 500 | lm loss value: 2.880215E+00 | lm loss PPL: 1.781811E+01 | +100.83.134.148: --------------------------------------------------------------------------------------------------------------------- +100.83.134.158: saving checkpoint at iteration 500 to /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2 +100.83.134.158: [2024-04-26 10:08:25,349] [INFO] [logging.py:96:log_dist] [Rank 0] [Torch] Checkpoint global_step500 is about to be saved! +100.83.134.158: [2024-04-26 10:08:25,530] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/layer_01-model_00-model_states.pt... +100.83.134.158: [2024-04-26 10:08:25,783] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/layer_01-model_00-model_states.pt. +100.83.134.158: [2024-04-26 10:08:25,899] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/layer_03-model_00-model_states.pt... +100.83.134.158: [2024-04-26 10:08:25,991] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/layer_03-model_00-model_states.pt. +100.83.134.158: [2024-04-26 10:08:26,044] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/layer_04-model_00-model_states.pt... +100.83.134.158: [2024-04-26 10:08:26,137] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/layer_04-model_00-model_states.pt. +100.83.134.158: [2024-04-26 10:08:26,186] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/layer_05-model_00-model_states.pt... +100.83.134.158: [2024-04-26 10:08:26,276] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/layer_05-model_00-model_states.pt. +100.83.134.158: [2024-04-26 10:08:26,325] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/layer_06-model_00-model_states.pt... +100.83.134.158: [2024-04-26 10:08:26,415] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/layer_06-model_00-model_states.pt. +100.83.134.158: [2024-04-26 10:08:26,458] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/layer_07-model_00-model_states.pt... +100.83.134.158: [2024-04-26 10:08:26,549] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/layer_07-model_00-model_states.pt. +100.83.134.158: [2024-04-26 10:08:26,592] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/layer_08-model_00-model_states.pt... 
+100.83.134.158: [2024-04-26 10:08:26,682] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/layer_08-model_00-model_states.pt. +100.83.134.158: [2024-04-26 10:08:26,720] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/layer_09-model_00-model_states.pt... +100.83.134.158: [2024-04-26 10:08:26,789] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/layer_09-model_00-model_states.pt. +100.83.134.158: [2024-04-26 10:08:26,825] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/layer_10-model_00-model_states.pt... +100.83.134.158: [2024-04-26 10:08:26,895] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/layer_10-model_00-model_states.pt. +100.83.134.158: [2024-04-26 10:08:26,930] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/layer_11-model_00-model_states.pt... +100.83.134.158: [2024-04-26 10:08:26,999] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/layer_11-model_00-model_states.pt. +100.83.134.158: [2024-04-26 10:08:27,033] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/layer_12-model_00-model_states.pt... +100.83.134.158: [2024-04-26 10:08:27,101] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/layer_12-model_00-model_states.pt. +100.83.134.158: [2024-04-26 10:08:27,143] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/layer_13-model_00-model_states.pt... +100.83.134.158: [2024-04-26 10:08:27,211] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/layer_13-model_00-model_states.pt. +100.83.134.158: [2024-04-26 10:08:27,267] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/layer_14-model_00-model_states.pt... +100.83.134.158: [2024-04-26 10:08:27,334] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/layer_14-model_00-model_states.pt. +100.83.134.158: [2024-04-26 10:08:27,367] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/layer_15-model_00-model_states.pt... +100.83.134.158: [2024-04-26 10:08:27,436] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/layer_15-model_00-model_states.pt. 
+100.83.134.158: [2024-04-26 10:08:27,482] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/layer_16-model_00-model_states.pt... +100.83.134.158: [2024-04-26 10:08:27,552] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/layer_16-model_00-model_states.pt. +100.83.134.158: [2024-04-26 10:08:27,614] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/layer_17-model_00-model_states.pt... +100.83.134.158: [2024-04-26 10:08:27,685] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/layer_17-model_00-model_states.pt. +100.83.134.158: [2024-04-26 10:08:27,748] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/layer_18-model_00-model_states.pt... +100.83.134.158: [2024-04-26 10:08:27,818] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/layer_18-model_00-model_states.pt. +100.83.134.158: [2024-04-26 10:08:27,882] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/layer_19-model_00-model_states.pt... +100.83.134.158: [2024-04-26 10:08:27,953] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/layer_19-model_00-model_states.pt. +100.83.134.158: [2024-04-26 10:08:28,017] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/layer_20-model_00-model_states.pt... +100.83.134.158: [2024-04-26 10:08:28,096] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/layer_20-model_00-model_states.pt. +100.83.134.158: [2024-04-26 10:08:28,165] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/layer_21-model_00-model_states.pt... +100.83.134.158: [2024-04-26 10:08:28,259] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/layer_21-model_00-model_states.pt. +100.83.134.158: [2024-04-26 10:08:28,341] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/layer_22-model_00-model_states.pt... +100.83.134.158: [2024-04-26 10:08:28,428] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/layer_22-model_00-model_states.pt. +100.83.134.158: [2024-04-26 10:08:28,509] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/layer_23-model_00-model_states.pt... 
+100.83.134.158: [2024-04-26 10:08:28,592] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/layer_23-model_00-model_states.pt. +100.83.134.158: [2024-04-26 10:08:28,672] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/layer_24-model_00-model_states.pt... +100.83.134.158: [2024-04-26 10:08:28,755] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/layer_24-model_00-model_states.pt. +100.83.134.158: [2024-04-26 10:08:28,835] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/layer_25-model_00-model_states.pt... +100.83.134.158: [2024-04-26 10:08:28,917] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/layer_25-model_00-model_states.pt. +100.83.134.158: [2024-04-26 10:08:28,996] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/layer_26-model_00-model_states.pt... +100.83.134.158: [2024-04-26 10:08:29,077] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/layer_26-model_00-model_states.pt. +100.83.134.158: [2024-04-26 10:08:29,081] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/layer_28-model_00-model_states.pt... +100.83.134.158: [2024-04-26 10:08:29,087] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/layer_28-model_00-model_states.pt. +100.83.134.158: [2024-04-26 10:08:29,274] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/layer_29-model_00-model_states.pt... +100.83.134.158: [2024-04-26 10:08:29,479] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/layer_29-model_00-model_states.pt. +100.83.134.158: [2024-04-26 10:08:29,490] [INFO] [logging.py:96:log_dist] [Rank 0] Saving model checkpoint: /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/mp_rank_00_model_states.pt +100.83.134.158: [2024-04-26 10:08:29,490] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/mp_rank_00_model_states.pt... +100.83.134.158: [2024-04-26 10:08:29,510] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/mp_rank_00_model_states.pt. +100.83.134.148: [2024-04-26 10:08:29,545] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_15_mp_rank_00_optim_states.pt... 
+100.83.134.148: [2024-04-26 10:08:29,545] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_9_mp_rank_00_optim_states.pt... +100.83.134.158: [2024-04-26 10:08:29,545] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt... +100.83.134.158: [2024-04-26 10:08:29,545] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt... +100.83.134.158: [2024-04-26 10:08:29,546] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt... +100.83.134.158: [2024-04-26 10:08:29,546] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt... +100.83.134.148: [2024-04-26 10:08:29,546] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_8_mp_rank_00_optim_states.pt... +100.83.134.158: [2024-04-26 10:08:29,546] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt... +100.83.134.158: [2024-04-26 10:08:29,546] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt... +100.83.134.158: [2024-04-26 10:08:29,546] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt... +100.83.134.158: [2024-04-26 10:08:29,546] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt... +100.83.134.148: [2024-04-26 10:08:29,547] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_13_mp_rank_00_optim_states.pt... +100.83.134.148: [2024-04-26 10:08:29,547] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_11_mp_rank_00_optim_states.pt... +100.83.134.148: [2024-04-26 10:08:29,547] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_12_mp_rank_00_optim_states.pt... +100.83.134.148: [2024-04-26 10:08:29,547] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_14_mp_rank_00_optim_states.pt... 
+100.83.134.148: [2024-04-26 10:08:29,547] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_10_mp_rank_00_optim_states.pt... +100.83.134.158: [2024-04-26 10:08:31,025] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt. +100.83.134.158: [2024-04-26 10:08:31,026] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt... +100.83.134.158: [2024-04-26 10:08:31,039] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt. +100.83.134.158: [2024-04-26 10:08:31,040] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt... +100.83.134.158: [2024-04-26 10:08:31,051] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt. +100.83.134.158: [2024-04-26 10:08:31,051] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt... +100.83.134.148: [2024-04-26 10:08:31,055] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_8_mp_rank_00_optim_states.pt. +100.83.134.148: [2024-04-26 10:08:31,055] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_8_mp_rank_00_optim_states.pt... +100.83.134.148: [2024-04-26 10:08:31,058] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_13_mp_rank_00_optim_states.pt. +100.83.134.148: [2024-04-26 10:08:31,058] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_13_mp_rank_00_optim_states.pt... +100.83.134.148: [2024-04-26 10:08:31,096] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_14_mp_rank_00_optim_states.pt. +100.83.134.148: [2024-04-26 10:08:31,096] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_14_mp_rank_00_optim_states.pt... +100.83.134.148: [2024-04-26 10:08:31,104] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_10_mp_rank_00_optim_states.pt. 
+100.83.134.148: [2024-04-26 10:08:31,104] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_10_mp_rank_00_optim_states.pt... +100.83.134.148: [2024-04-26 10:08:31,109] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_9_mp_rank_00_optim_states.pt. +100.83.134.148: [2024-04-26 10:08:31,110] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_9_mp_rank_00_optim_states.pt... +100.83.134.148: [2024-04-26 10:08:31,133] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_12_mp_rank_00_optim_states.pt. +100.83.134.148: [2024-04-26 10:08:31,134] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_12_mp_rank_00_optim_states.pt... +100.83.134.158: [2024-04-26 10:08:31,135] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt. +100.83.134.158: [2024-04-26 10:08:31,136] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt... +100.83.134.148: [2024-04-26 10:08:31,136] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_15_mp_rank_00_optim_states.pt. +100.83.134.148: [2024-04-26 10:08:31,136] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_15_mp_rank_00_optim_states.pt... +100.83.134.148: [2024-04-26 10:08:31,146] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_11_mp_rank_00_optim_states.pt. +100.83.134.148: [2024-04-26 10:08:31,147] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_11_mp_rank_00_optim_states.pt... +100.83.134.158: [2024-04-26 10:08:31,175] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt. +100.83.134.158: [2024-04-26 10:08:31,175] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt... +100.83.134.158: [2024-04-26 10:08:31,183] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt. 
+100.83.134.158: [2024-04-26 10:08:31,183] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt... +100.83.134.158: [2024-04-26 10:08:31,192] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt. +100.83.134.158: [2024-04-26 10:08:31,193] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt... +100.83.134.158: [2024-04-26 10:08:31,243] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt. +100.83.134.158: [2024-04-26 10:08:31,243] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt... +100.83.134.158: [2024-04-26 10:08:32,861] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt. +100.83.134.158: [2024-04-26 10:08:32,862] [INFO] [engine.py:3481:_save_zero_checkpoint] bf16_zero checkpoint saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt +100.83.134.158: [2024-04-26 10:08:32,862] [INFO] [torch_checkpoint_engine.py:34:commit] [Torch] Checkpoint global_step500 is ready now! +100.83.134.158: [2024-04-26 10:08:33,001] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt. +100.83.134.158: [2024-04-26 10:08:33,001] [INFO] [engine.py:3481:_save_zero_checkpoint] bf16_zero checkpoint saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt +100.83.134.158: [2024-04-26 10:08:33,002] [INFO] [torch_checkpoint_engine.py:34:commit] [Torch] Checkpoint global_step500 is ready now! +100.83.134.148: [2024-04-26 10:08:33,010] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_13_mp_rank_00_optim_states.pt. +100.83.134.148: [2024-04-26 10:08:33,010] [INFO] [engine.py:3481:_save_zero_checkpoint] bf16_zero checkpoint saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_13_mp_rank_00_optim_states.pt +100.83.134.148: [2024-04-26 10:08:33,011] [INFO] [torch_checkpoint_engine.py:34:commit] [Torch] Checkpoint global_step500 is ready now! +100.83.134.148: [2024-04-26 10:08:33,310] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_8_mp_rank_00_optim_states.pt. 
+100.83.134.148: [2024-04-26 10:08:33,310] [INFO] [engine.py:3481:_save_zero_checkpoint] bf16_zero checkpoint saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_8_mp_rank_00_optim_states.pt +100.83.134.148: [2024-04-26 10:08:33,310] [INFO] [torch_checkpoint_engine.py:34:commit] [Torch] Checkpoint global_step500 is ready now! +100.83.134.158: [2024-04-26 10:08:33,338] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt. +100.83.134.158: [2024-04-26 10:08:33,339] [INFO] [engine.py:3481:_save_zero_checkpoint] bf16_zero checkpoint saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt +100.83.134.158: [2024-04-26 10:08:33,339] [INFO] [torch_checkpoint_engine.py:34:commit] [Torch] Checkpoint global_step500 is ready now! +100.83.134.148: [2024-04-26 10:08:33,476] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_15_mp_rank_00_optim_states.pt. +100.83.134.148: [2024-04-26 10:08:33,477] [INFO] [engine.py:3481:_save_zero_checkpoint] bf16_zero checkpoint saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_15_mp_rank_00_optim_states.pt +100.83.134.148: [2024-04-26 10:08:33,477] [INFO] [torch_checkpoint_engine.py:34:commit] [Torch] Checkpoint global_step500 is ready now! +100.83.134.148: [2024-04-26 10:08:33,491] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_12_mp_rank_00_optim_states.pt. +100.83.134.148: [2024-04-26 10:08:33,491] [INFO] [engine.py:3481:_save_zero_checkpoint] bf16_zero checkpoint saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_12_mp_rank_00_optim_states.pt +100.83.134.148: [2024-04-26 10:08:33,492] [INFO] [torch_checkpoint_engine.py:34:commit] [Torch] Checkpoint global_step500 is ready now! +100.83.134.148: [2024-04-26 10:08:33,498] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_9_mp_rank_00_optim_states.pt. +100.83.134.148: [2024-04-26 10:08:33,498] [INFO] [engine.py:3481:_save_zero_checkpoint] bf16_zero checkpoint saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_9_mp_rank_00_optim_states.pt +100.83.134.148: [2024-04-26 10:08:33,498] [INFO] [torch_checkpoint_engine.py:34:commit] [Torch] Checkpoint global_step500 is ready now! +100.83.134.148: [2024-04-26 10:08:33,511] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_14_mp_rank_00_optim_states.pt. +100.83.134.148: [2024-04-26 10:08:33,512] [INFO] [engine.py:3481:_save_zero_checkpoint] bf16_zero checkpoint saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_14_mp_rank_00_optim_states.pt +100.83.134.148: [2024-04-26 10:08:33,512] [INFO] [torch_checkpoint_engine.py:34:commit] [Torch] Checkpoint global_step500 is ready now! 
+100.83.134.148: [2024-04-26 10:08:33,538] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_11_mp_rank_00_optim_states.pt. +100.83.134.148: [2024-04-26 10:08:33,539] [INFO] [engine.py:3481:_save_zero_checkpoint] bf16_zero checkpoint saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_11_mp_rank_00_optim_states.pt +100.83.134.148: [2024-04-26 10:08:33,539] [INFO] [torch_checkpoint_engine.py:34:commit] [Torch] Checkpoint global_step500 is ready now! +100.83.134.148: [2024-04-26 10:08:33,552] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_10_mp_rank_00_optim_states.pt. +100.83.134.148: [2024-04-26 10:08:33,553] [INFO] [engine.py:3481:_save_zero_checkpoint] bf16_zero checkpoint saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_10_mp_rank_00_optim_states.pt +100.83.134.148: [2024-04-26 10:08:33,553] [INFO] [torch_checkpoint_engine.py:34:commit] [Torch] Checkpoint global_step500 is ready now! +100.83.134.158: [2024-04-26 10:08:33,556] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt. +100.83.134.158: [2024-04-26 10:08:33,557] [INFO] [engine.py:3481:_save_zero_checkpoint] bf16_zero checkpoint saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt +100.83.134.158: [2024-04-26 10:08:33,557] [INFO] [torch_checkpoint_engine.py:34:commit] [Torch] Checkpoint global_step500 is ready now! +100.83.134.158: [2024-04-26 10:08:34,017] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt. +100.83.134.158: [2024-04-26 10:08:34,018] [INFO] [engine.py:3481:_save_zero_checkpoint] bf16_zero checkpoint saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt +100.83.134.158: [2024-04-26 10:08:34,018] [INFO] [torch_checkpoint_engine.py:34:commit] [Torch] Checkpoint global_step500 is ready now! +100.83.134.158: [2024-04-26 10:08:34,072] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt. +100.83.134.158: [2024-04-26 10:08:34,072] [INFO] [engine.py:3481:_save_zero_checkpoint] bf16_zero checkpoint saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt +100.83.134.158: [2024-04-26 10:08:34,073] [INFO] [torch_checkpoint_engine.py:34:commit] [Torch] Checkpoint global_step500 is ready now! +100.83.134.158: [2024-04-26 10:08:34,328] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt. 
+100.83.134.158: [2024-04-26 10:08:34,328] [INFO] [engine.py:3481:_save_zero_checkpoint] bf16_zero checkpoint saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt +100.83.134.158: [2024-04-26 10:08:34,328] [INFO] [torch_checkpoint_engine.py:34:commit] [Torch] Checkpoint global_step500 is ready now! +100.83.134.158: [2024-04-26 10:08:34,761] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt. +100.83.134.158: [2024-04-26 10:08:35,942] [INFO] [engine.py:3481:_save_zero_checkpoint] bf16_zero checkpoint saved /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2/global_step500/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt +100.83.134.158: [2024-04-26 10:08:35,942] [INFO] [torch_checkpoint_engine.py:34:commit] [Torch] Checkpoint global_step500 is ready now! +100.83.134.158: successfully saved checkpoint at iteration 500 to /data/output/llama13b_600M/26-04-2024-09:36:12/checkpoints_zero_stage_2 +100.83.134.158: 3D configuration: DP=16 TP=1 PP=1 +100.83.134.158: +100.83.134.158: Verify ** layer_ ** files +100.83.134.158: +100.83.134.158: Checking pp_stage=0 +100.83.134.158: 3.input_layernorm.weight: OK [n=1] +100.83.134.158: 3.post_attention_layernorm.weight: OK [n=1] +100.83.134.158: 4.input_layernorm.weight: OK [n=1] +100.83.134.158: 4.post_attention_layernorm.weight: OK [n=1] +100.83.134.158: 5.input_layernorm.weight: OK [n=1] +100.83.134.158: 5.post_attention_layernorm.weight: OK [n=1] +100.83.134.158: 6.input_layernorm.weight: OK [n=1] +100.83.134.158: 6.post_attention_layernorm.weight: OK [n=1] +100.83.134.158: 7.input_layernorm.weight: OK [n=1] +100.83.134.158: 7.post_attention_layernorm.weight: OK [n=1] +100.83.134.158: 8.input_layernorm.weight: OK [n=1] +100.83.134.158: 8.post_attention_layernorm.weight: OK [n=1] +100.83.134.158: 9.input_layernorm.weight: OK [n=1] +100.83.134.158: 9.post_attention_layernorm.weight: OK [n=1] +100.83.134.158: 10.input_layernorm.weight: OK [n=1] +100.83.134.158: 10.post_attention_layernorm.weight: OK [n=1] +100.83.134.158: 11.input_layernorm.weight: OK [n=1] +100.83.134.158: 11.post_attention_layernorm.weight: OK [n=1] +100.83.134.158: 12.input_layernorm.weight: OK [n=1] +100.83.134.158: 12.post_attention_layernorm.weight: OK [n=1] +100.83.134.158: 13.input_layernorm.weight: OK [n=1] +100.83.134.158: 13.post_attention_layernorm.weight: OK [n=1] +100.83.134.158: 14.input_layernorm.weight: OK [n=1] +100.83.134.158: 14.post_attention_layernorm.weight: OK [n=1] +100.83.134.158: 15.input_layernorm.weight: OK [n=1] +100.83.134.158: 15.post_attention_layernorm.weight: OK [n=1] +100.83.134.158: 16.input_layernorm.weight: OK [n=1] +100.83.134.158: 16.post_attention_layernorm.weight: OK [n=1] +100.83.134.158: 17.input_layernorm.weight: OK [n=1] +100.83.134.158: 17.post_attention_layernorm.weight: OK [n=1] +100.83.134.158: 18.input_layernorm.weight: OK [n=1] +100.83.134.158: 18.post_attention_layernorm.weight: OK [n=1] +100.83.134.158: 19.input_layernorm.weight: OK [n=1] +100.83.134.158: 19.post_attention_layernorm.weight: OK [n=1] +100.83.134.158: 20.input_layernorm.weight: OK [n=1] +100.83.134.158: 20.post_attention_layernorm.weight: OK [n=1] +100.83.134.158: 21.input_layernorm.weight: OK [n=1] +100.83.134.158: 21.post_attention_layernorm.weight: OK [n=1] +100.83.134.158: 22.input_layernorm.weight: OK [n=1] +100.83.134.158: 
22.post_attention_layernorm.weight: OK [n=1] +100.83.134.158: 23.input_layernorm.weight: OK [n=1] +100.83.134.158: 23.post_attention_layernorm.weight: OK [n=1] +100.83.134.158: 24.input_layernorm.weight: OK [n=1] +100.83.134.158: 24.post_attention_layernorm.weight: OK [n=1] +100.83.134.158: 25.input_layernorm.weight: OK [n=1] +100.83.134.158: 25.post_attention_layernorm.weight: OK [n=1] +100.83.134.158: 26.input_layernorm.weight: OK [n=1] +100.83.134.158: 26.post_attention_layernorm.weight: OK [n=1] +100.83.134.158: 28.final_rmsnorm.weight: OK [n=1] +100.83.134.158: +100.83.134.158: Checkpoint layer files OK +100.83.134.158: +100.83.134.158: Verify ** bf16_zero_ ** files +100.83.134.158: +100.83.134.158: Checking pp_stage=0 dp_stage=0 +100.83.134.158: bf16 zero files: 0%| | 0/1 [00:00 ${#new_files[@]} ]; then + touch $old_ckpt_file + readarray -t old_files < $old_ckpt_file +fi; +new_ckpts=($(echo ${old_files[@]} ${new_files[@]} | tr ' ' '\n' | sort | uniq -u )) +for ckpt in ${new_ckpts[@]}; do + echo "evaluation running for ${ckpt}..." + convert_ds_to_univ="python $tools_dir/convert_checkpoint/ds_to_universal.py --input_folder $old_ckpt_dir/$ckpt --output_folder $univ_ckpt_dir/$ckpt" + convert_univ_to_hf="python $tools_dir/convert_checkpoint/universal_to_huggingface.py --universal-dir $univ_ckpt_dir/$ckpt --hf-dir $hf_ckpt_dir/$ckpt --model-type llama --config $ckpt_dir/mds_to_hf_llama_custom.json" + #run_cmd="accelerate launch --main_process_port 39500 -m lm_eval --model hf --model_args pretrained=$hf_ckpt_dir/$ckpt --tasks hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc --batch_size auto >$old_ckpt_dir/$ckpt/debug.out 2>$old_ckpt_dir/$ckpt/debug.err" + run_cmd="accelerate launch --main_process_port 39500 -m lm_eval --model hf --model_args pretrained=$hf_ckpt_dir/$ckpt --tasks indiccopa-hi --batch_size auto --wandb_args project=bharatgpt,group=trial_expt_1" + cd /mnt/weka/peacock/idc/mint/docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed_old/ + eval $convert_ds_to_univ + eval $convert_univ_to_hf + cp /mnt/weka/peacock/idc/cronscript/ckpts/hf_ckpt/tokenizer.model $hf_ckpt_dir/$ckpt/ + cd /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness + eval $run_cmd + #echo $ckpt >> $old_ckpt_file +done;