diff --git "a/llama13b_5M/log.txt" "b/llama13b_5M/log.txt"
new file mode 100644
--- /dev/null
+++ "b/llama13b_5M/log.txt"
@@ -0,0 +1,1496 @@
+/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+  warnings.warn(
+[2024-04-24 07:21:56,617] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect)
+[2024-04-24 07:21:58,125] [INFO] [runner.py:585:main] cmd = /usr/bin/python3 -u -m deepspeed.launcher.launch --world_info=eyIxMDAuODMuMTM0LjEzMiI6IFswLCAxLCAyLCAzLCA0LCA1LCA2LCA3XX0= --master_addr=100.83.134.132 --master_port=29500 --no_python --no_local_rank --enable_each_rank_log=None /usr/bin/bash -c cd /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed && python3 -u ./pretrain_llama.py --deepspeed --tensor-model-parallel-size 2 --pipeline-model-parallel-size 2 --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --layernorm-epsilon 1e-6 --num-layers 16 --hidden-size 5120 --ffn-hidden-size 13824 --num-attention-heads 40 --seq-length 2048 --micro-batch-size 1 --global-batch-size 256 --train-iters 10000 --log-interval 10 --eval-iters 10 --eval-interval 20 --data-path /data/arxiv//tokenized_text_document --vocab-file /data/arxiv//gpt2-vocab.json --merge-file /data/arxiv//gpt2-merges.txt --optimizer adamw --adam-beta1 0.9 --adam-beta2 0.95 --adam-eps 1e-6 --lr 3e-4 --lr-decay-style cosine --lr-warmup-iters 2000 --clip-grad 1.0 --weight-decay 0.1 --attention-dropout 0.1 --hidden-dropout 0.1 --tensorboard-dir /data/output/llama13b_5M//tensorboard --log-validation-ppl-to-tensorboard --log-batch-size-to-tensorboard --log-timers-to-tensorboard --load /data/output/llama13b_5M//checkpoints_zero_stage_2 --deepspeed_config=/data/output/llama13b_5M//ds_config.json --zero-stage=0 --exit-interval 0 --no-masked-softmax-fusion --no-bias-gelu-fusion --no-bias-dropout-fusion --use-torch-compile false --use-fused-sdpa true --use-fused-sdpa-with-recompute false --bf16 --use_hpu --distributed-backend=hccl --hpu-deterministic --sequence-parallel --save /data/output/llama13b_5M//checkpoints_zero_stage_2 --save-interval 20 --verify-checkpoint --verify-checkpoint-model-type LLAMA
+[2024-04-24 07:21:59,707] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect)
+[2024-04-24 07:22:00,960] [INFO] [launch.py:146:main] WORLD INFO DICT: {'100.83.134.132': [0, 1, 2, 3, 4, 5, 6, 7]}
+[2024-04-24 07:22:00,960] [INFO] [launch.py:152:main] nnodes=1, num_local_procs=8, node_rank=0
+[2024-04-24 07:22:00,960] [INFO] [launch.py:163:main] global_rank_mapping=defaultdict(<class 'list'>, {'100.83.134.132': [0, 1, 2, 3, 4, 5, 6, 7]})
+[2024-04-24 07:22:00,960] [INFO] [launch.py:164:main] dist_world_size=8
+[2024-04-24 07:22:00,960] [INFO] [launch.py:166:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
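
The launcher command above fixes the parallel layout: 8 local ranks are factored into tensor-parallel 2 and pipeline-parallel 2, leaving data-parallel 2, and the batch flags then determine the gradient-accumulation depth. A minimal sketch of that arithmetic (illustrative Python, not code from this repo):

    # Topology/batch arithmetic implied by the launcher flags above (illustration only).
    world_size = 8                                    # one node, 8 HPUs
    tp, pp = 2, 2                                     # --tensor-model-parallel-size, --pipeline-model-parallel-size
    dp = world_size // (tp * pp)                      # -> 2 data-parallel replicas, as the log reports below
    micro_bs, global_bs = 1, 256                      # --micro-batch-size, --global-batch-size
    num_micro_batches = global_bs // (micro_bs * dp)  # -> 128 micro-batches per optimizer step
    assert (dp, num_micro_batches) == (2, 128)        # matches "setting number of micro-batches to constant 128"
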
+--------------------------------------------------
+DeepSpeed C++/CUDA extension op report
+--------------------------------------------------
+NOTE: Ops not installed will be just-in-time (JIT) compiled at
+      runtime if needed. Op compatibility means that your system
+      meet the required dependencies to JIT install the op.
+--------------------------------------------------
+JIT compiled ops requires ninja
+ninja .................. [OKAY]
+--------------------------------------------------
+op name ................ installed .. compatible
+--------------------------------------------------
+cpu_adam ............... [NO] ....... [OKAY]
+fused_adam ............. [NO] ....... [OKAY]
+deepspeed_not_implemented [NO] ....... [OKAY]
+transformer_inference .. [NO] ....... [OKAY]
+--------------------------------------------------
+DeepSpeed general environment info:
+torch install path ............... ['/usr/local/lib/python3.10/dist-packages/torch']
+torch version .................... 2.1.1a0+gitb51c9f6
+deepspeed install path ........... ['/usr/local/lib/python3.10/dist-packages/deepspeed']
+deepspeed info ................... 0.12.4+hpu.synapse.v1.14.0, fad45b2, 1.14.0
+deepspeed wheel compiled w. ...... torch 2.1
+shared memory (/dev/shm) size .... 503.72 GB
+fatal: not a git repository (or any parent up to mount point /)
+Stopping at filesystem boundary (GIT_DISCOVERY_ACROSS_FILESYSTEM not set).
+**** Git info for Megatron: git_hash=unknown git_branch=unknown ****
+_initialize_distributed: Initializing with below params:
+args.local_rank: 5
+args.world_size: 8
+args.rank: 5
+args.distributed_backend: hccl
+_initialize_distributed: Initializing with below params:
+args.local_rank: 4
+args.world_size: 8
+args.rank: 4
+args.distributed_backend: hccl
+_initialize_distributed: Initializing with below params:
+args.local_rank: 1
+args.world_size: 8
+args.rank: 1
+args.distributed_backend: hccl
+_initialize_distributed: Initializing with below params:
+args.local_rank: 3
+args.world_size: 8
+args.rank: 3
+args.distributed_backend: hccl
+using world size: 8, data-parallel-size: 2, tensor-model-parallel size: 2, pipeline-model-parallel size: 2
+accumulate and all-reduce gradients in fp32 for bfloat16 data type.
+using torch.bfloat16 for parameters ...
+------------------------ arguments ------------------------
+  accumulate_allreduce_grads_in_fp32 .............. True
+  activation_func_type ............................ swiglu
+  adam_beta1 ...................................... 0.9
+  adam_beta2 ...................................... 0.95
+  adam_eps ........................................ 1e-06
+  adlr_autoresume ................................. False
+  adlr_autoresume_interval ........................ 1000
+  aml_data_download_path .......................... None
+  apply_layernorm_weight_plus_one ................. False
+  apply_query_key_layer_scaling ................... True
+  apply_residual_connection_post_layernorm ........ False
+  attention_dropout ............................... 0.1
+  attention_softmax_in_fp32 ....................... False
+  bert_binary_head ................................ True
+  bert_load ....................................... None
+  bf16 ............................................ True
+  bias_dropout_fusion ............................. False
+  bias_gelu_fusion ................................ False
+  biencoder_projection_dim ........................ 0
+  biencoder_shared_query_context_model ............ False
+  block_data_path ................................. None
+  cache_fp8_weight ................................ False
+  cache_fp8_weight_fwd ............................ True
+  checkpoint_activations .......................... False
+  checkpoint_activations_granularity .............. full
+  checkpoint_in_cpu ............................... False
+  checkpoint_num_layers ........................... 1
+  clearml_config_path ............................. None
+  clearml_continue_exp ............................ False
+  clearml_exp_name ................................ None
+  clip_grad ....................................... 1.0
+  compression_training ............................ False
+  consumed_train_samples .......................... 0
+  consumed_train_tokens ........................... 0
+  consumed_valid_samples .......................... 0
+  contigious_checkpointing ........................ False
+  cpu_optimizer ................................... False
+  cpu_torch_adam .................................. False
+  create_moe_param_group .......................... False
+  curriculum_learning ............................. False
+  data_idx_path ................................... None
+  data_impl ....................................... infer
+  data_parallel_size .............................. 2
+  data_path ....................................... ['/data/arxiv//tokenized_text_document']
+  data_sharding ................................... True
+  dataloader_type ................................. single
+  DDP_impl ........................................ local
+  decoder_seq_length .............................. None
+  deepscale ....................................... False
+  deepscale_config ................................ None
+  deepspeed ....................................... True
+  deepspeed_activation_checkpointing .............. False
+  deepspeed_config ................................ /data/output/llama13b_5M//ds_config.json
+  deepspeed_mpi ................................... False
+  distribute_checkpointed_activations ............. False
+  distributed_backend ............................. hccl
+  do_layernorm_bias_weight_decay .................. False
+  do_pretrain_validation .......................... False
+  ds_inference .................................... False
+  ds_pipeline_enabled ............................. True
+  embed_layernorm ................................. False
+  embedding_path .................................. None
+  enable_expert_tensor_parallelism ................ False
+  encoder_seq_length .............................. 2048
+  eod_mask_loss ................................... False
+  eval_interval ................................... 20
+  eval_iters ...................................... 10
+  eval_loss_exit_value ............................ None
+  eval_micro_batch_size ........................... 1
+  evidence_data_path .............................. None
+  exit_duration_in_mins ........................... None
+  exit_interval ................................... 0
+  expert_interval ................................. 2
+  ffn_hidden_coeff ................................ 2.6666666666666665
+  ffn_hidden_size ................................. 13824
+  finetune ........................................ False
+  fix_position_emb_redundant_alloc ................ False
+  flatten_linear_operands ......................... False
+  fp16 ............................................ False
+  fp16_lm_cross_entropy ........................... False
+  fp32_residual_connection ........................ False
+  global_batch_size ............................... 256
+  hidden_dropout .................................. 0.1
+  hidden_size ..................................... 5120
+  hidden_size_teacher ............................. None
+  hpu_deterministic ............................... True
+  hpu_fp8_format .................................. e5m2
+  hpu_fp8_measure_interval ........................ 10
+  hysteresis ...................................... 2
+  ict_head_size ................................... None
+  ict_load ........................................ None
+  img_dim ......................................... 224
+  indexer_batch_size .............................. 128
+  indexer_log_interval ............................ 1000
+  inference ....................................... False
+  init_method_std ................................. 0.02
+  init_method_xavier_uniform ...................... False
+  initial_loss_scale .............................. 4294967296
+  kd .............................................. False
+  kd_alpha_ce ..................................... 1
+  kd_beta_ce ...................................... 1
+  kd_temp ......................................... 1.0
+  kill_switch_path ................................ None
+  kv_channels ..................................... 128
+  layernorm_epsilon ............................... 1e-06
+  layernorm_type .................................. rmsnorm
+  lazy_mpu_init ................................... None
+  load ............................................ /data/output/llama13b_5M//checkpoints_zero_stage_2
+  load_teacher .................................... None
+  local_rank ...................................... 0
+  log_batch_size_to_tensorboard ................... True
+  log_bwd_grads ................................... False
+  log_fwd_activations ............................. False
+  log_interval .................................... 10
+  log_learning_rate_to_tensorboard ................ True
+  log_loss_scale_to_tensorboard ................... True
+  log_model_inputs ................................ False
+  log_num_zeros_in_grad ........................... False
+  log_optimizer_states_to_tensorboard ............. False
+  log_params_norm ................................. False
+  log_timers_to_tensorboard ....................... True
+  log_validation_ppl_to_tensorboard ............... True
+  loss_scale ...................................... None
+  loss_scale_window ............................... 1000
+  lr .............................................. 0.0003
+  lr_decay_iters .................................. None
+  lr_decay_samples ................................ None
+  lr_decay_style .................................. cosine
+  lr_decay_tokens ................................. None
+  lr_warmup_fraction .............................. None
+  lr_warmup_iters ................................. 2000
+  lr_warmup_samples ............................... 0
+  lr_warmup_tokens ................................ None
+  make_vocab_size_divisible_by .................... 128
+  mask_prob ....................................... 0.15
+  mask_tensor_adding .............................. False
+  masked_softmax_fusion ........................... False
+  max_position_embeddings ......................... None
+  memory_centric_tiled_linear ..................... False
+  merge_file ...................................... /data/arxiv//gpt2-merges.txt
+  micro_batch_size ................................ 1
+  min_loss_scale .................................. 1.0
+  min_lr .......................................... 0.0
+  mlp_type ........................................ standard
+  mmap_warmup ..................................... False
+  moe_eval_capacity_factor ........................ 1.0
+  moe_expert_parallel_size ........................ 1
+  moe_loss_coeff .................................. 0.1
+  moe_min_capacity ................................ 4
+  moe_token_dropping .............................. True
+  moe_train_capacity_factor ....................... 1.0
+  mos ............................................. False
+  no_bias ......................................... True
+  no_cuda ......................................... False
+  no_load_lr_state ................................ False
+  no_load_optim ................................... None
+  no_load_rng ..................................... None
+  no_pipeline_parallel ............................ False
+  no_save_optim ................................... None
+  no_save_rng ..................................... None
+  no_scaled_init .................................. False
+  num_attention_heads ............................. 40
+  num_attention_heads_teacher ..................... None
+  num_channels .................................... 3
+  num_classes ..................................... 1000
+  num_experts ..................................... [1]
+  num_experts_teacher ............................. [1]
+  num_key_value_heads ............................. 40
+  num_layers ...................................... 16
+  num_layers_per_virtual_pipeline_stage ........... None
+  num_layers_teacher .............................. None
+  num_workers ..................................... 2
+  onnx_safe ....................................... None
+  openai_gelu ..................................... False
+  optimizer ....................................... adamw
+  override_lr_scheduler ........................... False
+  params_dtype .................................... torch.bfloat16
+  partition_activations ........................... False
+  patch_dim ....................................... 16
+  pipeline_model_parallel_size .................... 2
+  position_embedding_type ......................... PositionEmbeddingType.rotary
+  profile ......................................... None
+  profile_backward ................................ False
+  profile_steps ................................... 2,3
+  query_in_block_prob ............................. 0.1
+  rampup_batch_size ............................... None
+  rank ............................................ 0
+  remote_device ................................... none
+  reset_attention_mask ............................ False
+  reset_iteration ................................. False
+  reset_position_ids .............................. False
+  retriever_report_topk_accuracies ................ []
+  retriever_score_scaling ......................... False
+  retriever_seq_length ............................ 256
+  sample_rate ..................................... 1.0
+  save ............................................ /data/output/llama13b_5M//checkpoints_zero_stage_2
+  save_interval ................................... 20
+  scatter_gather_tensors_in_pipeline .............. True
+  scattered_embeddings ............................ False
+  seed ............................................ 1234
+  seq_length ...................................... 2048
+  sequence_parallel ............................... True
+  sgd_momentum .................................... 0.9
+  short_seq_prob .................................. 0.1
+  skip_train ...................................... False
+  split ........................................... 969, 30, 1
+  split_transformers .............................. False
+  synchronize_each_layer .......................... False
+  tensor_logger_max_iter .......................... 0
+  tensor_logger_path .............................. None
+  tensor_model_parallel_size ...................... 2
+  tensorboard_dir ................................. /data/output/llama13b_5M//tensorboard
+  tensorboard_log_interval ........................ 1
+  tensorboard_queue_size .......................... 1000
+  test_data_path .................................. None
+  tile_factor ..................................... 1
+  titles_data_path ................................ None
+  tokenizer_eod_id ................................ None
+  tokenizer_model_file ............................ None
+  tokenizer_type .................................. GPT2BPETokenizer
+  topk ............................................ 1
+  train_data_path ................................. None
+  train_iters ..................................... 10000
+  train_samples ................................... None
+  train_tokens .................................... None
+  universal_checkpoint ............................ False
+  use_checkpoint_lr_scheduler ..................... False
+  use_contiguous_buffers_in_ddp ................... True
+  use_cpu_initialization .......................... None
+  use_fused_sdpa .................................. True
+  use_fused_sdpa_with_recompute ................... False
+  use_hpu ......................................... True
+  use_hpu_fp8_transformer_engine .................. False
+  use_hpu_graphs .................................. False
+  use_one_sent_docs ............................... False
+  use_pin_memory .................................. False
+  use_rotary_v2 ................................... False
+  use_seq_len_plus_one_tokens ..................... True
+  use_torch_compile ............................... False
+  use_tutel ....................................... False
+  valid_data_path ................................. None
+  verify_checkpoint ............................... True
+  verify_checkpoint_model_type .................... LLAMA
+  verify_tp_workers ............................... False
+  verify_tp_workers_hash .......................... False
+  virtual_pipeline_model_parallel_size ............ None
+  vocab_extra_ids ................................. 0
+  vocab_file ...................................... /data/arxiv//gpt2-vocab.json
+  weight_decay .................................... 0.1
+  world_size ...................................... 8
+  zero_allgather_bucket_size ...................... 0.0
+  zero_contigious_gradients ....................... False
+  zero_reduce_bucket_size ......................... 0.0
+  zero_reduce_scatter ............................. False
+  zero_stage ...................................... 0
+-------------------- end of arguments ---------------------
+setting number of micro-batches to constant 128
+> building GPT2BPETokenizer tokenizer ...
+hccl device_count: 8
+[2024-04-24 07:22:06,063] [WARNING] [comm.py:163:init_deepspeed_backend] HCCL backend in DeepSpeed not yet implemented
+[2024-04-24 07:22:06,063] [INFO] [comm.py:637:init_distributed] cdb=None
+_initialize_distributed: Initializing with below params:
+args.local_rank: 6
+args.world_size: 8
+args.rank: 6
+args.distributed_backend: hccl
+> setting tensorboard ...
+_initialize_distributed: Initializing with below params:
+args.local_rank: 7
+args.world_size: 8
+args.rank: 7
+args.distributed_backend: hccl
+ > padded vocab (size: 50257) with 175 dummy tokens (new size: 50432)
+_initialize_distributed: Initializing with below params:
+args.local_rank: 0
+args.world_size: 8
+args.rank: 0
+args.distributed_backend: hccl
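
The "padded vocab" line is plain arithmetic: with --make-vocab-size-divisible-by 128 and tensor-parallel size 2, Megatron rounds the GPT-2 vocabulary up to the next multiple of 256. A small check (illustrative Python):

    import math
    orig_vocab = 50257                       # GPT2BPETokenizer vocabulary size
    multiple = 128 * 2                       # make-vocab-size-divisible-by * tensor-model-parallel size
    padded = math.ceil(orig_vocab / multiple) * multiple
    print(padded, padded - orig_vocab)       # 50432, 175 -- the dummy-token count reported above
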
+> initializing torch distributed ...
+[2024-04-24 07:22:06,173] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend hccl
+_initialize_distributed: Initializing with below params:
+args.local_rank: 2
+args.world_size: 8
+args.rank: 2
+args.distributed_backend: hccl
+> initializing tensor model parallel with size 2
+> initializing pipeline model parallel with size 2
+> setting random seeds to 1234 ...
+> initializing model parallel cuda seeds on global rank 0, model parallel rank 0, and data parallel rank 0 with model parallel seed: 3952 and data parallel seed: 1234
+============================= HABANA PT BRIDGE CONFIGURATION ===========================
+ PT_HPU_LAZY_MODE = 1
+ PT_RECIPE_CACHE_PATH =
+ PT_CACHE_FOLDER_DELETE = 0
+ PT_HPU_RECIPE_CACHE_CONFIG =
+ PT_HPU_MAX_COMPOUND_OP_SIZE = 9223372036854775807
+ PT_HPU_LAZY_ACC_PAR_MODE = 0
+ PT_HPU_ENABLE_REFINE_DYNAMIC_SHAPES = 0
+---------------------------: System Configuration :---------------------------
+Num CPU Cores : 160
+CPU RAM : 1056375244 KB
+------------------------------------------------------------------------------
+> compiling dataset index builder ...
+make: Entering directory '/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/data'
+make: Nothing to be done for 'default'.
+make: Leaving directory '/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/data'
+>>> done with dataset index builder. Compilation time: 0.180 seconds
+WARNING: constraints for invoking optimized fused softmax kernel are not met. We default back to unfused kernel invocations.
+> compiling and loading fused kernels ...
+>>> done with compiling and loading fused kernels. Compilation time: 0.004 seconds
+wandb: Currently logged in as: advaiddeepak0602 (bharatgpt). Use `wandb login --relogin` to force relogin
+wandb: Appending key for api.wandb.ai to your netrc file: /root/.netrc
+wandb: Tracking run with wandb version 0.16.6
+wandb: Run data is saved locally in /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/wandb/run-20240424_072213-mp0t9uy4
+wandb: Run `wandb offline` to turn off syncing.
+wandb: Syncing run youthful-wildflower-2012
+wandb: ⭐️ View project at https://wandb.ai/bharatgpt/llama_runs
+wandb: 🚀 View run at https://wandb.ai/bharatgpt/llama_runs/runs/mp0t9uy4
+wandb: Tracking run with wandb version 0.16.6
+wandb: Run data is saved locally in /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/wandb/run-20240424_072213-w9athpv9
+wandb: Run `wandb offline` to turn off syncing.
+wandb: Syncing run different-planet-2014
+wandb: ⭐️ View project at https://wandb.ai/bharatgpt/llama_runs
+wandb: 🚀 View run at https://wandb.ai/bharatgpt/llama_runs/runs/w9athpv9
+wandb: Tracking run with wandb version 0.16.6
+wandb: Run data is saved locally in /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/wandb/run-20240424_072213-211n5b2u
+wandb: Run `wandb offline` to turn off syncing.
+wandb: Syncing run jolly-pyramid-2013
+wandb: ⭐️ View project at https://wandb.ai/bharatgpt/llama_runs
+wandb: 🚀 View run at https://wandb.ai/bharatgpt/llama_runs/runs/211n5b2u
+wandb: Tracking run with wandb version 0.16.6
+wandb: Run data is saved locally in /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/wandb/run-20240424_072213-tbva9yik
+wandb: Run `wandb offline` to turn off syncing.
+wandb: Syncing run earthy-plasma-2014
+wandb: ⭐️ View project at https://wandb.ai/bharatgpt/llama_runs
+wandb: 🚀 View run at https://wandb.ai/bharatgpt/llama_runs/runs/tbva9yik
+wandb: Tracking run with wandb version 0.16.6
+wandb: Run data is saved locally in /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/wandb/run-20240424_072213-h27k7fos
+wandb: Run `wandb offline` to turn off syncing.
+wandb: Syncing run pleasant-glitter-2017
+wandb: ⭐️ View project at https://wandb.ai/bharatgpt/llama_runs
+wandb: 🚀 View run at https://wandb.ai/bharatgpt/llama_runs/runs/h27k7fos
+wandb: Tracking run with wandb version 0.16.6
+wandb: Run data is saved locally in /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/wandb/run-20240424_072213-pu9rwbfz
+wandb: Run `wandb offline` to turn off syncing.
+wandb: Syncing run zany-snow-2014
+wandb: ⭐️ View project at https://wandb.ai/bharatgpt/llama_runs
+wandb: 🚀 View run at https://wandb.ai/bharatgpt/llama_runs/runs/pu9rwbfz
+wandb: Tracking run with wandb version 0.16.6
+wandb: Run data is saved locally in /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/wandb/run-20240424_072213-ph1uqt0g
+wandb: Run `wandb offline` to turn off syncing.
+wandb: Syncing run hopeful-pine-2018
+wandb: ⭐️ View project at https://wandb.ai/bharatgpt/llama_runs
+wandb: 🚀 View run at https://wandb.ai/bharatgpt/llama_runs/runs/ph1uqt0g
+wandb: Tracking run with wandb version 0.16.6
+wandb: Run data is saved locally in /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/wandb/run-20240424_072213-amj6vf90
+wandb: Run `wandb offline` to turn off syncing.
+wandb: Syncing run swept-sunset-2019
+wandb: ⭐️ View project at https://wandb.ai/bharatgpt/llama_runs
+wandb: 🚀 View run at https://wandb.ai/bharatgpt/llama_runs/runs/amj6vf90
+time to initialize megatron (seconds): 38.936
+[after megatron is initialized] datetime: 2024-04-24 07:22:14
+building LLaMA model ...
+*************** Using FusedSDPA ******************
+/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/core/weight_sharing.py:53: UserWarning: "hpu:X" notation is not supported by Gaudi PyTorch intergration bridge. Please change to "hpu" without index (Triggered internally at /npu-stack/pytorch-integration/pytorch_helpers/lazy_to_backend.cpp:53.)
+  return super().__torch_function__(func, types, new_args, kwargs)
+ > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 1397964800
+ > number of parameters on (tensor, pipeline) model parallel rank (0, 1): 1397969920
+ > number of parameters on (tensor, pipeline) model parallel rank (1, 1): 1397969920
+[2024-04-24 07:22:15,116] [INFO] [utils.py:824:see_memory_usage] Before Building Model
+[2024-04-24 07:22:15,119] [INFO] [utils.py:825:see_memory_usage] MA 0.01 GB Max_MA 0.01 GB CA 0.0 GB Max_CA 0 GB
+[2024-04-24 07:22:15,120] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 369.82 GB, percent = 36.7%
+SEED_LAYERS=False BASE_SEED=1234 SEED_FN=None
+Using topology: {ProcessCoord(pipe=0, data=0, model=0): 0, ProcessCoord(pipe=0, data=0, model=1): 1, ProcessCoord(pipe=0, data=1, model=0): 2, ProcessCoord(pipe=0, data=1, model=1): 3, ProcessCoord(pipe=1, data=0, model=0): 4, ProcessCoord(pipe=1, data=0, model=1): 5, ProcessCoord(pipe=1, data=1, model=0): 6, ProcessCoord(pipe=1, data=1, model=1): 7}
+[2024-04-24 07:22:15,122] [INFO] [module.py:375:_partition_layers] Partitioning pipeline stages with method type:transformer
+stage=0 layers=11
+     0: _to_float16
+     1: EmbeddingPipe
+     2:
+     3: ParallelTransformerLayerPipe
+     4: ParallelTransformerLayerPipe
+     5: ParallelTransformerLayerPipe
+     6: ParallelTransformerLayerPipe
+     7: ParallelTransformerLayerPipe
+     8: ParallelTransformerLayerPipe
+     9: ParallelTransformerLayerPipe
+    10: ParallelTransformerLayerPipe
+stage=1 layers=13
+    11: ParallelTransformerLayerPipe
+    12: ParallelTransformerLayerPipe
+    13: ParallelTransformerLayerPipe
+    14: ParallelTransformerLayerPipe
+    15: ParallelTransformerLayerPipe
+    16: ParallelTransformerLayerPipe
+    17: ParallelTransformerLayerPipe
+    18: ParallelTransformerLayerPipe
+    19:
+    20: WrapName
+    21: WrapName
+    22:
+    23: float16_to_fp32
+  loss: CrossEntropy
+[2024-04-24 07:22:15,303] [INFO] [utils.py:824:see_memory_usage] After Building Model
+[2024-04-24 07:22:15,306] [INFO] [utils.py:825:see_memory_usage] MA 0.01 GB Max_MA 0.01 GB CA 0.0 GB Max_CA 0 GB
+[2024-04-24 07:22:15,307] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 370.67 GB, percent = 36.8%
+ > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 1397964800
+> learning rate decay style: cosine
+DeepSpeed is enabled.
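
The partition printout shows how method type:transformer splits the 16 ParallelTransformerLayerPipe layers evenly, 8 per pipeline stage; stage 0 additionally holds the float16 cast and embedding, and stage 1 the WrapName wrappers (presumably the final norm and LM head) plus the fp32 cast, which is why the stages list 11 and 13 entries. A rough sketch of an even transformer-layer split (not DeepSpeed's actual partitioning code):

    # Even split of transformer layers over pipeline stages (illustration only).
    num_layers, num_stages = 16, 2
    bounds = [s * num_layers // num_stages for s in range(num_stages + 1)]  # [0, 8, 16]
    for s in range(num_stages):
        print(f"stage {s}: transformer layers {bounds[s]}..{bounds[s + 1] - 1}")
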
+[2024-04-24 07:22:15,310] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed info: version=0.12.4+hpu.synapse.v1.14.0, git-hash=fad45b2, git-branch=1.14.0 +[2024-04-24 07:22:16,123] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed Flops Profiler Enabled: False +[2024-04-24 07:22:16,123] [INFO] [logging.py:96:log_dist] [Rank 0] Using client Optimizer as basic optimizer +[2024-04-24 07:22:16,124] [INFO] [logging.py:96:log_dist] [Rank 0] Removing param_group that has no 'params' in the basic Optimizer +[2024-04-24 07:22:16,124] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed Basic Optimizer = AdamW +[2024-04-24 07:22:16,124] [INFO] [logging.py:96:log_dist] [Rank 0] Creating BF16 optimizer +[2024-04-24 07:22:16,199] [INFO] [engine.py:139:__init__] is_pipe_partitioned= False is_grad_partitioned= False +[2024-04-24 07:22:16,200] [INFO] [engine.py:139:__init__] is_pipe_partitioned= False is_grad_partitioned= False +[2024-04-24 07:22:16,201] [INFO] [engine.py:139:__init__] is_pipe_partitioned= False is_grad_partitioned= False +[2024-04-24 07:22:16,202] [INFO] [engine.py:139:__init__] is_pipe_partitioned= False is_grad_partitioned= False +[2024-04-24 07:22:16,204] [INFO] [engine.py:139:__init__] is_pipe_partitioned= False is_grad_partitioned= False +[2024-04-24 07:22:16,215] [INFO] [engine.py:139:__init__] is_pipe_partitioned= False is_grad_partitioned= False +[2024-04-24 07:22:16,274] [INFO] [utils.py:824:see_memory_usage] begin bf16_optimizer +[2024-04-24 07:22:16,278] [INFO] [utils.py:825:see_memory_usage] MA 2.62 GB Max_MA 2.63 GB CA 0.0 GB Max_CA 0 GB +[2024-04-24 07:22:16,278] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 371.0 GB, percent = 36.8% +[2024-04-24 07:22:16,325] [INFO] [engine.py:139:__init__] is_pipe_partitioned= False is_grad_partitioned= False +[2024-04-24 07:22:16,412] [INFO] [utils.py:824:see_memory_usage] before initializing group 0 +[2024-04-24 07:22:16,415] [INFO] [utils.py:825:see_memory_usage] MA 2.62 GB Max_MA 0.0 GB CA 0.0 GB Max_CA 0 GB +[2024-04-24 07:22:16,416] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 370.89 GB, percent = 36.8% +[2024-04-24 07:22:16,748] [INFO] [utils.py:824:see_memory_usage] after initializing group 0 +[2024-04-24 07:22:16,752] [INFO] [utils.py:825:see_memory_usage] MA 2.62 GB Max_MA 5.22 GB CA 0.0 GB Max_CA 0 GB +[2024-04-24 07:22:16,752] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 371.01 GB, percent = 36.8% +[2024-04-24 07:22:16,858] [INFO] [utils.py:824:see_memory_usage] before initializing group 1 +[2024-04-24 07:22:16,862] [INFO] [utils.py:825:see_memory_usage] MA 2.62 GB Max_MA 0.0 GB CA 0.0 GB Max_CA 0 GB +[2024-04-24 07:22:16,862] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 371.01 GB, percent = 36.8% +[2024-04-24 07:22:16,982] [INFO] [utils.py:824:see_memory_usage] after initializing group 1 +[2024-04-24 07:22:16,986] [INFO] [utils.py:825:see_memory_usage] MA 10.43 GB Max_MA 10.43 GB CA 0.0 GB Max_CA 0 GB +[2024-04-24 07:22:16,986] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 371.01 GB, percent = 36.8% +[2024-04-24 07:22:17,116] [INFO] [utils.py:824:see_memory_usage] before initialize_optimizer +[2024-04-24 07:22:17,120] [INFO] [utils.py:825:see_memory_usage] MA 10.43 GB Max_MA 0.0 GB CA 0.0 GB Max_CA 0 GB +[2024-04-24 07:22:17,120] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 371.03 GB, percent = 36.8% +[2024-04-24 07:22:17,251] [INFO] [utils.py:824:see_memory_usage] end initialize_optimizer +[2024-04-24 
+[2024-04-24 07:22:17,388] [INFO] [utils.py:824:see_memory_usage] end bf16_optimizer
+[2024-04-24 07:22:17,392] [INFO] [utils.py:825:see_memory_usage] MA 10.43 GB Max_MA 0.0 GB CA 0.0 GB Max_CA 0 GB
+[2024-04-24 07:22:17,392] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 371.02 GB, percent = 36.8%
+[2024-04-24 07:22:17,393] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed Final Optimizer = BF16_Optimizer
+[2024-04-24 07:22:17,394] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed using client LR scheduler
+[2024-04-24 07:22:17,394] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed LR Scheduler =
+[2024-04-24 07:22:17,394] [INFO] [logging.py:96:log_dist] [Rank 0] step=0, skipped=0, lr=[0.0, 0.0], mom=[(0.9, 0.95), (0.9, 0.95)]
+[2024-04-24 07:22:17,394] [INFO] [config.py:992:print] DeepSpeedEngine configuration:
+[2024-04-24 07:22:17,394] [INFO] [config.py:996:print]   activation_checkpointing_config  {
+    "partition_activations": false,
+    "contiguous_memory_optimization": false,
+    "cpu_checkpointing": false,
+    "number_checkpoints": null,
+    "synchronize_checkpoint_boundary": false,
+    "profile": false
+}
+[2024-04-24 07:22:17,394] [INFO] [config.py:996:print]   aio_config ................... {'block_size': 1048576, 'queue_depth': 8, 'thread_count': 1, 'single_submit': False, 'overlap_events': True}
+[2024-04-24 07:22:17,395] [INFO] [config.py:996:print]   amp_enabled .................. False
+[2024-04-24 07:22:17,395] [INFO] [config.py:996:print]   amp_params ................... False
+[2024-04-24 07:22:17,395] [INFO] [config.py:996:print]   autotuning_config ............ {
+    "enabled": false,
+    "start_step": null,
+    "end_step": null,
+    "metric_path": null,
+    "arg_mappings": null,
+    "metric": "throughput",
+    "model_info": null,
+    "results_dir": "autotuning_results",
+    "exps_dir": "autotuning_exps",
+    "overwrite": true,
+    "fast": true,
+    "start_profile_step": 3,
+    "end_profile_step": 5,
+    "tuner_type": "gridsearch",
+    "tuner_early_stopping": 5,
+    "tuner_num_trials": 50,
+    "model_info_path": null,
+    "mp_size": 1,
+    "max_train_batch_size": null,
+    "min_train_batch_size": 1,
+    "max_train_micro_batch_size_per_gpu": 1.024000e+03,
+    "min_train_micro_batch_size_per_gpu": 1,
+    "num_tuning_micro_batch_sizes": 3
+}
+[2024-04-24 07:22:17,395] [INFO] [config.py:996:print]   bfloat16_accumulate_grads_via_hooks True
+[2024-04-24 07:22:17,395] [INFO] [config.py:996:print]   bfloat16_enabled ............. True
+[2024-04-24 07:22:17,395] [INFO] [config.py:996:print]   checkpoint_parallel_write_pipeline False
+[2024-04-24 07:22:17,395] [INFO] [config.py:996:print]   checkpoint_tag_validation_enabled True
+[2024-04-24 07:22:17,395] [INFO] [config.py:996:print]   checkpoint_tag_validation_fail False
+[2024-04-24 07:22:17,395] [INFO] [config.py:996:print]   comms_config .................
+[2024-04-24 07:22:17,395] [INFO] [config.py:996:print]   communication_data_type ...... None
+[2024-04-24 07:22:17,395] [INFO] [config.py:996:print]   compression_config ........... {'weight_quantization': {'shared_parameters': {'enabled': False, 'quantizer_kernel': False, 'schedule_offset': 0, 'quantize_groups': 1, 'quantize_verbose': False, 'quantization_type': 'symmetric', 'quantize_weight_in_forward': False, 'rounding': 'nearest', 'fp16_mixed_quantize': False, 'quantize_change_ratio': 0.001}, 'different_groups': {}}, 'activation_quantization': {'shared_parameters': {'enabled': False, 'quantization_type': 'symmetric', 'range_calibration': 'dynamic', 'schedule_offset': 1000}, 'different_groups': {}}, 'sparse_pruning': {'shared_parameters': {'enabled': False, 'method': 'l1', 'schedule_offset': 1000}, 'different_groups': {}}, 'row_pruning': {'shared_parameters': {'enabled': False, 'method': 'l1', 'schedule_offset': 1000}, 'different_groups': {}}, 'head_pruning': {'shared_parameters': {'enabled': False, 'method': 'topk', 'schedule_offset': 1000}, 'different_groups': {}}, 'channel_pruning': {'shared_parameters': {'enabled': False, 'method': 'l1', 'schedule_offset': 1000}, 'different_groups': {}}, 'layer_reduction': {'enabled': False}}
{'weight_quantization': {'shared_parameters': {'enabled': False, 'quantizer_kernel': False, 'schedule_offset': 0, 'quantize_groups': 1, 'quantize_verbose': False, 'quantization_type': 'symmetric', 'quantize_weight_in_forward': False, 'rounding': 'nearest', 'fp16_mixed_quantize': False, 'quantize_change_ratio': 0.001}, 'different_groups': {}}, 'activation_quantization': {'shared_parameters': {'enabled': False, 'quantization_type': 'symmetric', 'range_calibration': 'dynamic', 'schedule_offset': 1000}, 'different_groups': {}}, 'sparse_pruning': {'shared_parameters': {'enabled': False, 'method': 'l1', 'schedule_offset': 1000}, 'different_groups': {}}, 'row_pruning': {'shared_parameters': {'enabled': False, 'method': 'l1', 'schedule_offset': 1000}, 'different_groups': {}}, 'head_pruning': {'shared_parameters': {'enabled': False, 'method': 'topk', 'schedule_offset': 1000}, 'different_groups': {}}, 'channel_pruning': {'shared_parameters': {'enabled': False, 'method': 'l1', 'schedule_offset': 1000}, 'different_groups': {}}, 'layer_reduction': {'enabled': False}} +[2024-04-24 07:22:17,395] [INFO] [config.py:996:print] curriculum_enabled_legacy .... False +[2024-04-24 07:22:17,395] [INFO] [config.py:996:print] curriculum_params_legacy ..... False +[2024-04-24 07:22:17,395] [INFO] [config.py:996:print] data_efficiency_config ....... {'enabled': False, 'seed': 1234, 'data_sampling': {'enabled': False, 'num_epochs': 1000, 'num_workers': 0, 'curriculum_learning': {'enabled': False}}, 'data_routing': {'enabled': False, 'random_ltd': {'enabled': False, 'layer_token_lr_schedule': {'enabled': False}}}} +[2024-04-24 07:22:17,396] [INFO] [config.py:996:print] data_efficiency_enabled ...... False +[2024-04-24 07:22:17,396] [INFO] [config.py:996:print] dataloader_drop_last ......... False +[2024-04-24 07:22:17,396] [INFO] [config.py:996:print] disable_allgather ............ False +[2024-04-24 07:22:17,396] [INFO] [config.py:996:print] dump_state ................... False +[2024-04-24 07:22:17,396] [INFO] [config.py:996:print] dynamic_loss_scale_args ...... None +[2024-04-24 07:22:17,396] [INFO] [config.py:996:print] eigenvalue_enabled ........... False +[2024-04-24 07:22:17,396] [INFO] [config.py:996:print] eigenvalue_gas_boundary_resolution 1 +[2024-04-24 07:22:17,396] [INFO] [config.py:996:print] eigenvalue_layer_name ........ bert.encoder.layer +[2024-04-24 07:22:17,396] [INFO] [config.py:996:print] eigenvalue_layer_num ......... 0 +[2024-04-24 07:22:17,396] [INFO] [config.py:996:print] eigenvalue_max_iter .......... 100 +[2024-04-24 07:22:17,396] [INFO] [config.py:996:print] eigenvalue_stability ......... 1e-06 +[2024-04-24 07:22:17,396] [INFO] [config.py:996:print] eigenvalue_tol ............... 0.01 +[2024-04-24 07:22:17,396] [INFO] [config.py:996:print] eigenvalue_verbose ........... False +[2024-04-24 07:22:17,396] [INFO] [config.py:996:print] elasticity_enabled ........... False +[2024-04-24 07:22:17,396] [INFO] [config.py:996:print] flops_profiler_config ........ { + "enabled": false, + "recompute_fwd_factor": 0.0, + "profile_step": 1, + "module_depth": -1, + "top_modules": 1, + "detailed": true, + "output_file": null +} +[2024-04-24 07:22:17,396] [INFO] [config.py:996:print] fp16_auto_cast ............... None +[2024-04-24 07:22:17,396] [INFO] [config.py:996:print] fp16_enabled ................. False +[2024-04-24 07:22:17,397] [INFO] [config.py:996:print] fp16_master_weights_and_gradients False +[2024-04-24 07:22:17,397] [INFO] [config.py:996:print] global_rank .................. 
0 +[2024-04-24 07:22:17,397] [INFO] [config.py:996:print] grad_accum_dtype ............. None +[2024-04-24 07:22:17,397] [INFO] [config.py:996:print] gradient_accumulation_steps .. 128 +[2024-04-24 07:22:17,397] [INFO] [config.py:996:print] gradient_clipping ............ 1.0 +[2024-04-24 07:22:17,397] [INFO] [config.py:996:print] gradient_predivide_factor .... 1.0 +[2024-04-24 07:22:17,397] [INFO] [config.py:996:print] hybrid_engine ................ enabled=False max_out_tokens=512 inference_tp_size=1 release_inference_cache=False pin_parameters=True tp_gather_partition_size=8 +[2024-04-24 07:22:17,397] [INFO] [config.py:996:print] initial_dynamic_scale ........ 1 +[2024-04-24 07:22:17,397] [INFO] [config.py:996:print] load_universal_checkpoint .... False +[2024-04-24 07:22:17,397] [INFO] [config.py:996:print] loss_scale ................... 1.0 +[2024-04-24 07:22:17,397] [INFO] [config.py:996:print] memory_breakdown ............. False +[2024-04-24 07:22:17,397] [INFO] [config.py:996:print] mics_hierarchial_params_gather False +[2024-04-24 07:22:17,397] [INFO] [config.py:996:print] mics_shard_size .............. -1 +[2024-04-24 07:22:17,397] [INFO] [config.py:996:print] monitor_config ............... tensorboard=TensorBoardConfig(enabled=False, output_path='', job_name='DeepSpeedJobName') wandb=WandbConfig(enabled=False, group=None, team=None, project='deepspeed') csv_monitor=CSVConfig(enabled=False, output_path='', job_name='DeepSpeedJobName') enabled=False +[2024-04-24 07:22:17,397] [INFO] [config.py:996:print] nebula_config ................ { + "enabled": false, + "persistent_storage_path": null, + "persistent_time_interval": 100, + "num_of_version_in_retention": 2, + "enable_nebula_load": true, + "load_path": null +} +[2024-04-24 07:22:17,397] [INFO] [config.py:996:print] optimizer_legacy_fusion ...... False +[2024-04-24 07:22:17,397] [INFO] [config.py:996:print] optimizer_name ............... None +[2024-04-24 07:22:17,398] [INFO] [config.py:996:print] optimizer_params ............. None +[2024-04-24 07:22:17,398] [INFO] [config.py:996:print] pipeline ..................... {'stages': 'auto', 'partition': 'best', 'seed_layers': False, 'activation_checkpoint_interval': 0, 'pipe_partitioned': False, 'grad_partitioned': False} +[2024-04-24 07:22:17,398] [INFO] [config.py:996:print] pld_enabled .................. False +[2024-04-24 07:22:17,398] [INFO] [config.py:996:print] pld_params ................... False +[2024-04-24 07:22:17,398] [INFO] [config.py:996:print] prescale_gradients ........... False +[2024-04-24 07:22:17,398] [INFO] [config.py:996:print] scheduler_name ............... None +[2024-04-24 07:22:17,398] [INFO] [config.py:996:print] scheduler_params ............. None +[2024-04-24 07:22:17,398] [INFO] [config.py:996:print] seq_parallel_communication_data_type torch.float32 +[2024-04-24 07:22:17,398] [INFO] [config.py:996:print] sparse_attention ............. None +[2024-04-24 07:22:17,398] [INFO] [config.py:996:print] sparse_gradients_enabled ..... False +[2024-04-24 07:22:17,398] [INFO] [config.py:996:print] steps_per_print .............. 10 +[2024-04-24 07:22:17,398] [INFO] [config.py:996:print] train_batch_size ............. 256 +[2024-04-24 07:22:17,398] [INFO] [config.py:996:print] train_micro_batch_size_per_gpu 1 +[2024-04-24 07:22:17,398] [INFO] [config.py:996:print] use_data_before_expert_parallel_ False +[2024-04-24 07:22:17,398] [INFO] [config.py:996:print] use_node_local_storage ....... 
False +[2024-04-24 07:22:17,398] [INFO] [config.py:996:print] wall_clock_breakdown ......... False +[2024-04-24 07:22:17,398] [INFO] [config.py:996:print] weight_quantization_config ... None +[2024-04-24 07:22:17,398] [INFO] [config.py:996:print] world_size ................... 2 +[2024-04-24 07:22:17,399] [INFO] [config.py:996:print] zero_allow_comm_data_type_fp32 False +[2024-04-24 07:22:17,399] [INFO] [config.py:996:print] zero_allow_untested_optimizer False +[2024-04-24 07:22:17,399] [INFO] [config.py:996:print] zero_config .................. stage=0 contiguous_gradients=True reduce_scatter=False reduce_bucket_size=500,000,000 use_multi_rank_bucket_allreduce=True allgather_partitions=True allgather_bucket_size=500,000,000 overlap_comm=False load_from_fp32_weights=True elastic_checkpoint=False offload_param=None offload_optimizer=None sub_group_size=1,000,000,000 cpu_offload_param=None cpu_offload_use_pin_memory=None cpu_offload=None prefetch_bucket_size=50,000,000 param_persistence_threshold=100,000 model_persistence_threshold=sys.maxsize max_live_parameters=1,000,000,000 max_reuse_distance=1,000,000,000 gather_16bit_weights_on_model_save=False stage3_gather_fp16_weights_on_model_save=False use_all_reduce_for_fetch_params=False ignore_unused_parameters=True legacy_stage1=False round_robin_gradients=False zero_hpz_partition_size=1 zero_quantized_weights=False zero_quantized_nontrainable_weights=False zero_quantized_gradients=False mics_shard_size=-1 mics_hierarchical_params_gather=False memory_efficient_linear=True pipeline_loading_checkpoint=False override_module_apply=True +[2024-04-24 07:22:17,399] [INFO] [config.py:996:print] zero_enabled ................. False +[2024-04-24 07:22:17,399] [INFO] [config.py:996:print] zero_force_ds_cpu_optimizer .. True +[2024-04-24 07:22:17,399] [INFO] [config.py:996:print] zero_optimization_stage ...... 
0 +[2024-04-24 07:22:17,399] [INFO] [config.py:982:print_user_config] json = { + "train_batch_size": 256, + "train_micro_batch_size_per_gpu": 1, + "steps_per_print": 10, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": 0 + }, + "bf16": { + "enabled": true, + "accumulate_grads_via_hooks": true + }, + "fp16": { + "enabled": false + }, + "wall_clock_breakdown": false, + "pipeline": { + "pipe_partitioned": false, + "grad_partitioned": false + } +} +[2024-04-24 07:22:17,399] [INFO] [engine.py:99:__init__] CONFIG: micro_batches=128 micro_batch_size=1 +[2024-04-24 07:22:17,399] [INFO] [engine.py:139:__init__] is_pipe_partitioned= False is_grad_partitioned= False +[2024-04-24 07:22:17,669] [INFO] [engine.py:180:__init__] RANK=0 STAGE=0 LAYERS=11 [0, 11) STAGE_PARAMS=1397964800 (1397.965M) TOTAL_PARAMS=5591869440 (5591.869M) UNIQUE_PARAMS=5591869440 (5591.869M) +[2024-04-24 07:22:17,669] [INFO] [engine.py:180:__init__] RANK=1 STAGE=0 LAYERS=11 [0, 11) STAGE_PARAMS=1397964800 (1397.965M) TOTAL_PARAMS=5591869440 (5591.869M) UNIQUE_PARAMS=5591869440 (5591.869M) +[2024-04-24 07:22:17,669] [INFO] [engine.py:180:__init__] RANK=4 STAGE=1 LAYERS=13 [11, 24) STAGE_PARAMS=1397969920 (1397.970M) TOTAL_PARAMS=5591869440 (5591.869M) UNIQUE_PARAMS=5591869440 (5591.869M) +[2024-04-24 07:22:17,670] [INFO] [engine.py:180:__init__] RANK=5 STAGE=1 LAYERS=13 [11, 24) STAGE_PARAMS=1397969920 (1397.970M) TOTAL_PARAMS=5591869440 (5591.869M) UNIQUE_PARAMS=5591869440 (5591.869M) +[2024-04-24 07:22:17,672] [WARNING] [engine.py:2763:load_checkpoint] Unable to find latest file at /data/output/llama13b_5M//checkpoints_zero_stage_2/latest, if trying to load latest checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint. +[2024-04-24 07:22:17,673] [WARNING] [engine.py:2763:load_checkpoint] Unable to find latest file at /data/output/llama13b_5M//checkpoints_zero_stage_2/latest, if trying to load latest checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint. +[2024-04-24 07:22:17,674] [WARNING] [engine.py:2763:load_checkpoint] Unable to find latest file at /data/output/llama13b_5M//checkpoints_zero_stage_2/latest, if trying to load latest checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint. +[2024-04-24 07:22:17,674] [WARNING] [engine.py:2763:load_checkpoint] Unable to find latest file at /data/output/llama13b_5M//checkpoints_zero_stage_2/latest, if trying to load latest checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint. +[2024-04-24 07:22:17,674] [WARNING] [engine.py:2763:load_checkpoint] Unable to find latest file at /data/output/llama13b_5M//checkpoints_zero_stage_2/latest, if trying to load latest checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint. +[2024-04-24 07:22:17,674] [WARNING] [engine.py:2763:load_checkpoint] Unable to find latest file at /data/output/llama13b_5M//checkpoints_zero_stage_2/latest, if trying to load latest checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint. +[2024-04-24 07:22:17,674] [WARNING] [engine.py:2763:load_checkpoint] Unable to find latest file at /data/output/llama13b_5M//checkpoints_zero_stage_2/latest, if trying to load latest checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint. 
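Note: the effective batch size in the config above is self-consistent: train_batch_size 256 = micro_batch_size 1 x gradient_accumulation_steps 128 x a data-parallel width of 2 (8 devices split across tensor-parallel 2 x pipeline-parallel 2). The eight "Unable to find latest file" warnings are likewise expected on a first run: DeepSpeed resolves which checkpoint to resume from by reading a plain-text file named "latest" in the save directory, and that file only exists after the first successful save. A minimal sketch of that lookup, assuming DeepSpeed's documented "latest"-tag convention (the path is taken from this log):

    import os

    ckpt_root = "/data/output/llama13b_5M//checkpoints_zero_stage_2"

    def resolve_latest_tag(root: str) -> str | None:
        # The "latest" file holds just the tag of the newest checkpoint,
        # e.g. "global_step0"; when it is missing, load_checkpoint warns and
        # the run starts from random weights, as happens in this log.
        latest = os.path.join(root, "latest")
        if not os.path.isfile(latest):
            return None
        with open(latest) as f:
            return f.read().strip()

    print(resolve_latest_tag(ckpt_root) or "no checkpoint; starting from random init")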
+WARNING: could not find the metadata file /data/output/llama13b_5M//checkpoints_zero_stage_2
+[2024-04-24 07:22:17,674] [WARNING] [engine.py:2763:load_checkpoint] Unable to find latest file at /data/output/llama13b_5M//checkpoints_zero_stage_2/latest, if trying to load latest checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint.
+    will not load any checkpoints and will start from random
+time (ms) | load-checkpoint: 2.70
+[after model, optimizer, and learning rate scheduler are built] datetime: 2024-04-24 07:22:17
+> building train, validation, and test datasets ...
+ > datasets target sizes (minimum size):
+    train:      2560000
+    validation: 1282560
+    test:       2560
+> building train, validation, and test datasets for GPT ...
+Single data path provided for train, valid & test
+ > building dataset index ...
+    reading sizes...
+    reading pointers...
+    reading document index...
+    creating numpy buffer of mmap...
+    creating memory view of numpy buffer...
+ > finished creating indexed dataset in 0.010700 seconds
+    number of documents: 1558306
+ > dataset split:
+    train:
+     document indices in [0, 1509999) total of 1509999 documents
+    validation:
+     document indices in [1509999, 1556748) total of 46749 documents
+    test:
+     document indices in [1556748, 1558306) total of 1558 documents
+Loading dataset index file from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_doc_idx.npy
+Loading dataset index file from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_doc_idx.npy
+Loading dataset index file from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_doc_idx.npy
+Loading dataset index file from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_doc_idx.npy
+ > loaded doc-idx mapping from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_doc_idx.npy
+Loading dataset index file from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_sample_idx.npy
+Loading dataset index file from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_sample_idx.npy
+Loading dataset index file from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_sample_idx.npy
+Loading dataset index file from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_sample_idx.npy
+ > loaded sample-idx mapping from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_sample_idx.npy
+Loading dataset index file from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_shuffle_idx.npy
+Loading dataset index file from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_shuffle_idx.npy
+Loading dataset index file from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_shuffle_idx.npy
+Loading dataset index file from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_shuffle_idx.npy
+ > loaded shuffle-idx mapping from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_shuffle_idx.npy
+    loaded indexed file in 0.004 seconds
+    total number of samples: 15244235
+    total number of epochs: 1
+Loading dataset index file from /data/arxiv//tokenized_text_document_valid_indexmap_1282560ns_2048sl_1234s_doc_idx.npy
+Loading dataset index file from /data/arxiv//tokenized_text_document_valid_indexmap_1282560ns_2048sl_1234s_doc_idx.npy
+Loading dataset index file from /data/arxiv//tokenized_text_document_valid_indexmap_1282560ns_2048sl_1234s_doc_idx.npy
+Loading dataset index file from /data/arxiv//tokenized_text_document_valid_indexmap_1282560ns_2048sl_1234s_doc_idx.npy
+ > loaded doc-idx mapping from /data/arxiv//tokenized_text_document_valid_indexmap_1282560ns_2048sl_1234s_doc_idx.npy
+Loading dataset index file from /data/arxiv//tokenized_text_document_valid_indexmap_1282560ns_2048sl_1234s_sample_idx.npy
+Loading dataset index file from /data/arxiv//tokenized_text_document_valid_indexmap_1282560ns_2048sl_1234s_sample_idx.npy
+Loading dataset index file from /data/arxiv//tokenized_text_document_valid_indexmap_1282560ns_2048sl_1234s_sample_idx.npy
+Loading dataset index file from /data/arxiv//tokenized_text_document_valid_indexmap_1282560ns_2048sl_1234s_sample_idx.npy
+ > loaded sample-idx mapping from /data/arxiv//tokenized_text_document_valid_indexmap_1282560ns_2048sl_1234s_sample_idx.npy
+Loading dataset index file from /data/arxiv//tokenized_text_document_valid_indexmap_1282560ns_2048sl_1234s_shuffle_idx.npy
+Loading dataset index file from /data/arxiv//tokenized_text_document_valid_indexmap_1282560ns_2048sl_1234s_shuffle_idx.npy
+Loading dataset index file from /data/arxiv//tokenized_text_document_valid_indexmap_1282560ns_2048sl_1234s_shuffle_idx.npy
+Loading dataset index file from /data/arxiv//tokenized_text_document_valid_indexmap_1282560ns_2048sl_1234s_shuffle_idx.npy
+ > loaded shuffle-idx mapping from /data/arxiv//tokenized_text_document_valid_indexmap_1282560ns_2048sl_1234s_shuffle_idx.npy
+    loaded indexed file in 0.003 seconds
+    total number of samples: 1443484
+    total number of epochs: 3
+Loading dataset index file from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_doc_idx.npy
+Loading dataset index file from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_doc_idx.npy
+Loading dataset index file from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_doc_idx.npy
+Loading dataset index file from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_doc_idx.npy
+ > loaded doc-idx mapping from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_doc_idx.npy
+Loading dataset index file from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_sample_idx.npy
+Loading dataset index file from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_sample_idx.npy
+Loading dataset index file from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_sample_idx.npy
+Loading dataset index file from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_sample_idx.npy
+ > loaded sample-idx mapping from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_sample_idx.npy
+Loading dataset index file from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_shuffle_idx.npy
+Loading dataset index file from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_shuffle_idx.npy
+Loading dataset index file from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_shuffle_idx.npy
+Loading dataset index file from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_shuffle_idx.npy
+ > loaded shuffle-idx mapping from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_shuffle_idx.npy
+    loaded indexed file in 0.032 seconds
+    total number of samples: 16581
+    total number of epochs: 1
+> finished creating GPT datasets ...
+[after dataloaders are built] datetime: 2024-04-24 07:22:19
+done with setup ...
+training ...
+time (ms) | model-and-optimizer-setup: 2739.98 | train/valid/test-data-iterators-setup: 1518.83
+[before the start of training step] datetime: 2024-04-24 07:22:19
+******************INIT_SAVING_20******************
+******************INIT_SAVING_20******************
+******************INIT_SAVING_20******************
+******************INIT_SAVING_20******************
+******************INIT_SAVING_20******************
+******************INIT_SAVING_20******************
+******************INIT_SAVING_20******************
+ iteration       10/   10000 | consumed samples: 2560 | consumed tokens: 5242880 | elapsed time per iteration (ms): 0.5 | learning rate: 0.000E+00 | global batch size: 256 | loss scale: 0.0 | grad norm: 1.000 | num zeros: 1.0 | number of skipped iterations: 10 | number of nan iterations: 0 | samples per second: 514096.440 | TFLOPs: 4392335.75 |
+ iteration       20/   10000 | consumed samples: 5120 | consumed tokens: 10485760 | elapsed time per iteration (ms): 0.6 | learning rate: 0.000E+00 | global batch size: 256 | loss scale: 0.0 | grad norm: 1.000 | num zeros: 1.0 | number of skipped iterations: 10 | number of nan iterations: 0 | samples per second: 431793.873 | TFLOPs: 3689159.30 |
+******************INIT_SAVING_20******************
+saving checkpoint at iteration      20 to /data/output/llama13b_5M//checkpoints_zero_stage_2
+[2024-04-24 07:22:19,375] [INFO] [logging.py:96:log_dist] [Rank 0] [Torch] Checkpoint global_step0 is about to be saved!
+[2024-04-24 07:22:19,569] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_01-model_00-model_states.pt...
+[2024-04-24 07:22:19,574] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_01-model_01-model_states.pt...
+[2024-04-24 07:22:19,615] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_11-model_01-model_states.pt...
+[2024-04-24 07:22:19,624] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_11-model_00-model_states.pt...
+[2024-04-24 07:22:19,814] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_01-model_00-model_states.pt.
+[2024-04-24 07:22:19,835] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_01-model_01-model_states.pt.
+[2024-04-24 07:22:19,910] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_11-model_01-model_states.pt.
+[2024-04-24 07:22:19,914] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_03-model_00-model_states.pt...
+[2024-04-24 07:22:19,938] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_03-model_01-model_states.pt...
+[2024-04-24 07:22:19,944] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_11-model_00-model_states.pt.
+[2024-04-24 07:22:20,001] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_12-model_01-model_states.pt... +[2024-04-24 07:22:20,035] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_12-model_00-model_states.pt... +[2024-04-24 07:22:20,259] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_03-model_00-model_states.pt. +[2024-04-24 07:22:20,276] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_03-model_01-model_states.pt. +[2024-04-24 07:22:20,292] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_12-model_01-model_states.pt. +[2024-04-24 07:22:20,326] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_12-model_00-model_states.pt. +[2024-04-24 07:22:20,332] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_04-model_00-model_states.pt... +[2024-04-24 07:22:20,367] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_04-model_01-model_states.pt... +[2024-04-24 07:22:20,376] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_13-model_01-model_states.pt... +[2024-04-24 07:22:20,426] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_13-model_00-model_states.pt... +[2024-04-24 07:22:20,617] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_04-model_00-model_states.pt. +[2024-04-24 07:22:20,682] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_04-model_01-model_states.pt. +[2024-04-24 07:22:20,686] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_05-model_00-model_states.pt... +[2024-04-24 07:22:20,696] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_13-model_01-model_states.pt. +[2024-04-24 07:22:20,727] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_13-model_00-model_states.pt. +[2024-04-24 07:22:20,770] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_05-model_01-model_states.pt... +[2024-04-24 07:22:20,783] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_14-model_01-model_states.pt... +[2024-04-24 07:22:20,822] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_14-model_00-model_states.pt... +[2024-04-24 07:22:20,986] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_05-model_00-model_states.pt. 
+[2024-04-24 07:22:21,055] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_06-model_00-model_states.pt... +[2024-04-24 07:22:21,090] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_05-model_01-model_states.pt. +[2024-04-24 07:22:21,116] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_14-model_00-model_states.pt. +[2024-04-24 07:22:21,148] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_14-model_01-model_states.pt. +[2024-04-24 07:22:21,171] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_06-model_01-model_states.pt... +[2024-04-24 07:22:21,198] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_15-model_00-model_states.pt... +[2024-04-24 07:22:21,236] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_15-model_01-model_states.pt... +[2024-04-24 07:22:21,341] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_06-model_00-model_states.pt. +[2024-04-24 07:22:21,414] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_07-model_00-model_states.pt... +[2024-04-24 07:22:21,475] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_06-model_01-model_states.pt. +[2024-04-24 07:22:21,508] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_15-model_00-model_states.pt. +[2024-04-24 07:22:21,549] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_15-model_01-model_states.pt. +[2024-04-24 07:22:21,563] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_07-model_01-model_states.pt... +[2024-04-24 07:22:21,593] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_16-model_00-model_states.pt... +[2024-04-24 07:22:21,639] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_16-model_01-model_states.pt... +[2024-04-24 07:22:21,695] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_07-model_00-model_states.pt. +[2024-04-24 07:22:21,769] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_08-model_00-model_states.pt... +[2024-04-24 07:22:21,866] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_07-model_01-model_states.pt. +[2024-04-24 07:22:21,881] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_16-model_00-model_states.pt. 
+[2024-04-24 07:22:21,946] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_08-model_01-model_states.pt... +[2024-04-24 07:22:21,958] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_17-model_00-model_states.pt... +[2024-04-24 07:22:21,969] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_16-model_01-model_states.pt. +[2024-04-24 07:22:22,044] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_17-model_01-model_states.pt... +[2024-04-24 07:22:22,067] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_08-model_00-model_states.pt. +[2024-04-24 07:22:22,142] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_09-model_00-model_states.pt... +[2024-04-24 07:22:22,259] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_08-model_01-model_states.pt. +[2024-04-24 07:22:22,264] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_17-model_00-model_states.pt. +[2024-04-24 07:22:22,337] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_09-model_01-model_states.pt... +[2024-04-24 07:22:22,346] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_18-model_00-model_states.pt... +[2024-04-24 07:22:22,348] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_17-model_01-model_states.pt. +[2024-04-24 07:22:22,418] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_09-model_00-model_states.pt. +[2024-04-24 07:22:22,429] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_18-model_01-model_states.pt... +[2024-04-24 07:22:22,488] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_10-model_00-model_states.pt... +[2024-04-24 07:22:22,628] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_09-model_01-model_states.pt. +[2024-04-24 07:22:22,643] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_18-model_00-model_states.pt. +[2024-04-24 07:22:22,647] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_20-model_00-model_states.pt... +[2024-04-24 07:22:22,652] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_20-model_00-model_states.pt. +[2024-04-24 07:22:22,708] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_10-model_01-model_states.pt... 
+[2024-04-24 07:22:22,723] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_18-model_01-model_states.pt. +[2024-04-24 07:22:22,726] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_20-model_01-model_states.pt... +[2024-04-24 07:22:22,729] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_21-model_00-model_states.pt... +[2024-04-24 07:22:22,738] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_20-model_01-model_states.pt. +[2024-04-24 07:22:22,756] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_10-model_00-model_states.pt. +[2024-04-24 07:22:22,758] [INFO] [logging.py:96:log_dist] [Rank 0] Saving model checkpoint: /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/mp_rank_00_model_states.pt +[2024-04-24 07:22:22,758] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/mp_rank_00_model_states.pt... +[2024-04-24 07:22:22,780] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/mp_rank_00_model_states.pt. +[2024-04-24 07:22:22,783] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt... +[2024-04-24 07:22:22,785] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt... +[2024-04-24 07:22:22,807] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_21-model_01-model_states.pt... +[2024-04-24 07:22:22,983] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_21-model_00-model_states.pt. +[2024-04-24 07:22:22,985] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/mp_rank_02_model_states.pt... +[2024-04-24 07:22:23,009] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/mp_rank_02_model_states.pt. +[2024-04-24 07:22:23,013] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_02_optim_states.pt... +[2024-04-24 07:22:23,014] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_02_optim_states.pt... +[2024-04-24 07:22:23,016] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_10-model_01-model_states.pt. +[2024-04-24 07:22:23,018] [INFO] [logging.py:96:log_dist] [Rank 1] Saving model checkpoint: /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/mp_rank_01_model_states.pt +[2024-04-24 07:22:23,018] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/mp_rank_01_model_states.pt... 
+[2024-04-24 07:22:23,036] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/mp_rank_01_model_states.pt. +[2024-04-24 07:22:23,039] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_01_optim_states.pt... +[2024-04-24 07:22:23,040] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_01_optim_states.pt... +[2024-04-24 07:22:23,059] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_21-model_01-model_states.pt. +[2024-04-24 07:22:23,061] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/mp_rank_03_model_states.pt... +[2024-04-24 07:22:23,081] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/mp_rank_03_model_states.pt. +[2024-04-24 07:22:23,083] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_03_optim_states.pt... +[2024-04-24 07:22:23,085] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_03_optim_states.pt... +[2024-04-24 07:22:36,301] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt. +[2024-04-24 07:22:36,302] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt... +[2024-04-24 07:22:36,438] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_01_optim_states.pt. +[2024-04-24 07:22:36,439] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_01_optim_states.pt... +[2024-04-24 07:22:36,440] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_03_optim_states.pt. +[2024-04-24 07:22:36,441] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_03_optim_states.pt... +[2024-04-24 07:22:36,796] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_03_optim_states.pt. +[2024-04-24 07:22:36,797] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_03_optim_states.pt... +[2024-04-24 07:22:36,821] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_01_optim_states.pt. +[2024-04-24 07:22:36,822] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_01_optim_states.pt... 
+[2024-04-24 07:22:38,082] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_02_optim_states.pt. +[2024-04-24 07:22:38,083] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_02_optim_states.pt... +[2024-04-24 07:22:38,152] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_02_optim_states.pt. +[2024-04-24 07:22:38,152] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_02_optim_states.pt... +[2024-04-24 07:22:38,158] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt. +[2024-04-24 07:22:38,158] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt... +[2024-04-24 07:22:51,480] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt. +[2024-04-24 07:22:56,262] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_03_optim_states.pt. +[2024-04-24 07:22:56,262] [INFO] [engine.py:3481:_save_zero_checkpoint] bf16_zero checkpoint saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_03_optim_states.pt +[2024-04-24 07:22:56,262] [INFO] [torch_checkpoint_engine.py:34:commit] [Torch] Checkpoint global_step0 is ready now! +[2024-04-24 07:22:56,904] [INFO] [engine.py:3481:_save_zero_checkpoint] bf16_zero checkpoint saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt +[2024-04-24 07:22:56,904] [INFO] [torch_checkpoint_engine.py:34:commit] [Torch] Checkpoint global_step0 is ready now! +[2024-04-24 07:22:57,019] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_03_optim_states.pt. +[2024-04-24 07:22:57,020] [INFO] [engine.py:3481:_save_zero_checkpoint] bf16_zero checkpoint saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_03_optim_states.pt +[2024-04-24 07:22:57,020] [INFO] [torch_checkpoint_engine.py:34:commit] [Torch] Checkpoint global_step0 is ready now! +[2024-04-24 07:22:57,693] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_01_optim_states.pt. +[2024-04-24 07:22:57,694] [INFO] [engine.py:3481:_save_zero_checkpoint] bf16_zero checkpoint saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_01_optim_states.pt +[2024-04-24 07:22:57,694] [INFO] [torch_checkpoint_engine.py:34:commit] [Torch] Checkpoint global_step0 is ready now! +[2024-04-24 07:22:57,834] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt. 
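A note on the shard names in this save sequence, assuming DeepSpeed's usual checkpoint-naming convention (the log itself does not spell this out): mp_rank_00..mp_rank_03 index the four model-parallel ranks of the tensor-parallel 2 x pipeline-parallel 2 grid, while the pp_rank_{0,1} prefix on the optimizer files indexes the rank within the remaining data-parallel group of two, so one checkpoint carries 2 x 4 = 8 bf16 optimizer shards. A short sketch that enumerates exactly the files appearing above:

    # Hypothetical helper: reproduce the optimizer shard names for this run.
    # dp=2 and mp=4 follow from the launch flags (8 ranks, TP=2, PP=2).
    for dp_rank in range(2):
        for mp_rank in range(4):
            print(f"bf16_zero_pp_rank_{dp_rank}_mp_rank_{mp_rank:02d}_optim_states.pt")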
+[2024-04-24 07:22:57,834] [INFO] [engine.py:3481:_save_zero_checkpoint] bf16_zero checkpoint saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
+[2024-04-24 07:22:57,834] [INFO] [torch_checkpoint_engine.py:34:commit] [Torch] Checkpoint global_step0 is ready now!
+[2024-04-24 07:22:57,852] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_01_optim_states.pt.
+[2024-04-24 07:22:57,853] [INFO] [engine.py:3481:_save_zero_checkpoint] bf16_zero checkpoint saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_01_optim_states.pt
+[2024-04-24 07:22:57,853] [INFO] [torch_checkpoint_engine.py:34:commit] [Torch] Checkpoint global_step0 is ready now!
+[2024-04-24 07:23:02,350] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_02_optim_states.pt.
+[2024-04-24 07:23:02,351] [INFO] [engine.py:3481:_save_zero_checkpoint] bf16_zero checkpoint saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_02_optim_states.pt
+[2024-04-24 07:23:02,351] [INFO] [torch_checkpoint_engine.py:34:commit] [Torch] Checkpoint global_step0 is ready now!
+[2024-04-24 07:23:03,032] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_02_optim_states.pt.
+[2024-04-24 07:23:03,032] [INFO] [engine.py:3481:_save_zero_checkpoint] bf16_zero checkpoint saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_02_optim_states.pt
+[2024-04-24 07:23:03,033] [INFO] [torch_checkpoint_engine.py:34:commit] [Torch] Checkpoint global_step0 is ready now!
+Couldn't save model in huggingface format
+Couldn't save model in huggingface format
+Couldn't save model in huggingface format
+Couldn't save model in huggingface format
+Couldn't save model in huggingface format
+Couldn't save model in huggingface format
+Couldn't save model in huggingface format
+Couldn't save model in huggingface format
+ successfully saved checkpoint at iteration      20 to /data/output/llama13b_5M//checkpoints_zero_stage_2
+Checkpoint Save GB: 78.286, GB/Sec: 1.79, Latency(second): 43.674
+time (ms) | save-checkpoint: 43676.29
+/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+  warnings.warn(
+[2024-04-24 07:23:07,111] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect)
+Convert DeepSpeed Checkpoint to Universal Checkpoint
+args = Namespace(input_folder='/data/output/llama13b_5M//checkpoints_zero_stage_2', output_folder='/data/output/univ_ckpt_new', num_extract_workers=4, num_merge_workers=2)
+Converting DeepSpeed checkpoint in /data/output/llama13b_5M//checkpoints_zero_stage_2 to Universal checkpoint in /data/output/univ_ckpt_new
+*** 1. Extracting ZeRO fragments
+  0%|          | 0/2 [00:00
+Traceback (most recent call last):
+    main()
+  File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/tools/convert_checkpoint/universal_to_huggingface.py", line 329, in main
+    model = create_model(args, config)
+  File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/tools/convert_checkpoint/universal_to_huggingface.py", line 80, in create_model
+    model_config.vocab_size = config['MODEL']['vocab_size']
+KeyError: 'vocab_size'
+Using device 'hpu'
+/usr/local/lib/python3.10/dist-packages/torch/_utils.py:842: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+  return self.fget.__get__(instance, owner)()
+/usr/local/lib/python3.10/dist-packages/datasets/load.py:1486: FutureWarning: The repository for hellaswag contains custom code which must be executed to correctly load the dataset. You can inspect the repository content at https://hf.co/datasets/hellaswag
+You can avoid this message in future by passing the argument `trust_remote_code=True`.
+Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`.
+  warnings.warn(
+Task: hellaswag; number of docs: 10042
+Task: hellaswag; document 0; context prompt (starting on next line):
+Personal Care and Style: How to increase breast size with a bra. Check your bra size. Wearing a bra that is too big will not make your breasts look larger. That is why it is important to wear the right size bra for you.
+(end of prompt on previous line)
+Requests: [Req_loglikelihood('Personal Care and Style: How to increase breast size with a bra. Check your bra size. Wearing a bra that is too big will not make your breasts look larger. That is why it is important to wear the right size bra for you.', ' You can visit a lingerie shop and have them measure you to help you fit a bra to your size, or measure yourself before you shop for a new bra to ensure that you get a good fit. Use a flexible tape measure, like one found in a sewing kit.')[0]
+, Req_loglikelihood('Personal Care and Style: How to increase breast size with a bra. Check your bra size. Wearing a bra that is too big will not make your breasts look larger. That is why it is important to wear the right size bra for you.', ' This is why it is important to keep your breasts under protection when in the shower and only wear bras that are larger than your breast size. If you are not wearing a bra, try wearing something that is a little bigger.')[0]
+, Req_loglikelihood('Personal Care and Style: How to increase breast size with a bra. Check your bra size. Wearing a bra that is too big will not make your breasts look larger. That is why it is important to wear the right size bra for you.', ' For a girl, a bra with a support strap will be easier for her, because most women are unable to pull through bra straps and bras that are too small will not be able to support breasts from side-to-side. Many bras have even been created that cover the breast side, and can be sent to other women in the world to make them look bigger.')[0]
+, Req_loglikelihood('Personal Care and Style: How to increase breast size with a bra. Check your bra size. Wearing a bra that is too big will not make your breasts look larger. That is why it is important to wear the right size bra for you.', ' Choose a color that is flattering to your breast type and specific event, in addition to those that make you uncomfortable. Look for sports bras made from natural material, such as spandex or lycra, as this is a more breathable bra.')[0]
+]
+Running loglikelihood requests
+  0%|          | 0/40145 [00:00
+Traceback (most recent call last):
+    main()
+  File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/tools/convert_checkpoint/universal_to_huggingface.py", line 329, in main
+    model = create_model(args, config)
+  File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/tools/convert_checkpoint/universal_to_huggingface.py", line 80, in create_model
+    model_config.vocab_size = config['MODEL']['vocab_size']
+KeyError: 'vocab_size'
+Using device 'hpu'
+/usr/local/lib/python3.10/dist-packages/datasets/load.py:1486: FutureWarning: The repository for hellaswag contains custom code which must be executed to correctly load the dataset. You can inspect the repository content at https://hf.co/datasets/hellaswag
+You can avoid this message in future by passing the argument `trust_remote_code=True`.
+Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`.
+  warnings.warn(
+Task: hellaswag; number of docs: 10042
+Task: hellaswag; document 0; context prompt (starting on next line):
+Personal Care and Style: How to increase breast size with a bra. Check your bra size. Wearing a bra that is too big will not make your breasts look larger. That is why it is important to wear the right size bra for you.
+(end of prompt on previous line)
+Requests: [Req_loglikelihood('Personal Care and Style: How to increase breast size with a bra. Check your bra size. Wearing a bra that is too big will not make your breasts look larger. That is why it is important to wear the right size bra for you.', ' You can visit a lingerie shop and have them measure you to help you fit a bra to your size, or measure yourself before you shop for a new bra to ensure that you get a good fit. Use a flexible tape measure, like one found in a sewing kit.')[0]
+, Req_loglikelihood('Personal Care and Style: How to increase breast size with a bra. Check your bra size. Wearing a bra that is too big will not make your breasts look larger. That is why it is important to wear the right size bra for you.', ' This is why it is important to keep your breasts under protection when in the shower and only wear bras that are larger than your breast size. If you are not wearing a bra, try wearing something that is a little bigger.')[0]
+, Req_loglikelihood('Personal Care and Style: How to increase breast size with a bra. Check your bra size. Wearing a bra that is too big will not make your breasts look larger. That is why it is important to wear the right size bra for you.', ' For a girl, a bra with a support strap will be easier for her, because most women are unable to pull through bra straps and bras that are too small will not be able to support breasts from side-to-side. Many bras have even been created that cover the breast side, and can be sent to other women in the world to make them look bigger.')[0]
+, Req_loglikelihood('Personal Care and Style: How to increase breast size with a bra. Check your bra size. Wearing a bra that is too big will not make your breasts look larger. That is why it is important to wear the right size bra for you.', ' Choose a color that is flattering to your breast type and specific event, in addition to those that make you uncomfortable. Look for sports bras made from natural material, such as spandex or lycra, as this is a more breathable bra.')[0]
+]
+Running loglikelihood requests
+  0%|          | 0/40145 [00:00