diff --git a/132node/ds_config.json b/132node/ds_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..d0ee47e9cda5f537a1be4430e2806d723acd2356
--- /dev/null
+++ b/132node/ds_config.json
@@ -0,0 +1,19 @@
+{
+    "train_batch_size" : 256,
+    "train_micro_batch_size_per_gpu": 1,
+    "steps_per_print": 10,
+    "gradient_clipping": 1.0,
+    "zero_optimization": {
+        "stage": 0
+    },
+    "bf16": {
+        "enabled": true,
+        "accumulate_grads_via_hooks": true
+    },
+    "fp16": {"enabled": false},
+    "wall_clock_breakdown": false,
+    "pipeline": {
+        "pipe_partitioned": false,
+        "grad_partitioned": false
+    }
+}
diff --git a/132node/log.txt b/132node/log.txt
new file mode 100644
index 0000000000000000000000000000000000000000..4f3df373f62ba4ebe1a344d844680a0b26aad004
--- /dev/null
+++ b/132node/log.txt
@@ -0,0 +1,1173 @@
+/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+  warnings.warn(
+[2024-04-03 22:02:56,583] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect)
+[2024-04-03 22:02:57,638] [WARNING] [runner.py:206:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only.
+[2024-04-03 22:02:57,702] [INFO] [runner.py:585:main] cmd = /usr/bin/python3 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29500 --no_python --no_local_rank --enable_each_rank_log=None /usr/bin/bash -c cd /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed && python3 -u ./pretrain_llama.py --deepspeed --tensor-model-parallel-size 2 --pipeline-model-parallel-size 2 --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --layernorm-epsilon 1e-6 --num-layers 40 --hidden-size 5120 --ffn-hidden-size 13824 --num-attention-heads 40 --seq-length 2048 --micro-batch-size 1 --global-batch-size 256 --train-iters 10000 --log-interval 10 --eval-iters 10 --eval-interval 100 --data-path /data/arxiv/tokenized_text_document --vocab-file /data/arxiv/gpt2-vocab.json --merge-file /data/arxiv/gpt2-merges.txt --optimizer adamw --adam-beta1 0.9 --adam-beta2 0.95 --adam-eps 1e-6 --lr 3e-4 --lr-decay-style cosine --lr-warmup-iters 2000 --clip-grad 1.0 --weight-decay 0.1 --attention-dropout 0.1 --hidden-dropout 0.1 --tensorboard-dir /data/output/132node/tensorboard --log-validation-ppl-to-tensorboard --log-batch-size-to-tensorboard --log-timers-to-tensorboard --load /data/output/132node/checkpoints --deepspeed_config=/data/output/132node/ds_config.json --zero-stage=0 --exit-interval 0 --no-masked-softmax-fusion --no-bias-gelu-fusion --no-bias-dropout-fusion --use-torch-compile false --use-fused-sdpa true --use-fused-sdpa-with-recompute false --bf16 --use_hpu --distributed-backend=hccl --hpu-deterministic --sequence-parallel --save /data/output/132node/checkpoints --save-interval 2000 --verify-checkpoint --verify-checkpoint-model-type LLAMA
+/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+  warnings.warn(
+[2024-04-03 22:02:59,064] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect)
+[2024-04-03 22:03:00,115] [INFO] [launch.py:146:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}
+[2024-04-03 22:03:00,115] [INFO] [launch.py:152:main] nnodes=1, num_local_procs=8, node_rank=0
+[2024-04-03 22:03:00,115] [INFO] [launch.py:163:main] global_rank_mapping=defaultdict(<class 'list'>, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]})
+[2024-04-03 22:03:00,115] [INFO] [launch.py:164:main] dist_world_size=8
+[2024-04-03 22:03:00,115] [INFO] [launch.py:166:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
+/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+  warnings.warn(
+[2024-04-03 22:03:01,731] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect)
+--------------------------------------------------
+DeepSpeed C++/CUDA extension op report
+--------------------------------------------------
+NOTE: Ops not installed will be just-in-time (JIT) compiled at
+      runtime if needed. Op compatibility means that your system
+      meet the required dependencies to JIT install the op.
+--------------------------------------------------
+JIT compiled ops requires ninja
+ninja .................. [OKAY]
+--------------------------------------------------
+op name ................ installed .. compatible
+--------------------------------------------------
+cpu_adam ............... [NO] ....... [OKAY]
+fused_adam ............. [NO] ....... [OKAY]
+deepspeed_not_implemented [NO] ....... [OKAY]
+transformer_inference .. [NO] ....... [OKAY]
+--------------------------------------------------
+DeepSpeed general environment info:
+torch install path ............... ['/usr/local/lib/python3.10/dist-packages/torch']
+torch version .................... 2.1.1a0+gitb51c9f6
+deepspeed install path ........... ['/usr/local/lib/python3.10/dist-packages/deepspeed']
+deepspeed info ................... 0.12.4+hpu.synapse.v1.14.0, fad45b2, 1.14.0
+deepspeed wheel compiled w. ...... torch 2.1
+shared memory (/dev/shm) size .... 503.72 GB
+fatal: detected dubious ownership in repository at '/Model-References'
+To add an exception for this directory, call:
+
+	git config --global --add safe.directory /Model-References
+**** Git info for Megatron: git_hash=unknown git_branch=unknown ****
+using world size: 8, data-parallel-size: 2, tensor-model-parallel size: 2, pipeline-model-parallel size: 2
+accumulate and all-reduce gradients in fp32 for bfloat16 data type.
+using torch.bfloat16 for parameters ...
+------------------------ arguments ------------------------
+  accumulate_allreduce_grads_in_fp32 .............. True
+  activation_func_type ............................ swiglu
+  adam_beta1 ...................................... 0.9
+  adam_beta2 ...................................... 0.95
+  adam_eps ........................................ 1e-06
+  adlr_autoresume ................................. False
+  adlr_autoresume_interval ........................ 1000
+  aml_data_download_path .......................... None
+  apply_layernorm_weight_plus_one ................. False
+  apply_query_key_layer_scaling ................... True
+  apply_residual_connection_post_layernorm ........ False
+  attention_dropout ............................... 0.1
+  attention_softmax_in_fp32 ....................... False
+  bert_binary_head ................................ True
+  bert_load ....................................... None
+  bf16 ............................................ True
+  bias_dropout_fusion ............................. False
+  bias_gelu_fusion ................................ False
+  biencoder_projection_dim ........................ 0
+  biencoder_shared_query_context_model ............ False
+  block_data_path ................................. None
+  cache_fp8_weight ................................ False
+  cache_fp8_weight_fwd ............................ True
+  checkpoint_activations .......................... False
+  checkpoint_activations_granularity .............. full
+  checkpoint_in_cpu ............................... False
+  checkpoint_num_layers ........................... 1
+  clearml_config_path ............................. None
+  clearml_continue_exp ............................ False
+  clearml_exp_name ................................ None
+  clip_grad ....................................... 1.0
+  compression_training ............................ False
+  consumed_train_samples .......................... 0
+  consumed_train_tokens ........................... 0
+  consumed_valid_samples .......................... 0
+  contigious_checkpointing ........................ False
+  cpu_optimizer ................................... False
+  cpu_torch_adam .................................. False
+  create_moe_param_group .......................... False
+  curriculum_learning ............................. False
+  data_idx_path ................................... None
+  data_impl ....................................... infer
+  data_parallel_size .............................. 2
+  data_path ....................................... ['/data/arxiv/tokenized_text_document']
+  data_sharding ................................... True
+  dataloader_type ................................. single
+  DDP_impl ........................................ local
+  decoder_seq_length .............................. None
+  deepscale ....................................... False
+  deepscale_config ................................ None
+  deepspeed ....................................... True
+  deepspeed_activation_checkpointing .............. False
+  deepspeed_config ................................ /data/output/132node/ds_config.json
+  deepspeed_mpi ................................... False
+  distribute_checkpointed_activations ............. False
+  distributed_backend ............................. hccl
+  do_layernorm_bias_weight_decay .................. False
+  do_pretrain_validation .......................... False
+  ds_inference .................................... False
+  ds_pipeline_enabled ............................. True
+  embed_layernorm ................................. False
+  embedding_path .................................. None
+  enable_expert_tensor_parallelism ................ False
+  encoder_seq_length .............................. 2048
+  eod_mask_loss ................................... False
+  eval_interval ................................... 100
+  eval_iters ...................................... 10
+  eval_loss_exit_value ............................ None
+  eval_micro_batch_size ........................... 1
+  evidence_data_path .............................. None
+  exit_duration_in_mins ........................... None
+  exit_interval ................................... 0
+  expert_interval ................................. 2
+  ffn_hidden_coeff ................................ 2.6666666666666665
+  ffn_hidden_size ................................. 13824
+  finetune ........................................ False
+  fix_position_emb_redundant_alloc ................ False
+  flatten_linear_operands ......................... False
+  fp16 ............................................ False
+  fp16_lm_cross_entropy ........................... False
+  fp32_residual_connection ........................ False
+  global_batch_size ............................... 256
+  hidden_dropout .................................. 0.1
+  hidden_size ..................................... 5120
+  hidden_size_teacher ............................. None
+  hpu_deterministic ............................... True
+  hpu_fp8_format .................................. e5m2
+  hpu_fp8_measure_interval ........................ 10
+  hysteresis ...................................... 2
+  ict_head_size ................................... None
+  ict_load ........................................ None
+  img_dim ......................................... 224
+  indexer_batch_size .............................. 128
+  indexer_log_interval ............................ 1000
+  inference ....................................... False
+  init_method_std ................................. 0.02
+  init_method_xavier_uniform ...................... False
+  initial_loss_scale .............................. 4294967296
+  kd .............................................. False
+  kd_alpha_ce ..................................... 1
+  kd_beta_ce ...................................... 1
+  kd_temp ......................................... 1.0
+  kill_switch_path ................................ None
+  kv_channels ..................................... 128
+  layernorm_epsilon ............................... 1e-06
+  layernorm_type .................................. rmsnorm
+  lazy_mpu_init ................................... None
+  load ............................................ /data/output/132node/checkpoints
+  load_teacher .................................... None
+  local_rank ...................................... 0
+  log_batch_size_to_tensorboard ................... True
+  log_bwd_grads ................................... False
+  log_fwd_activations ............................. False
+  log_interval .................................... 10
+  log_learning_rate_to_tensorboard ................ True
+  log_loss_scale_to_tensorboard ................... True
+  log_model_inputs ................................ False
+  log_num_zeros_in_grad ........................... False
+  log_optimizer_states_to_tensorboard ............. False
+  log_params_norm ................................. False
+  log_timers_to_tensorboard ....................... True
+  log_validation_ppl_to_tensorboard ............... True
+  loss_scale ...................................... None
+  loss_scale_window ............................... 1000
+  lr .............................................. 0.0003
+  lr_decay_iters .................................. None
+  lr_decay_samples ................................ None
+  lr_decay_style .................................. cosine
+  lr_decay_tokens ................................. None
+  lr_warmup_fraction .............................. None
+  lr_warmup_iters ................................. 2000
+  lr_warmup_samples ............................... 0
+  lr_warmup_tokens ................................ None
+  make_vocab_size_divisible_by .................... 128
+  mask_prob ....................................... 0.15
+  mask_tensor_adding .............................. False
+  masked_softmax_fusion ........................... False
+  max_position_embeddings ......................... None
+  memory_centric_tiled_linear ..................... False
+  merge_file ...................................... /data/arxiv/gpt2-merges.txt
+  micro_batch_size ................................ 1
+  min_loss_scale .................................. 1.0
+  min_lr .......................................... 0.0
+  mlp_type ........................................ standard
+  mmap_warmup ..................................... False
+  moe_eval_capacity_factor ........................ 1.0
+  moe_expert_parallel_size ........................ 1
+  moe_loss_coeff .................................. 0.1
+  moe_min_capacity ................................ 4
+  moe_token_dropping .............................. True
+  moe_train_capacity_factor ....................... 1.0
+  mos ............................................. False
+  no_bias ......................................... True
+  no_cuda ......................................... False
+  no_load_lr_state ................................ False
+  no_load_optim ................................... None
+  no_load_rng ..................................... None
+  no_pipeline_parallel ............................ False
+  no_save_optim ................................... None
+  no_save_rng ..................................... None
+  no_scaled_init .................................. False
+  num_attention_heads ............................. 40
+  num_attention_heads_teacher ..................... None
+  num_channels .................................... 3
+  num_classes ..................................... 1000
+  num_experts ..................................... [1]
+  num_experts_teacher ............................. [1]
+  num_key_value_heads ............................. 40
+  num_layers ...................................... 40
+  num_layers_per_virtual_pipeline_stage ........... None
+  num_layers_teacher .............................. None
+  num_workers ..................................... 2
+  onnx_safe ....................................... None
+  openai_gelu ..................................... False
+  optimizer ....................................... adamw
+  override_lr_scheduler ........................... False
+  params_dtype .................................... torch.bfloat16
+  partition_activations ........................... False
+  patch_dim ....................................... 16
+  pipeline_model_parallel_size .................... 2
+  position_embedding_type ......................... PositionEmbeddingType.rotary
+  profile ......................................... None
+  profile_backward ................................ False
+  profile_steps ................................... 2,3
+  query_in_block_prob ............................. 0.1
+  rampup_batch_size ............................... None
+  rank ............................................ 0
+  remote_device ................................... none
+  reset_attention_mask ............................ False
+  reset_iteration ................................. False
+  reset_position_ids .............................. False
+  retriever_report_topk_accuracies ................ []
+  retriever_score_scaling ......................... False
+  retriever_seq_length ............................ 256
+  sample_rate ..................................... 1.0
+  save ............................................ /data/output/132node/checkpoints
+  save_interval ................................... 2000
+  scatter_gather_tensors_in_pipeline .............. True
+  scattered_embeddings ............................ False
+  seed ............................................ 1234
+  seq_length ...................................... 2048
+  sequence_parallel ............................... True
+  sgd_momentum .................................... 0.9
+  short_seq_prob .................................. 0.1
+  skip_train ...................................... False
+  split ........................................... 969, 30, 1
+  split_transformers .............................. False
+  synchronize_each_layer .......................... False
+  tensor_logger_max_iter .......................... 0
+  tensor_logger_path .............................. None
+  tensor_model_parallel_size ...................... 2
+  tensorboard_dir ................................. /data/output/132node/tensorboard
+  tensorboard_log_interval ........................ 1
+  tensorboard_queue_size .......................... 1000
+  test_data_path .................................. None
+  tile_factor ..................................... 1
+  titles_data_path ................................ None
+  tokenizer_eod_id ................................ None
+  tokenizer_model_file ............................ None
+  tokenizer_type .................................. GPT2BPETokenizer
+  topk ............................................ 1
+  train_data_path ................................. None
+  train_iters ..................................... 10000
+  train_samples ................................... None
+  train_tokens .................................... None
+  universal_checkpoint ............................ False
+  use_checkpoint_lr_scheduler ..................... False
+  use_contiguous_buffers_in_ddp ................... True
+  use_cpu_initialization .......................... None
+  use_fused_sdpa .................................. True
+  use_fused_sdpa_with_recompute ................... False
+  use_hpu ......................................... True
+  use_hpu_fp8_transformer_engine .................. False
+  use_hpu_graphs .................................. False
+  use_one_sent_docs ............................... False
+  use_pin_memory .................................. False
+  use_rotary_v2 ................................... False
+  use_seq_len_plus_one_tokens ..................... True
+  use_torch_compile ............................... False
+  use_tutel ....................................... False
+  valid_data_path ................................. None
+  verify_checkpoint ............................... True
+  verify_checkpoint_model_type .................... LLAMA
+  verify_tp_workers ............................... False
+  verify_tp_workers_hash .......................... False
+  virtual_pipeline_model_parallel_size ............ None
+  vocab_extra_ids ................................. 0
+  vocab_file ...................................... /data/arxiv/gpt2-vocab.json
+  weight_decay .................................... 0.1
+  world_size ...................................... 8
+  zero_allgather_bucket_size ...................... 0.0
+  zero_contigious_gradients ....................... False
+  zero_reduce_bucket_size ......................... 0.0
+  zero_reduce_scatter ............................. False
+  zero_stage ...................................... 0
+-------------------- end of arguments ---------------------
+setting number of micro-batches to constant 128
+> building GPT2BPETokenizer tokenizer ...
+_initialize_distributed: Initializing with below params:
+args.local_rank: 6
+args.world_size: 8
+args.rank: 6
+args.distributed_backend: hccl
+_initialize_distributed: Initializing with below params:
+args.local_rank: 1
+args.world_size: 8
+args.rank: 1
+args.distributed_backend: hccl
+_initialize_distributed: Initializing with below params:
+args.local_rank: 3
+args.world_size: 8
+args.rank: 3
+args.distributed_backend: hccl
+_initialize_distributed: Initializing with below params:
+args.local_rank: 2
+args.world_size: 8
+args.rank: 2
+args.distributed_backend: hccl
+hccl device_count: 8
+[2024-04-03 22:03:03,241] [WARNING] [comm.py:163:init_deepspeed_backend] HCCL backend in DeepSpeed not yet implemented
+[2024-04-03 22:03:03,241] [INFO] [comm.py:637:init_distributed] cdb=None
+_initialize_distributed: Initializing with below params:
+args.local_rank: 4
+args.world_size: 8
+args.rank: 4
+args.distributed_backend: hccl
+ > padded vocab (size: 50257) with 175 dummy tokens (new size: 50432)
+_initialize_distributed: Initializing with below params:
+args.local_rank: 0
+args.world_size: 8
+args.rank: 0
+args.distributed_backend: hccl
+> setting tensorboard ...
+_initialize_distributed: Initializing with below params:
+args.local_rank: 7
+args.world_size: 8
+args.rank: 7
+args.distributed_backend: hccl
+> initializing torch distributed ...
+[2024-04-03 22:03:03,343] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend hccl
+_initialize_distributed: Initializing with below params:
+args.local_rank: 5
+args.world_size: 8
+args.rank: 5
+args.distributed_backend: hccl
+> initializing tensor model parallel with size 2
+> initializing pipeline model parallel with size 2
+> setting random seeds to 1234 ...
+> initializing model parallel cuda seeds on global rank 0, model parallel rank 0, and data parallel rank 0 with model parallel seed: 3952 and data parallel seed: 1234
+============================= HABANA PT BRIDGE CONFIGURATION ===========================
+ PT_HPU_LAZY_MODE = 1
+ PT_RECIPE_CACHE_PATH =
+ PT_CACHE_FOLDER_DELETE = 0
+ PT_HPU_RECIPE_CACHE_CONFIG =
+ PT_HPU_MAX_COMPOUND_OP_SIZE = 9223372036854775807
+ PT_HPU_LAZY_ACC_PAR_MODE = 0
+ PT_HPU_ENABLE_REFINE_DYNAMIC_SHAPES = 0
+---------------------------: System Configuration :---------------------------
+Num CPU Cores : 160
+CPU RAM : 1056375244 KB
+------------------------------------------------------------------------------
+> compiling dataset index builder ...
+make: Entering directory '/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/data'
+make: Nothing to be done for 'default'.
+make: Leaving directory '/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/data'
+>>> done with dataset index builder. Compilation time: 0.072 seconds
+WARNING: constraints for invoking optimized fused softmax kernel are not met. We default back to unfused kernel invocations.
+> compiling and loading fused kernels ...
+>>> done with compiling and loading fused kernels. Compilation time: 0.005 seconds
+time to initialize megatron (seconds): 29.272
+[after megatron is initialized] datetime: 2024-04-03 22:03:09
+building LLaMA model ...
+*************** Using FusedSDPA ******************
+/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/core/weight_sharing.py:53: UserWarning: "hpu:X" notation is not supported by Gaudi PyTorch intergration bridge. Please change to "hpu" without index (Triggered internally at /npu-stack/pytorch-integration/pytorch_helpers/lazy_to_backend.cpp:53.)
+  return super().__torch_function__(func, types, new_args, kwargs)
+ > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 3301253120
+ > number of parameters on (tensor, pipeline) model parallel rank (0, 1): 3301258240
+[2024-04-03 22:03:09,344] [INFO] [utils.py:824:see_memory_usage] Before Building Model
+ > number of parameters on (tensor, pipeline) model parallel rank (1, 1): 3301258240
+[2024-04-03 22:03:09,347] [INFO] [utils.py:825:see_memory_usage] MA 0.01 GB Max_MA 0.01 GB CA 0.0 GB Max_CA 0 GB
+[2024-04-03 22:03:09,348] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 345.07 GB, percent = 34.3%
+SEED_LAYERS=False BASE_SEED=1234 SEED_FN=None
+Using topology: {ProcessCoord(pipe=0, data=0, model=0): 0, ProcessCoord(pipe=0, data=0, model=1): 1, ProcessCoord(pipe=0, data=1, model=0): 2, ProcessCoord(pipe=0, data=1, model=1): 3, ProcessCoord(pipe=1, data=0, model=0): 4, ProcessCoord(pipe=1, data=0, model=1): 5, ProcessCoord(pipe=1, data=1, model=0): 6, ProcessCoord(pipe=1, data=1, model=1): 7}
+[2024-04-03 22:03:09,350] [INFO] [module.py:375:_partition_layers] Partitioning pipeline stages with method type:transformer
+stage=0 layers=23
+     0: _to_float16
+     1: EmbeddingPipe
+     2: <lambda>
+     3: ParallelTransformerLayerPipe
+     4: ParallelTransformerLayerPipe
+     5: ParallelTransformerLayerPipe
+     6: ParallelTransformerLayerPipe
+     7: ParallelTransformerLayerPipe
+     8: ParallelTransformerLayerPipe
+     9: ParallelTransformerLayerPipe
+    10: ParallelTransformerLayerPipe
+    11: ParallelTransformerLayerPipe
+    12: ParallelTransformerLayerPipe
+    13: ParallelTransformerLayerPipe
+    14: ParallelTransformerLayerPipe
+    15: ParallelTransformerLayerPipe
+    16: ParallelTransformerLayerPipe
+    17: ParallelTransformerLayerPipe
+    18: ParallelTransformerLayerPipe
+    19: ParallelTransformerLayerPipe
+    20: ParallelTransformerLayerPipe
+    21: ParallelTransformerLayerPipe
+    22: ParallelTransformerLayerPipe
+stage=1 layers=25
+    23: ParallelTransformerLayerPipe
+    24: ParallelTransformerLayerPipe
+    25: ParallelTransformerLayerPipe
+    26: ParallelTransformerLayerPipe
+    27: ParallelTransformerLayerPipe
+    28: ParallelTransformerLayerPipe
+    29: ParallelTransformerLayerPipe
+    30: ParallelTransformerLayerPipe
+    31: ParallelTransformerLayerPipe
+    32: ParallelTransformerLayerPipe
+    33: ParallelTransformerLayerPipe
+    34: ParallelTransformerLayerPipe
+    35: ParallelTransformerLayerPipe
+    36: ParallelTransformerLayerPipe
+    37: ParallelTransformerLayerPipe
+    38: ParallelTransformerLayerPipe
+    39: ParallelTransformerLayerPipe
+    40: ParallelTransformerLayerPipe
+    41: ParallelTransformerLayerPipe
+    42: ParallelTransformerLayerPipe
+    43: <lambda>
+    44: WrapName
+    45: WrapName
+    46: <lambda>
+    47: float16_to_fp32
+  loss: CrossEntropy
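Aside (not part of the log): the split above follows from the "type:transformer" method, which balances only the 40 ParallelTransformerLayerPipe layers across the two pipeline stages; the embedding, cast, and head entries ride along with their stage. A minimal sketch of that arithmetic in Python, using the numbers from the argument dump:

    # type:transformer partitioning balances transformer layers only
    num_layers = 40                    # num_layers from the args dump
    pipeline_stages = 2                # pipeline_model_parallel_size
    per_stage = num_layers // pipeline_stages
    assert per_stage == 20             # entries 3..22 and 23..42 in the listing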
+[2024-04-03 22:03:09,470] [INFO] [utils.py:824:see_memory_usage] After Building Model
+[2024-04-03 22:03:09,474] [INFO] [utils.py:825:see_memory_usage] MA 0.01 GB Max_MA 0.01 GB CA 0.0 GB Max_CA 0 GB
+[2024-04-03 22:03:09,474] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 345.56 GB, percent = 34.3%
+ > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 3301253120
+> learning rate decay style: cosine
+DeepSpeed is enabled.
+[2024-04-03 22:03:09,479] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed info: version=0.12.4+hpu.synapse.v1.14.0, git-hash=fad45b2, git-branch=1.14.0
+[2024-04-03 22:03:10,368] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed Flops Profiler Enabled: False
+[2024-04-03 22:03:10,369] [INFO] [logging.py:96:log_dist] [Rank 0] Using client Optimizer as basic optimizer
+[2024-04-03 22:03:10,369] [INFO] [logging.py:96:log_dist] [Rank 0] Removing param_group that has no 'params' in the basic Optimizer
+[2024-04-03 22:03:10,371] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed Basic Optimizer = AdamW
+[2024-04-03 22:03:10,371] [INFO] [logging.py:96:log_dist] [Rank 0] Creating BF16 optimizer
+[2024-04-03 22:03:10,445] [INFO] [utils.py:824:see_memory_usage] begin bf16_optimizer
+[2024-04-03 22:03:10,448] [INFO] [utils.py:825:see_memory_usage] MA 6.16 GB Max_MA 6.18 GB CA 0.0 GB Max_CA 0 GB
+[2024-04-03 22:03:10,448] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 348.0 GB, percent = 34.5%
+[2024-04-03 22:03:10,513] [INFO] [utils.py:824:see_memory_usage] before initializing group 0
+[2024-04-03 22:03:10,516] [INFO] [utils.py:825:see_memory_usage] MA 6.16 GB Max_MA 0.0 GB CA 0.0 GB Max_CA 0 GB
+[2024-04-03 22:03:10,516] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 348.08 GB, percent = 34.6%
+[2024-04-03 22:03:10,777] [INFO] [engine.py:139:__init__] is_pipe_partitioned= False is_grad_partitioned= False
+[2024-04-03 22:03:11,089] [INFO] [utils.py:824:see_memory_usage] after initializing group 0
+[2024-04-03 22:03:11,092] [INFO] [utils.py:825:see_memory_usage] MA 6.16 GB Max_MA 12.31 GB CA 0.0 GB Max_CA 0 GB
+[2024-04-03 22:03:11,092] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 348.52 GB, percent = 34.6%
+[2024-04-03 22:03:11,147] [INFO] [utils.py:824:see_memory_usage] before initializing group 1
+[2024-04-03 22:03:11,150] [INFO] [utils.py:825:see_memory_usage] MA 6.16 GB Max_MA 0.0 GB CA 0.0 GB Max_CA 0 GB
+[2024-04-03 22:03:11,150] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 348.52 GB, percent = 34.6%
+[2024-04-03 22:03:11,236] [INFO] [utils.py:824:see_memory_usage] after initializing group 1
+[2024-04-03 22:03:11,239] [INFO] [utils.py:825:see_memory_usage] MA 24.61 GB Max_MA 24.61 GB CA 0.0 GB Max_CA 0 GB
+[2024-04-03 22:03:11,239] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 348.52 GB, percent = 34.6%
[utils.py:832:see_memory_usage] CPU Virtual Memory: used = 348.52 GB, percent = 34.6% +[2024-04-03 22:03:11,240] [INFO] [engine.py:139:__init__] is_pipe_partitioned= False is_grad_partitioned= False +[2024-04-03 22:03:11,294] [INFO] [utils.py:824:see_memory_usage] before initialize_optimizer +[2024-04-03 22:03:11,297] [INFO] [utils.py:825:see_memory_usage] MA 24.61 GB Max_MA 0.0 GB CA 0.0 GB Max_CA 0 GB +[2024-04-03 22:03:11,297] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 348.55 GB, percent = 34.6% +[2024-04-03 22:03:11,348] [INFO] [utils.py:824:see_memory_usage] end initialize_optimizer +[2024-04-03 22:03:11,352] [INFO] [utils.py:825:see_memory_usage] MA 24.61 GB Max_MA 0.0 GB CA 0.0 GB Max_CA 0 GB +[2024-04-03 22:03:11,352] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 348.52 GB, percent = 34.6% +[2024-04-03 22:03:11,403] [INFO] [utils.py:824:see_memory_usage] end bf16_optimizer +[2024-04-03 22:03:11,406] [INFO] [utils.py:825:see_memory_usage] MA 24.61 GB Max_MA 0.0 GB CA 0.0 GB Max_CA 0 GB +[2024-04-03 22:03:11,406] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 348.52 GB, percent = 34.6% +[2024-04-03 22:03:11,407] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed Final Optimizer = BF16_Optimizer +[2024-04-03 22:03:11,407] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed using client LR scheduler +[2024-04-03 22:03:11,407] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed LR Scheduler = +[2024-04-03 22:03:11,407] [INFO] [logging.py:96:log_dist] [Rank 0] step=0, skipped=0, lr=[0.0, 0.0], mom=[(0.9, 0.95), (0.9, 0.95)] +[2024-04-03 22:03:11,408] [INFO] [config.py:992:print] DeepSpeedEngine configuration: +[2024-04-03 22:03:11,408] [INFO] [config.py:996:print] activation_checkpointing_config { + "partition_activations": false, + "contiguous_memory_optimization": false, + "cpu_checkpointing": false, + "number_checkpoints": null, + "synchronize_checkpoint_boundary": false, + "profile": false +} +[2024-04-03 22:03:11,408] [INFO] [config.py:996:print] aio_config ................... {'block_size': 1048576, 'queue_depth': 8, 'thread_count': 1, 'single_submit': False, 'overlap_events': True} +[2024-04-03 22:03:11,408] [INFO] [config.py:996:print] amp_enabled .................. False +[2024-04-03 22:03:11,408] [INFO] [config.py:996:print] amp_params ................... False +[2024-04-03 22:03:11,408] [INFO] [config.py:996:print] autotuning_config ............ { + "enabled": false, + "start_step": null, + "end_step": null, + "metric_path": null, + "arg_mappings": null, + "metric": "throughput", + "model_info": null, + "results_dir": "autotuning_results", + "exps_dir": "autotuning_exps", + "overwrite": true, + "fast": true, + "start_profile_step": 3, + "end_profile_step": 5, + "tuner_type": "gridsearch", + "tuner_early_stopping": 5, + "tuner_num_trials": 50, + "model_info_path": null, + "mp_size": 1, + "max_train_batch_size": null, + "min_train_batch_size": 1, + "max_train_micro_batch_size_per_gpu": 1.024000e+03, + "min_train_micro_batch_size_per_gpu": 1, + "num_tuning_micro_batch_sizes": 3 +} +[2024-04-03 22:03:11,408] [INFO] [config.py:996:print] bfloat16_accumulate_grads_via_hooks True +[2024-04-03 22:03:11,408] [INFO] [config.py:996:print] bfloat16_enabled ............. 
True +[2024-04-03 22:03:11,408] [INFO] [config.py:996:print] checkpoint_parallel_write_pipeline False +[2024-04-03 22:03:11,408] [INFO] [config.py:996:print] checkpoint_tag_validation_enabled True +[2024-04-03 22:03:11,408] [INFO] [config.py:996:print] checkpoint_tag_validation_fail False +[2024-04-03 22:03:11,408] [INFO] [config.py:996:print] comms_config ................. +[2024-04-03 22:03:11,408] [INFO] [config.py:996:print] communication_data_type ...... None +[2024-04-03 22:03:11,408] [INFO] [config.py:996:print] compression_config ........... {'weight_quantization': {'shared_parameters': {'enabled': False, 'quantizer_kernel': False, 'schedule_offset': 0, 'quantize_groups': 1, 'quantize_verbose': False, 'quantization_type': 'symmetric', 'quantize_weight_in_forward': False, 'rounding': 'nearest', 'fp16_mixed_quantize': False, 'quantize_change_ratio': 0.001}, 'different_groups': {}}, 'activation_quantization': {'shared_parameters': {'enabled': False, 'quantization_type': 'symmetric', 'range_calibration': 'dynamic', 'schedule_offset': 1000}, 'different_groups': {}}, 'sparse_pruning': {'shared_parameters': {'enabled': False, 'method': 'l1', 'schedule_offset': 1000}, 'different_groups': {}}, 'row_pruning': {'shared_parameters': {'enabled': False, 'method': 'l1', 'schedule_offset': 1000}, 'different_groups': {}}, 'head_pruning': {'shared_parameters': {'enabled': False, 'method': 'topk', 'schedule_offset': 1000}, 'different_groups': {}}, 'channel_pruning': {'shared_parameters': {'enabled': False, 'method': 'l1', 'schedule_offset': 1000}, 'different_groups': {}}, 'layer_reduction': {'enabled': False}} +[2024-04-03 22:03:11,408] [INFO] [config.py:996:print] curriculum_enabled_legacy .... False +[2024-04-03 22:03:11,408] [INFO] [config.py:996:print] curriculum_params_legacy ..... False +[2024-04-03 22:03:11,408] [INFO] [config.py:996:print] data_efficiency_config ....... {'enabled': False, 'seed': 1234, 'data_sampling': {'enabled': False, 'num_epochs': 1000, 'num_workers': 0, 'curriculum_learning': {'enabled': False}}, 'data_routing': {'enabled': False, 'random_ltd': {'enabled': False, 'layer_token_lr_schedule': {'enabled': False}}}} +[2024-04-03 22:03:11,408] [INFO] [config.py:996:print] data_efficiency_enabled ...... False +[2024-04-03 22:03:11,408] [INFO] [config.py:996:print] dataloader_drop_last ......... False +[2024-04-03 22:03:11,408] [INFO] [config.py:996:print] disable_allgather ............ False +[2024-04-03 22:03:11,408] [INFO] [config.py:996:print] dump_state ................... False +[2024-04-03 22:03:11,408] [INFO] [config.py:996:print] dynamic_loss_scale_args ...... None +[2024-04-03 22:03:11,408] [INFO] [config.py:996:print] eigenvalue_enabled ........... False +[2024-04-03 22:03:11,408] [INFO] [config.py:996:print] eigenvalue_gas_boundary_resolution 1 +[2024-04-03 22:03:11,408] [INFO] [config.py:996:print] eigenvalue_layer_name ........ bert.encoder.layer +[2024-04-03 22:03:11,408] [INFO] [config.py:996:print] eigenvalue_layer_num ......... 0 +[2024-04-03 22:03:11,408] [INFO] [config.py:996:print] eigenvalue_max_iter .......... 100 +[2024-04-03 22:03:11,408] [INFO] [config.py:996:print] eigenvalue_stability ......... 1e-06 +[2024-04-03 22:03:11,408] [INFO] [config.py:996:print] eigenvalue_tol ............... 0.01 +[2024-04-03 22:03:11,408] [INFO] [config.py:996:print] eigenvalue_verbose ........... False +[2024-04-03 22:03:11,408] [INFO] [config.py:996:print] elasticity_enabled ........... 
False +[2024-04-03 22:03:11,408] [INFO] [config.py:996:print] flops_profiler_config ........ { + "enabled": false, + "recompute_fwd_factor": 0.0, + "profile_step": 1, + "module_depth": -1, + "top_modules": 1, + "detailed": true, + "output_file": null +} +[2024-04-03 22:03:11,408] [INFO] [config.py:996:print] fp16_auto_cast ............... None +[2024-04-03 22:03:11,408] [INFO] [config.py:996:print] fp16_enabled ................. False +[2024-04-03 22:03:11,408] [INFO] [config.py:996:print] fp16_master_weights_and_gradients False +[2024-04-03 22:03:11,408] [INFO] [config.py:996:print] global_rank .................. 0 +[2024-04-03 22:03:11,408] [INFO] [config.py:996:print] grad_accum_dtype ............. None +[2024-04-03 22:03:11,408] [INFO] [config.py:996:print] gradient_accumulation_steps .. 128 +[2024-04-03 22:03:11,408] [INFO] [config.py:996:print] gradient_clipping ............ 1.0 +[2024-04-03 22:03:11,408] [INFO] [config.py:996:print] gradient_predivide_factor .... 1.0 +[2024-04-03 22:03:11,408] [INFO] [config.py:996:print] hybrid_engine ................ enabled=False max_out_tokens=512 inference_tp_size=1 release_inference_cache=False pin_parameters=True tp_gather_partition_size=8 +[2024-04-03 22:03:11,409] [INFO] [config.py:996:print] initial_dynamic_scale ........ 1 +[2024-04-03 22:03:11,409] [INFO] [config.py:996:print] load_universal_checkpoint .... False +[2024-04-03 22:03:11,409] [INFO] [config.py:996:print] loss_scale ................... 1.0 +[2024-04-03 22:03:11,409] [INFO] [config.py:996:print] memory_breakdown ............. False +[2024-04-03 22:03:11,409] [INFO] [config.py:996:print] mics_hierarchial_params_gather False +[2024-04-03 22:03:11,409] [INFO] [config.py:996:print] mics_shard_size .............. -1 +[2024-04-03 22:03:11,409] [INFO] [config.py:996:print] monitor_config ............... tensorboard=TensorBoardConfig(enabled=False, output_path='', job_name='DeepSpeedJobName') wandb=WandbConfig(enabled=False, group=None, team=None, project='deepspeed') csv_monitor=CSVConfig(enabled=False, output_path='', job_name='DeepSpeedJobName') enabled=False +[2024-04-03 22:03:11,409] [INFO] [config.py:996:print] nebula_config ................ { + "enabled": false, + "persistent_storage_path": null, + "persistent_time_interval": 100, + "num_of_version_in_retention": 2, + "enable_nebula_load": true, + "load_path": null +} +[2024-04-03 22:03:11,409] [INFO] [config.py:996:print] optimizer_legacy_fusion ...... False +[2024-04-03 22:03:11,409] [INFO] [config.py:996:print] optimizer_name ............... None +[2024-04-03 22:03:11,409] [INFO] [config.py:996:print] optimizer_params ............. None +[2024-04-03 22:03:11,409] [INFO] [config.py:996:print] pipeline ..................... {'stages': 'auto', 'partition': 'best', 'seed_layers': False, 'activation_checkpoint_interval': 0, 'pipe_partitioned': False, 'grad_partitioned': False} +[2024-04-03 22:03:11,409] [INFO] [config.py:996:print] pld_enabled .................. False +[2024-04-03 22:03:11,409] [INFO] [config.py:996:print] pld_params ................... False +[2024-04-03 22:03:11,409] [INFO] [config.py:996:print] prescale_gradients ........... False +[2024-04-03 22:03:11,409] [INFO] [config.py:996:print] scheduler_name ............... None +[2024-04-03 22:03:11,409] [INFO] [config.py:996:print] scheduler_params ............. None +[2024-04-03 22:03:11,409] [INFO] [config.py:996:print] seq_parallel_communication_data_type torch.float32 +[2024-04-03 22:03:11,409] [INFO] [config.py:996:print] sparse_attention ............. 
None +[2024-04-03 22:03:11,409] [INFO] [config.py:996:print] sparse_gradients_enabled ..... False +[2024-04-03 22:03:11,409] [INFO] [config.py:996:print] steps_per_print .............. 10 +[2024-04-03 22:03:11,409] [INFO] [config.py:996:print] train_batch_size ............. 256 +[2024-04-03 22:03:11,409] [INFO] [config.py:996:print] train_micro_batch_size_per_gpu 1 +[2024-04-03 22:03:11,409] [INFO] [config.py:996:print] use_data_before_expert_parallel_ False +[2024-04-03 22:03:11,409] [INFO] [config.py:996:print] use_node_local_storage ....... False +[2024-04-03 22:03:11,409] [INFO] [config.py:996:print] wall_clock_breakdown ......... False +[2024-04-03 22:03:11,409] [INFO] [config.py:996:print] weight_quantization_config ... None +[2024-04-03 22:03:11,409] [INFO] [config.py:996:print] world_size ................... 2 +[2024-04-03 22:03:11,409] [INFO] [config.py:996:print] zero_allow_comm_data_type_fp32 False +[2024-04-03 22:03:11,409] [INFO] [config.py:996:print] zero_allow_untested_optimizer False +[2024-04-03 22:03:11,409] [INFO] [config.py:996:print] zero_config .................. stage=0 contiguous_gradients=True reduce_scatter=False reduce_bucket_size=500,000,000 use_multi_rank_bucket_allreduce=True allgather_partitions=True allgather_bucket_size=500,000,000 overlap_comm=False load_from_fp32_weights=True elastic_checkpoint=False offload_param=None offload_optimizer=None sub_group_size=1,000,000,000 cpu_offload_param=None cpu_offload_use_pin_memory=None cpu_offload=None prefetch_bucket_size=50,000,000 param_persistence_threshold=100,000 model_persistence_threshold=sys.maxsize max_live_parameters=1,000,000,000 max_reuse_distance=1,000,000,000 gather_16bit_weights_on_model_save=False stage3_gather_fp16_weights_on_model_save=False use_all_reduce_for_fetch_params=False ignore_unused_parameters=True legacy_stage1=False round_robin_gradients=False zero_hpz_partition_size=1 zero_quantized_weights=False zero_quantized_nontrainable_weights=False zero_quantized_gradients=False mics_shard_size=-1 mics_hierarchical_params_gather=False memory_efficient_linear=True pipeline_loading_checkpoint=False override_module_apply=True +[2024-04-03 22:03:11,409] [INFO] [config.py:996:print] zero_enabled ................. False +[2024-04-03 22:03:11,409] [INFO] [config.py:996:print] zero_force_ds_cpu_optimizer .. True +[2024-04-03 22:03:11,409] [INFO] [config.py:996:print] zero_optimization_stage ...... 
0 +[2024-04-03 22:03:11,409] [INFO] [config.py:982:print_user_config] json = { + "train_batch_size": 256, + "train_micro_batch_size_per_gpu": 1, + "steps_per_print": 10, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": 0 + }, + "bf16": { + "enabled": true, + "accumulate_grads_via_hooks": true + }, + "fp16": { + "enabled": false + }, + "wall_clock_breakdown": false, + "pipeline": { + "pipe_partitioned": false, + "grad_partitioned": false + } +} +[2024-04-03 22:03:11,409] [INFO] [engine.py:99:__init__] CONFIG: micro_batches=128 micro_batch_size=1 +[2024-04-03 22:03:11,409] [INFO] [engine.py:139:__init__] is_pipe_partitioned= False is_grad_partitioned= False +[2024-04-03 22:03:12,129] [INFO] [engine.py:180:__init__] RANK=4 STAGE=1 LAYERS=25 [23, 48) STAGE_PARAMS=3301258240 (3301.258M) TOTAL_PARAMS=13205022720 (13205.023M) UNIQUE_PARAMS=13205022720 (13205.023M) +[2024-04-03 22:03:12,129] [INFO] [engine.py:180:__init__] RANK=1 STAGE=0 LAYERS=23 [0, 23) STAGE_PARAMS=3301253120 (3301.253M) TOTAL_PARAMS=13205022720 (13205.023M) UNIQUE_PARAMS=13205022720 (13205.023M) +[2024-04-03 22:03:12,129] [INFO] [engine.py:180:__init__] RANK=5 STAGE=1 LAYERS=25 [23, 48) STAGE_PARAMS=3301258240 (3301.258M) TOTAL_PARAMS=13205022720 (13205.023M) UNIQUE_PARAMS=13205022720 (13205.023M) +[2024-04-03 22:03:12,130] [INFO] [engine.py:180:__init__] RANK=0 STAGE=0 LAYERS=23 [0, 23) STAGE_PARAMS=3301253120 (3301.253M) TOTAL_PARAMS=13205022720 (13205.023M) UNIQUE_PARAMS=13205022720 (13205.023M) +[2024-04-03 22:03:12,132] [WARNING] [engine.py:2763:load_checkpoint] Unable to find latest file at /data/output/132node/checkpoints/latest, if trying to load latest checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint. +WARNING: could not find the metadata file /data/output/132node/checkpoints + will not load any checkpoints and will start from random +[2024-04-03 22:03:12,132] [WARNING] [engine.py:2763:load_checkpoint] Unable to find latest file at /data/output/132node/checkpoints/latest, if trying to load latest checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint. +[2024-04-03 22:03:12,133] [WARNING] [engine.py:2763:load_checkpoint] Unable to find latest file at /data/output/132node/checkpoints/latest, if trying to load latest checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint. +[2024-04-03 22:03:12,133] [WARNING] [engine.py:2763:load_checkpoint] Unable to find latest file at /data/output/132node/checkpoints/latest, if trying to load latest checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint. +[2024-04-03 22:03:12,133] [WARNING] [engine.py:2763:load_checkpoint] Unable to find latest file at /data/output/132node/checkpoints/latest, if trying to load latest checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint. +[2024-04-03 22:03:12,133] [WARNING] [engine.py:2763:load_checkpoint] Unable to find latest file at /data/output/132node/checkpoints/latest, if trying to load latest checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint. +[2024-04-03 22:03:12,133] [WARNING] [engine.py:2763:load_checkpoint] Unable to find latest file at /data/output/132node/checkpoints/latest, if trying to load latest checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint. 
+[2024-04-03 22:03:12,133] [WARNING] [engine.py:2763:load_checkpoint] Unable to find latest file at /data/output/132node/checkpoints/latest, if trying to load latest checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint. +time (ms) | load-checkpoint: 2.42 +[after model, optimizer, and learning rate scheduler are built] datetime: 2024-04-03 22:03:12 +> building train, validation, and test datasets ... + > datasets target sizes (minimum size): + train: 2560000 + validation: 258560 + test: 2560 +> building train, validation, and test datasets for GPT ... +Single data path provided for train, valid & test + > building dataset index ... + reading sizes... + reading pointers... + reading document index... + creating numpy buffer of mmap... + creating memory view of numpy buffer... + > finished creating indexed dataset in 0.001090 seconds + number of documents: 1558306 + > dataset split: + train: + document indices in [0, 1509999) total of 1509999 documents + validation: + document indices in [1509999, 1556748) total of 46749 documents + test: + document indices in [1556748, 1558306) total of 1558 documents +Loading dataset index file from /data/arxiv/tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_doc_idx.npy +Loading dataset index file from /data/arxiv/tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_doc_idx.npy +Loading dataset index file from /data/arxiv/tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_doc_idx.npy +Loading dataset index file from /data/arxiv/tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_doc_idx.npy + > loaded doc-idx mapping from /data/arxiv/tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_doc_idx.npy +Loading dataset index file from /data/arxiv/tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_sample_idx.npy +Loading dataset index file from /data/arxiv/tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_sample_idx.npy +Loading dataset index file from /data/arxiv/tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_sample_idx.npy +Loading dataset index file from /data/arxiv/tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_sample_idx.npy + > loaded sample-idx mapping from /data/arxiv/tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_sample_idx.npy +Loading dataset index file from /data/arxiv/tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_shuffle_idx.npy +Loading dataset index file from /data/arxiv/tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_shuffle_idx.npyLoading dataset index file from /data/arxiv/tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_shuffle_idx.npy + +Loading dataset index file from /data/arxiv/tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_shuffle_idx.npy + > loaded shuffle-idx mapping from /data/arxiv/tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_shuffle_idx.npy + loaded indexed file in 0.002 seconds + total number of samples: 15244235 + total number of epochs: 1 +Loading dataset index file from /data/arxiv/tokenized_text_document_valid_indexmap_258560ns_2048sl_1234s_doc_idx.npy +Loading dataset index file from /data/arxiv/tokenized_text_document_valid_indexmap_258560ns_2048sl_1234s_doc_idx.npy +Loading dataset index file from /data/arxiv/tokenized_text_document_valid_indexmap_258560ns_2048sl_1234s_doc_idx.npy +Loading dataset index file from 
/data/arxiv/tokenized_text_document_valid_indexmap_258560ns_2048sl_1234s_doc_idx.npy +Loading dataset index file from /data/arxiv/tokenized_text_document_valid_indexmap_258560ns_2048sl_1234s_sample_idx.npy +Loading dataset index file from /data/arxiv/tokenized_text_document_valid_indexmap_258560ns_2048sl_1234s_sample_idx.npy +Loading dataset index file from /data/arxiv/tokenized_text_document_valid_indexmap_258560ns_2048sl_1234s_sample_idx.npy + > loaded doc-idx mapping from /data/arxiv/tokenized_text_document_valid_indexmap_258560ns_2048sl_1234s_doc_idx.npy +Loading dataset index file from /data/arxiv/tokenized_text_document_valid_indexmap_258560ns_2048sl_1234s_sample_idx.npy +Loading dataset index file from /data/arxiv/tokenized_text_document_valid_indexmap_258560ns_2048sl_1234s_shuffle_idx.npy > loaded sample-idx mapping from /data/arxiv/tokenized_text_document_valid_indexmap_258560ns_2048sl_1234s_sample_idx.npy + +Loading dataset index file from /data/arxiv/tokenized_text_document_valid_indexmap_258560ns_2048sl_1234s_shuffle_idx.npy +Loading dataset index file from /data/arxiv/tokenized_text_document_valid_indexmap_258560ns_2048sl_1234s_shuffle_idx.npy +Loading dataset index file from /data/arxiv/tokenized_text_document_valid_indexmap_258560ns_2048sl_1234s_shuffle_idx.npy + > loaded shuffle-idx mapping from /data/arxiv/tokenized_text_document_valid_indexmap_258560ns_2048sl_1234s_shuffle_idx.npy + loaded indexed file in 0.002 seconds + total number of samples: 481162 + total number of epochs: 1 +Loading dataset index file from /data/arxiv/tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_doc_idx.npy +Loading dataset index file from /data/arxiv/tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_doc_idx.npy +Loading dataset index file from /data/arxiv/tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_doc_idx.npy +Loading dataset index file from /data/arxiv/tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_doc_idx.npy +Loading dataset index file from /data/arxiv/tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_sample_idx.npyLoading dataset index file from /data/arxiv/tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_sample_idx.npy + +Loading dataset index file from /data/arxiv/tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_sample_idx.npy + > loaded doc-idx mapping from /data/arxiv/tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_doc_idx.npy +Loading dataset index file from /data/arxiv/tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_sample_idx.npy +Loading dataset index file from /data/arxiv/tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_shuffle_idx.npy + > loaded sample-idx mapping from /data/arxiv/tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_sample_idx.npy +Loading dataset index file from /data/arxiv/tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_shuffle_idx.npy +Loading dataset index file from /data/arxiv/tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_shuffle_idx.npyLoading dataset index file from /data/arxiv/tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_shuffle_idx.npy + + > loaded shuffle-idx mapping from /data/arxiv/tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_shuffle_idx.npy + loaded indexed file in 0.001 seconds + total number of samples: 16581 + total number of epochs: 1 +> finished creating GPT datasets ... 
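The dataset target sizes and the batch-accounting values printed above follow directly from the launch arguments. Below is a minimal sketch of that arithmetic, assuming the standard Megatron-style formulas (variable names are illustrative, not taken from the source):

    # Derivation of the "datasets target sizes" and gradient-accumulation
    # values reported in this log (assumed formulas, names illustrative).
    train_iters = 10000          # --train-iters
    eval_interval = 100          # --eval-interval
    eval_iters = 10              # --eval-iters
    global_batch_size = 256     # --global-batch-size
    micro_batch_size = 1         # --micro-batch-size
    seq_length = 2048            # --seq-length
    world_size = 8
    tp, pp = 2, 2                # tensor / pipeline model-parallel sizes

    dp = world_size // (tp * pp)                               # data-parallel size: 2
    grad_accum = global_batch_size // (micro_batch_size * dp)  # 128 micro-batches
    train_samples = train_iters * global_batch_size            # 2,560,000
    valid_samples = (train_iters // eval_interval + 1) * eval_iters * global_batch_size  # 258,560
    test_samples = eval_iters * global_batch_size              # 2,560
    tokens_per_10_iters = 10 * global_batch_size * seq_length  # 5,242,880

These reproduce the train/validation/test target sizes above, the "gradient_accumulation_steps .. 128" and "micro_batches=128" engine settings, and the "consumed tokens: 5242880" figure reported at iteration 10 below.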
+time (ms) | model-and-optimizer-setup: 2860.75 | train/valid/test-data-iterators-setup: 1113.55 +[after dataloaders are built] datetime: 2024-04-03 22:03:13 +done with setup ... +training ... +[before the start of training step] datetime: 2024-04-03 22:03:13 +[2024-04-03 22:08:15,101] [INFO] [logging.py:96:log_dist] [Rank 0] step=10, skipped=0, lr=[1.4999999999999998e-06, 1.4999999999999998e-06], mom=[(0.9, 0.95), (0.9, 0.95)] +steps: 10 loss: 11.4461 iter time (s): 30.238 samples/sec: 8.466 +[Rank 0] (after 10 iterations) memory (MB) | allocated: 0.0 | max allocated: 0.0 | reserved: 0.0 | max reserved: 0.0 +[Rank 1] (after 10 iterations) memory (MB) | allocated: 0.0 | max allocated: 0.0 | reserved: 0.0 | max reserved: 0.0 +[Rank 4] (after 10 iterations) memory (MB) | allocated: 0.0 | max allocated: 0.0 | reserved: 0.0 | max reserved: 0.0 +[Rank 5] (after 10 iterations) memory (MB) | allocated: 0.0 | max allocated: 0.0 | reserved: 0.0 | max reserved: 0.0 + iteration 10/ 10000 | consumed samples: 2560 | consumed tokens: 5242880 | elapsed time per iteration (ms): 30185.1 | learning rate: 1.500E-06 | global batch size: 256 | lm loss: 1.177418E+01 | loss scale: 0.0 | grad norm: 13.481 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | samples per second: 8.481 | TFLOPs: 177.37 | diff --git a/llama13b_5M/checkpoints_zero_stage_2/latest b/llama13b_5M/checkpoints_zero_stage_2/latest new file mode 100644 index 0000000000000000000000000000000000000000..1bba58c86974961336c3c213d21aaf21e8ea4f91 --- /dev/null +++ b/llama13b_5M/checkpoints_zero_stage_2/latest @@ -0,0 +1 @@ +global_step0 \ No newline at end of file diff --git a/llama13b_5M/checkpoints_zero_stage_2/latest_checkpointed_iteration.txt b/llama13b_5M/checkpoints_zero_stage_2/latest_checkpointed_iteration.txt new file mode 100644 index 0000000000000000000000000000000000000000..86ee83a4a26867a79e0a5a8948153cff8f4cf9a4 --- /dev/null +++ b/llama13b_5M/checkpoints_zero_stage_2/latest_checkpointed_iteration.txt @@ -0,0 +1 @@ +40 \ No newline at end of file diff --git a/llama13b_5M/checkpoints_zero_stage_2/zero_to_fp32.py b/llama13b_5M/checkpoints_zero_stage_2/zero_to_fp32.py new file mode 100644 index 0000000000000000000000000000000000000000..49b846633d6eb1e836e34681e44033581f4edb7b --- /dev/null +++ b/llama13b_5M/checkpoints_zero_stage_2/zero_to_fp32.py @@ -0,0 +1,592 @@ +#!/usr/bin/env python + +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +# This script extracts fp32 consolidated weights from a zero 1, 2 and 3 DeepSpeed checkpoints. It gets +# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in +# the future. Once extracted, the weights don't require DeepSpeed and can be used in any +# application. +# +# example: python zero_to_fp32.py . pytorch_model.bin + +import argparse +import torch +import glob +import math +import os +import re +from collections import OrderedDict +from dataclasses import dataclass + +# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with +# DeepSpeed data structures it has to be available in the current python environment. 
+# DeepSpeed data structures it has to be available in the current python environment.
+from deepspeed.utils import logger +from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS, + FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES, + FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS) + + +@dataclass +class zero_model_state: + buffers: dict() + param_shapes: dict() + shared_params: list + ds_version: int + frozen_param_shapes: dict() + frozen_param_fragments: dict() + + +debug = 0 + +# load to cpu +device = torch.device('cpu') + + +def atoi(text): + return int(text) if text.isdigit() else text + + +def natural_keys(text): + ''' + alist.sort(key=natural_keys) sorts in human order + http://nedbatchelder.com/blog/200712/human_sorting.html + (See Toothy's implementation in the comments) + ''' + return [atoi(c) for c in re.split(r'(\d+)', text)] + + +def get_model_state_file(checkpoint_dir, zero_stage): + if not os.path.isdir(checkpoint_dir): + raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist") + + # there should be only one file + if zero_stage <= 2: + file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt") + elif zero_stage == 3: + file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt") + + if not os.path.exists(file): + raise FileNotFoundError(f"can't find model states file at '{file}'") + + return file + + +def get_checkpoint_files(checkpoint_dir, glob_pattern): + # XXX: need to test that this simple glob rule works for multi-node setup too + ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys) + + if len(ckpt_files) == 0: + raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'") + + return ckpt_files + + +def get_optim_files(checkpoint_dir): + return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt") + + +def get_model_state_files(checkpoint_dir): + return get_checkpoint_files(checkpoint_dir, "*_model_states.pt") + + +def parse_model_states(files): + zero_model_states = [] + for file in files: + state_dict = torch.load(file, map_location=device) + + if BUFFER_NAMES not in state_dict: + raise ValueError(f"{file} is not a model state checkpoint") + buffer_names = state_dict[BUFFER_NAMES] + if debug: + print("Found buffers:", buffer_names) + + # recover just the buffers while restoring them to fp32 if they were saved in fp16 + buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names} + param_shapes = state_dict[PARAM_SHAPES] + + # collect parameters that are included in param_shapes + param_names = [] + for s in param_shapes: + for name in s.keys(): + param_names.append(name) + + # update with frozen parameters + frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None) + if frozen_param_shapes is not None: + if debug: + print(f"Found frozen_param_shapes: {frozen_param_shapes}") + param_names += list(frozen_param_shapes.keys()) + + # handle shared params + shared_params = [[k, v] for k, v in state_dict["shared_params"].items()] + + ds_version = state_dict.get(DS_VERSION, None) + + frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None) + + z_model_state = zero_model_state(buffers=buffers, + param_shapes=param_shapes, + shared_params=shared_params, + ds_version=ds_version, + frozen_param_shapes=frozen_param_shapes, + frozen_param_fragments=frozen_param_fragments) + zero_model_states.append(z_model_state) + + return zero_model_states + + +def parse_optim_states(files, ds_checkpoint_dir): + + total_files = len(files) + 
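+    # What follows: load each rank's '*_optim_states.pt' shard on CPU, keep only
+    # the fp32 master weights, then read (zero_stage, world_size, fp32_flat_groups)
+    # out of the shards and sanity-check world_size against the file count.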
state_dicts = [] + for f in files: + state_dict = torch.load(f, map_location=device) + # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights + # and also handle the case where it was already removed by another helper script + state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None) + state_dicts.append(state_dict) + + if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]: + raise ValueError(f"{files[0]} is not a zero checkpoint") + zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE] + world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT] + + # For ZeRO-2 each param group can have different partition_count as data parallelism for expert + # parameters can be different from data parallelism for non-expert parameters. So we can just + # use the max of the partition_count to get the dp world_size. + + if type(world_size) is list: + world_size = max(world_size) + + if world_size != total_files: + raise ValueError( + f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. " + "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes." + ) + + # the groups are named differently in each stage + if zero_stage <= 2: + fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS + elif zero_stage == 3: + fp32_groups_key = FP32_FLAT_GROUPS + else: + raise ValueError(f"unknown zero stage {zero_stage}") + + if zero_stage <= 2: + fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))] + elif zero_stage == 3: + # if there is more than one param group, there will be multiple flattened tensors - one + # flattened tensor per group - for simplicity merge them into a single tensor + # + # XXX: could make the script more memory efficient for when there are multiple groups - it + # will require matching the sub-lists of param_shapes for each param group flattened tensor + + fp32_flat_groups = [ + torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts)) + ] + + return zero_stage, world_size, fp32_flat_groups + + +def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir): + """ + Returns fp32 state_dict reconstructed from ds checkpoint + + Args: + - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are) + + """ + print(f"Processing zero checkpoint '{ds_checkpoint_dir}'") + + optim_files = get_optim_files(ds_checkpoint_dir) + zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir) + print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}") + + model_files = get_model_state_files(ds_checkpoint_dir) + + zero_model_states = parse_model_states(model_files) + print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}') + + if zero_stage <= 2: + return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states) + elif zero_stage == 3: + return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states) + + +def _zero2_merge_frozen_params(state_dict, zero_model_states): + if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0: + return + + frozen_param_shapes = zero_model_states[0].frozen_param_shapes + frozen_param_fragments = zero_model_states[0].frozen_param_fragments + + if debug: + num_elem = 
sum(s.numel() for s in frozen_param_shapes.values()) + print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}') + + wanted_params = len(frozen_param_shapes) + wanted_numel = sum(s.numel() for s in frozen_param_shapes.values()) + avail_numel = sum([p.numel() for p in frozen_param_fragments.values()]) + print(f'Frozen params: Have {avail_numel} numels to process.') + print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params') + + total_params = 0 + total_numel = 0 + for name, shape in frozen_param_shapes.items(): + total_params += 1 + unpartitioned_numel = shape.numel() + total_numel += unpartitioned_numel + + state_dict[name] = frozen_param_fragments[name] + + if debug: + print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ") + + print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements") + + +def _has_callable(obj, fn): + attr = getattr(obj, fn, None) + return callable(attr) + + +def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states): + param_shapes = zero_model_states[0].param_shapes + + # Reconstruction protocol: + # + # XXX: document this + + if debug: + for i in range(world_size): + for j in range(len(fp32_flat_groups[0])): + print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}") + + # XXX: memory usage doubles here (zero2) + num_param_groups = len(fp32_flat_groups[0]) + merged_single_partition_of_fp32_groups = [] + for i in range(num_param_groups): + merged_partitions = [sd[i] for sd in fp32_flat_groups] + full_single_fp32_vector = torch.cat(merged_partitions, 0) + merged_single_partition_of_fp32_groups.append(full_single_fp32_vector) + avail_numel = sum( + [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups]) + + if debug: + wanted_params = sum([len(shapes) for shapes in param_shapes]) + wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes]) + # not asserting if there is a mismatch due to possible padding + print(f"Have {avail_numel} numels to process.") + print(f"Need {wanted_numel} numels in {wanted_params} params.") + + # params + # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support + # out-of-core computing solution + total_numel = 0 + total_params = 0 + for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups): + offset = 0 + avail_numel = full_single_fp32_vector.numel() + for name, shape in shapes.items(): + + unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape) + total_numel += unpartitioned_numel + total_params += 1 + + if debug: + print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ") + state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape) + offset += unpartitioned_numel + + # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and + # avail_numel can differ by anywhere between 0..2*world_size. 
Due to two unrelated complex + # paddings performed in the code it's almost impossible to predict the exact numbers w/o the + # live optimizer object, so we are checking that the numbers are within the right range + align_to = 2 * world_size + + def zero2_align(x): + return align_to * math.ceil(x / align_to) + + if debug: + print(f"original offset={offset}, avail_numel={avail_numel}") + + offset = zero2_align(offset) + avail_numel = zero2_align(avail_numel) + + if debug: + print(f"aligned offset={offset}, avail_numel={avail_numel}") + + # Sanity check + if offset != avail_numel: + raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong") + + print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements") + + +def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states): + state_dict = OrderedDict() + + # buffers + buffers = zero_model_states[0].buffers + state_dict.update(buffers) + if debug: + print(f"added {len(buffers)} buffers") + + _zero2_merge_frozen_params(state_dict, zero_model_states) + + _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states) + + # recover shared parameters + for pair in zero_model_states[0].shared_params: + if pair[1] in state_dict: + state_dict[pair[0]] = state_dict[pair[1]] + + return state_dict + + +def zero3_partitioned_param_info(unpartitioned_numel, world_size): + remainder = unpartitioned_numel % world_size + padding_numel = (world_size - remainder) if remainder else 0 + partitioned_numel = math.ceil(unpartitioned_numel / world_size) + return partitioned_numel, padding_numel + + +def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states): + if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0: + return + + if debug: + for i in range(world_size): + num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values()) + print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}') + + frozen_param_shapes = zero_model_states[0].frozen_param_shapes + wanted_params = len(frozen_param_shapes) + wanted_numel = sum(s.numel() for s in frozen_param_shapes.values()) + avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size + print(f'Frozen params: Have {avail_numel} numels to process.') + print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params') + + total_params = 0 + total_numel = 0 + for name, shape in zero_model_states[0].frozen_param_shapes.items(): + total_params += 1 + unpartitioned_numel = shape.numel() + total_numel += unpartitioned_numel + + param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states) + state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape) + + partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size) + + if debug: + print( + f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}" + ) + + print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements") + + +def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states): + param_shapes = zero_model_states[0].param_shapes + avail_numel = fp32_flat_groups[0].numel() * world_size + # Reconstruction protocol: For zero3 we need to zip the 
partitions together at boundary of each + # param, re-consolidating each param, while dealing with padding if any + + # merge list of dicts, preserving order + param_shapes = {k: v for d in param_shapes for k, v in d.items()} + + if debug: + for i in range(world_size): + print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}") + + wanted_params = len(param_shapes) + wanted_numel = sum(shape.numel() for shape in param_shapes.values()) + # not asserting if there is a mismatch due to possible padding + avail_numel = fp32_flat_groups[0].numel() * world_size + print(f"Trainable params: Have {avail_numel} numels to process.") + print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.") + + # params + # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support + # out-of-core computing solution + offset = 0 + total_numel = 0 + total_params = 0 + for name, shape in param_shapes.items(): + + unpartitioned_numel = shape.numel() + total_numel += unpartitioned_numel + total_params += 1 + + partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size) + + if debug: + print( + f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}" + ) + + # XXX: memory usage doubles here + state_dict[name] = torch.cat( + tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)), + 0).narrow(0, 0, unpartitioned_numel).view(shape) + offset += partitioned_numel + + offset *= world_size + + # Sanity check + if offset != avail_numel: + raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong") + + print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements") + + +def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states): + state_dict = OrderedDict() + + # buffers + buffers = zero_model_states[0].buffers + state_dict.update(buffers) + if debug: + print(f"added {len(buffers)} buffers") + + _zero3_merge_frozen_params(state_dict, world_size, zero_model_states) + + _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states) + + # recover shared parameters + for pair in zero_model_states[0].shared_params: + if pair[1] in state_dict: + state_dict[pair[0]] = state_dict[pair[1]] + + return state_dict + + +def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None): + """ + Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with + ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example + via a model hub. + + Args: + - ``checkpoint_dir``: path to the desired checkpoint folder + - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14`` + + Returns: + - pytorch ``state_dict`` + + Note: this approach may not work if your application doesn't have sufficient free CPU memory and + you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with + the checkpoint. 
+ + A typical usage might be :: + + from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint + # do the training and checkpoint saving + state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu + model = model.cpu() # move to cpu + model.load_state_dict(state_dict) + # submit to model hub or save the model to share with others + + In this example the ``model`` will no longer be usable in the deepspeed context of the same + application. i.e. you will need to re-initialize the deepspeed engine, since + ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it. + + If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead. + + """ + if tag is None: + latest_path = os.path.join(checkpoint_dir, 'latest') + if os.path.isfile(latest_path): + with open(latest_path, 'r') as fd: + tag = fd.read().strip() + else: + raise ValueError(f"Unable to find 'latest' file at {latest_path}") + + ds_checkpoint_dir = os.path.join(checkpoint_dir, tag) + + if not os.path.isdir(ds_checkpoint_dir): + raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist") + + return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir) + + +def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None): + """ + Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be + loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed. + + Args: + - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``) + - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin) + - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14`` + """ + + state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag) + print(f"Saving fp32 state dict to {output_file}") + torch.save(state_dict, output_file) + + +def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None): + """ + 1. Put the provided model to cpu + 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` + 3. Load it into the provided model + + Args: + - ``model``: the model object to update + - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``) + - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14`` + + Returns: + - ``model`: modified model + + Make sure you have plenty of CPU memory available before you call this function. If you don't + have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it + conveniently placed for you in the checkpoint folder. + + A typical usage might be :: + + from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint + model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir) + # submit to model hub or save the model to share with others + + Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context + of the same application. i.e. you will need to re-initialize the deepspeed engine, since + ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it. 
+ + """ + logger.info(f"Extracting fp32 weights") + state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag) + + logger.info(f"Overwriting model with fp32 weights") + model = model.cpu() + model.load_state_dict(state_dict, strict=False) + + return model + + +if __name__ == "__main__": + + parser = argparse.ArgumentParser() + parser.add_argument("checkpoint_dir", + type=str, + help="path to the desired checkpoint folder, e.g., path/checkpoint-12") + parser.add_argument( + "output_file", + type=str, + help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)") + parser.add_argument("-t", + "--tag", + type=str, + default=None, + help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1") + parser.add_argument("-d", "--debug", action='store_true', help="enable debug") + args = parser.parse_args() + + debug = args.debug + + convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, args.output_file, tag=args.tag) diff --git a/llama13b_5M/ds_config.json b/llama13b_5M/ds_config.json new file mode 100644 index 0000000000000000000000000000000000000000..d0ee47e9cda5f537a1be4430e2806d723acd2356 --- /dev/null +++ b/llama13b_5M/ds_config.json @@ -0,0 +1,19 @@ +{ + "train_batch_size" : 256, + "train_micro_batch_size_per_gpu": 1, + "steps_per_print": 10, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": 0 + }, + "bf16": { + "enabled": true, + "accumulate_grads_via_hooks": true + }, + "fp16": {"enabled": false}, + "wall_clock_breakdown": false, + "pipeline": { + "pipe_partitioned": false, + "grad_partitioned": false + } +} diff --git a/llama13b_5M/first_run.txt b/llama13b_5M/first_run.txt new file mode 100644 index 0000000000000000000000000000000000000000..04886d6cbfac9de40e2608db2daf0a66eadb4438 --- /dev/null +++ b/llama13b_5M/first_run.txt @@ -0,0 +1,1496 @@ +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. 
+ warnings.warn( +[2024-04-24 07:21:56,617] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +[2024-04-24 07:21:58,125] [INFO] [runner.py:585:main] cmd = /usr/bin/python3 -u -m deepspeed.launcher.launch --world_info=eyIxMDAuODMuMTM0LjEzMiI6IFswLCAxLCAyLCAzLCA0LCA1LCA2LCA3XX0= --master_addr=100.83.134.132 --master_port=29500 --no_python --no_local_rank --enable_each_rank_log=None /usr/bin/bash -c cd /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed && python3 -u ./pretrain_llama.py --deepspeed --tensor-model-parallel-size 2 --pipeline-model-parallel-size 2 --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --layernorm-epsilon 1e-6 --num-layers 16 --hidden-size 5120 --ffn-hidden-size 13824 --num-attention-heads 40 --seq-length 2048 --micro-batch-size 1 --global-batch-size 256 --train-iters 10000 --log-interval 10 --eval-iters 10 --eval-interval 20 --data-path /data/arxiv//tokenized_text_document --vocab-file /data/arxiv//gpt2-vocab.json --merge-file /data/arxiv//gpt2-merges.txt --optimizer adamw --adam-beta1 0.9 --adam-beta2 0.95 --adam-eps 1e-6 --lr 3e-4 --lr-decay-style cosine --lr-warmup-iters 2000 --clip-grad 1.0 --weight-decay 0.1 --attention-dropout 0.1 --hidden-dropout 0.1 --tensorboard-dir /data/output/llama13b_5M//tensorboard --log-validation-ppl-to-tensorboard --log-batch-size-to-tensorboard --log-timers-to-tensorboard --load /data/output/llama13b_5M//checkpoints_zero_stage_2 --deepspeed_config=/data/output/llama13b_5M//ds_config.json --zero-stage=0 --exit-interval 0 --no-masked-softmax-fusion --no-bias-gelu-fusion --no-bias-dropout-fusion --use-torch-compile false --use-fused-sdpa true --use-fused-sdpa-with-recompute false --bf16 --use_hpu --distributed-backend=hccl --hpu-deterministic --sequence-parallel --save /data/output/llama13b_5M//checkpoints_zero_stage_2 --save-interval 20 --verify-checkpoint --verify-checkpoint-model-type LLAMA +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. + warnings.warn( +[2024-04-24 07:21:59,707] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +[2024-04-24 07:22:00,960] [INFO] [launch.py:146:main] WORLD INFO DICT: {'100.83.134.132': [0, 1, 2, 3, 4, 5, 6, 7]} +[2024-04-24 07:22:00,960] [INFO] [launch.py:152:main] nnodes=1, num_local_procs=8, node_rank=0 +[2024-04-24 07:22:00,960] [INFO] [launch.py:163:main] global_rank_mapping=defaultdict(, {'100.83.134.132': [0, 1, 2, 3, 4, 5, 6, 7]}) +[2024-04-24 07:22:00,960] [INFO] [launch.py:164:main] dist_world_size=8 +[2024-04-24 07:22:00,960] [INFO] [launch.py:166:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. + warnings.warn( +[2024-04-24 07:22:02,851] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. 
+ warnings.warn(
+[2024-04-24 07:22:02,852] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect)
+/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+ warnings.warn(
+[2024-04-24 07:22:02,876] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect)
+--------------------------------------------------
+DeepSpeed C++/CUDA extension op report
+--------------------------------------------------
+NOTE: Ops not installed will be just-in-time (JIT) compiled at
+ runtime if needed. Op compatibility means that your system
+ meet the required dependencies to JIT install the op.
+--------------------------------------------------
+JIT compiled ops requires ninja
+ninja .................. [OKAY]
+--------------------------------------------------
+op name ................ installed .. compatible
+--------------------------------------------------
+cpu_adam ............... [NO] ....... [OKAY]
+fused_adam ............. [NO] ....... [OKAY]
+deepspeed_not_implemented [NO] ....... [OKAY]
+transformer_inference .. [NO] ....... [OKAY]
+--------------------------------------------------
+DeepSpeed general environment info:
+torch install path ............... ['/usr/local/lib/python3.10/dist-packages/torch']
+torch version .................... 2.1.1a0+gitb51c9f6
+deepspeed install path ........... ['/usr/local/lib/python3.10/dist-packages/deepspeed']
+deepspeed info ................... 0.12.4+hpu.synapse.v1.14.0, fad45b2, 1.14.0
+deepspeed wheel compiled w. ...... torch 2.1
+shared memory (/dev/shm) size .... 503.72 GB
+fatal: not a git repository (or any parent up to mount point /)
+Stopping at filesystem boundary (GIT_DISCOVERY_ACROSS_FILESYSTEM not set).
+**** Git info for Megatron: git_hash=unknown git_branch=unknown ****
+_initialize_distributed: Initializing with below params:
+args.local_rank: 5
+args.world_size: 8
+args.rank: 5
+args.distributed_backend: hccl
+_initialize_distributed: Initializing with below params:
+args.local_rank: 4
+args.world_size: 8
+args.rank: 4
+args.distributed_backend: hccl
+_initialize_distributed: Initializing with below params:
+args.local_rank: 1
+args.world_size: 8
+args.rank: 1
+args.distributed_backend: hccl
+_initialize_distributed: Initializing with below params:
+args.local_rank: 3
+args.world_size: 8
+args.rank: 3
+args.distributed_backend: hccl
+using world size: 8, data-parallel-size: 2, tensor-model-parallel size: 2, pipeline-model-parallel size: 2
+accumulate and all-reduce gradients in fp32 for bfloat16 data type.
+using torch.bfloat16 for parameters ...
+------------------------ arguments ------------------------
+ accumulate_allreduce_grads_in_fp32 .............. True
+ activation_func_type ............................ swiglu
+ adam_beta1 ...................................... 0.9
+ adam_beta2 ...................................... 0.95
+ adam_eps ........................................ 1e-06
+ adlr_autoresume ................................. False
+ adlr_autoresume_interval ........................ 1000
+ aml_data_download_path .......................... None
+ apply_layernorm_weight_plus_one ................. False
+ apply_query_key_layer_scaling ................... True
+ apply_residual_connection_post_layernorm ........ False
+ attention_dropout ............................... 0.1
+ attention_softmax_in_fp32 ....................... False
+ bert_binary_head ................................ True
+ bert_load ....................................... None
+ bf16 ............................................ True
+ bias_dropout_fusion ............................. False
+ bias_gelu_fusion ................................ False
+ biencoder_projection_dim ........................ 0
+ biencoder_shared_query_context_model ............ False
+ block_data_path ................................. None
+ cache_fp8_weight ................................ False
+ cache_fp8_weight_fwd ............................ True
+ checkpoint_activations .......................... False
+ checkpoint_activations_granularity .............. full
+ checkpoint_in_cpu ............................... False
+ checkpoint_num_layers ........................... 1
+ clearml_config_path ............................. None
+ clearml_continue_exp ............................ False
+ clearml_exp_name ................................ None
+ clip_grad ....................................... 1.0
+ compression_training ............................ False
+ consumed_train_samples .......................... 0
+ consumed_train_tokens ........................... 0
+ consumed_valid_samples .......................... 0
+ contigious_checkpointing ........................ False
+ cpu_optimizer ................................... False
+ cpu_torch_adam .................................. False
+ create_moe_param_group .......................... False
+ curriculum_learning ............................. False
+ data_idx_path ................................... None
+ data_impl ....................................... infer
+ data_parallel_size .............................. 2
+ data_path ....................................... ['/data/arxiv//tokenized_text_document']
+ data_sharding ................................... True
+ dataloader_type ................................. single
+ DDP_impl ........................................ local
+ decoder_seq_length .............................. None
+ deepscale ....................................... False
+ deepscale_config ................................ None
+ deepspeed ....................................... True
+ deepspeed_activation_checkpointing .............. False
+ deepspeed_config ................................ /data/output/llama13b_5M//ds_config.json
+ deepspeed_mpi ................................... False
+ distribute_checkpointed_activations ............. False
+ distributed_backend ............................. hccl
+ do_layernorm_bias_weight_decay .................. False
+ do_pretrain_validation .......................... False
+ ds_inference .................................... False
+ ds_pipeline_enabled ............................. True
+ embed_layernorm ................................. False
+ embedding_path .................................. None
+ enable_expert_tensor_parallelism ................ False
+ encoder_seq_length .............................. 2048
+ eod_mask_loss ................................... False
+ eval_interval ................................... 20
+ eval_iters ...................................... 10
+ eval_loss_exit_value ............................ None
+ eval_micro_batch_size ........................... 1
+ evidence_data_path .............................. None
+ exit_duration_in_mins ........................... None
+ exit_interval ................................... 0
+ expert_interval ................................. 2
+ ffn_hidden_coeff ................................ 2.6666666666666665
+ ffn_hidden_size ................................. 13824
+ finetune ........................................ False
+ fix_position_emb_redundant_alloc ................ False
+ flatten_linear_operands ......................... False
+ fp16 ............................................ False
+ fp16_lm_cross_entropy ........................... False
+ fp32_residual_connection ........................ False
+ global_batch_size ............................... 256
+ hidden_dropout .................................. 0.1
+ hidden_size ..................................... 5120
+ hidden_size_teacher ............................. None
+ hpu_deterministic ............................... True
+ hpu_fp8_format .................................. e5m2
+ hpu_fp8_measure_interval ........................ 10
+ hysteresis ...................................... 2
+ ict_head_size ................................... None
+ ict_load ........................................ None
+ img_dim ......................................... 224
+ indexer_batch_size .............................. 128
+ indexer_log_interval ............................ 1000
+ inference ....................................... False
+ init_method_std ................................. 0.02
+ init_method_xavier_uniform ...................... False
+ initial_loss_scale .............................. 4294967296
+ kd .............................................. False
+ kd_alpha_ce ..................................... 1
+ kd_beta_ce ...................................... 1
+ kd_temp ......................................... 1.0
+ kill_switch_path ................................ None
+ kv_channels ..................................... 128
+ layernorm_epsilon ............................... 1e-06
+ layernorm_type .................................. rmsnorm
+ lazy_mpu_init ................................... None
+ load ............................................ /data/output/llama13b_5M//checkpoints_zero_stage_2
+ load_teacher .................................... None
+ local_rank ...................................... 0
+ log_batch_size_to_tensorboard ................... True
+ log_bwd_grads ................................... False
+ log_fwd_activations ............................. False
+ log_interval .................................... 10
+ log_learning_rate_to_tensorboard ................ True
+ log_loss_scale_to_tensorboard ................... True
+ log_model_inputs ................................ False
+ log_num_zeros_in_grad ........................... False
+ log_optimizer_states_to_tensorboard ............. False
+ log_params_norm ................................. False
+ log_timers_to_tensorboard ....................... True
+ log_validation_ppl_to_tensorboard ............... True
+ loss_scale ...................................... None
+ loss_scale_window ............................... 1000
+ lr .............................................. 0.0003
+ lr_decay_iters .................................. None
+ lr_decay_samples ................................ None
+ lr_decay_style .................................. cosine
+ lr_decay_tokens ................................. None
+ lr_warmup_fraction .............................. None
+ lr_warmup_iters ................................. 2000
+ lr_warmup_samples ............................... 0
+ lr_warmup_tokens ................................ None
+ make_vocab_size_divisible_by .................... 128
+ mask_prob ....................................... 0.15
+ mask_tensor_adding .............................. False
+ masked_softmax_fusion ........................... False
+ max_position_embeddings ......................... None
+ memory_centric_tiled_linear ..................... False
+ merge_file ...................................... /data/arxiv//gpt2-merges.txt
+ micro_batch_size ................................ 1
+ min_loss_scale .................................. 1.0
+ min_lr .......................................... 0.0
+ mlp_type ........................................ standard
+ mmap_warmup ..................................... False
+ moe_eval_capacity_factor ........................ 1.0
+ moe_expert_parallel_size ........................ 1
+ moe_loss_coeff .................................. 0.1
+ moe_min_capacity ................................ 4
+ moe_token_dropping .............................. True
+ moe_train_capacity_factor ....................... 1.0
+ mos ............................................. False
+ no_bias ......................................... True
+ no_cuda ......................................... False
+ no_load_lr_state ................................ False
+ no_load_optim ................................... None
+ no_load_rng ..................................... None
+ no_pipeline_parallel ............................ False
+ no_save_optim ................................... None
+ no_save_rng ..................................... None
+ no_scaled_init .................................. False
+ num_attention_heads ............................. 40
+ num_attention_heads_teacher ..................... None
+ num_channels .................................... 3
+ num_classes ..................................... 1000
+ num_experts ..................................... [1]
+ num_experts_teacher ............................. [1]
+ num_key_value_heads ............................. 40
+ num_layers ...................................... 16
+ num_layers_per_virtual_pipeline_stage ........... None
+ num_layers_teacher .............................. None
+ num_workers ..................................... 2
+ onnx_safe ....................................... None
+ openai_gelu ..................................... False
+ optimizer ....................................... adamw
+ override_lr_scheduler ........................... False
+ params_dtype .................................... torch.bfloat16
+ partition_activations ........................... False
+ patch_dim ....................................... 16
+ pipeline_model_parallel_size .................... 2
+ position_embedding_type ......................... PositionEmbeddingType.rotary
+ profile ......................................... None
+ profile_backward ................................ False
+ profile_steps ................................... 2,3
+ query_in_block_prob ............................. 0.1
+ rampup_batch_size ............................... None
+ rank ............................................ 0
+ remote_device ................................... none
+ reset_attention_mask ............................ False
+ reset_iteration ................................. False
+ reset_position_ids .............................. False
+ retriever_report_topk_accuracies ................ []
+ retriever_score_scaling ......................... False
+ retriever_seq_length ............................ 256
+ sample_rate ..................................... 1.0
+ save ............................................ /data/output/llama13b_5M//checkpoints_zero_stage_2
+ save_interval ................................... 20
+ scatter_gather_tensors_in_pipeline .............. True
+ scattered_embeddings ............................ False
+ seed ............................................ 1234
+ seq_length ...................................... 2048
+ sequence_parallel ............................... True
+ sgd_momentum .................................... 0.9
+ short_seq_prob .................................. 0.1
+ skip_train ...................................... False
+ split ........................................... 969, 30, 1
+ split_transformers .............................. False
+ synchronize_each_layer .......................... False
+ tensor_logger_max_iter .......................... 0
+ tensor_logger_path .............................. None
+ tensor_model_parallel_size ...................... 2
+ tensorboard_dir ................................. /data/output/llama13b_5M//tensorboard
+ tensorboard_log_interval ........................ 1
+ tensorboard_queue_size .......................... 1000
+ test_data_path .................................. None
+ tile_factor ..................................... 1
+ titles_data_path ................................ None
+ tokenizer_eod_id ................................ None
+ tokenizer_model_file ............................ None
+ tokenizer_type .................................. GPT2BPETokenizer
+ topk ............................................ 1
+ train_data_path ................................. None
+ train_iters ..................................... 10000
+ train_samples ................................... None
+ train_tokens .................................... None
+ universal_checkpoint ............................ False
+ use_checkpoint_lr_scheduler ..................... False
+ use_contiguous_buffers_in_ddp ................... True
+ use_cpu_initialization .......................... None
+ use_fused_sdpa .................................. True
+ use_fused_sdpa_with_recompute ................... False
+ use_hpu ......................................... True
+ use_hpu_fp8_transformer_engine .................. False
+ use_hpu_graphs .................................. False
+ use_one_sent_docs ............................... False
+ use_pin_memory .................................. False
+ use_rotary_v2 ................................... False
+ use_seq_len_plus_one_tokens ..................... True
+ use_torch_compile ............................... False
+ use_tutel ....................................... False
+ valid_data_path ................................. None
+ verify_checkpoint ............................... True
+ verify_checkpoint_model_type .................... LLAMA
+ verify_tp_workers ............................... False
+ verify_tp_workers_hash .......................... False
+ virtual_pipeline_model_parallel_size ............ None
+ vocab_extra_ids ................................. 0
+ vocab_file ...................................... /data/arxiv//gpt2-vocab.json
+ weight_decay .................................... 0.1
+ world_size ...................................... 8
+ zero_allgather_bucket_size ...................... 0.0
+ zero_contigious_gradients ....................... False
+ zero_reduce_bucket_size ......................... 0.0
+ zero_reduce_scatter ............................. False
+ zero_stage ...................................... 0
+-------------------- end of arguments ---------------------
+setting number of micro-batches to constant 128
+> building GPT2BPETokenizer tokenizer ...
+hccl device_count: 8
+[2024-04-24 07:22:06,063] [WARNING] [comm.py:163:init_deepspeed_backend] HCCL backend in DeepSpeed not yet implemented
+[2024-04-24 07:22:06,063] [INFO] [comm.py:637:init_distributed] cdb=None
+_initialize_distributed: Initializing with below params:
+args.local_rank: 6
+args.world_size: 8
+args.rank: 6
+args.distributed_backend: hccl
+> setting tensorboard ...
+_initialize_distributed: Initializing with below params:
+args.local_rank: 7
+args.world_size: 8
+args.rank: 7
+args.distributed_backend: hccl
+ > padded vocab (size: 50257) with 175 dummy tokens (new size: 50432)
+_initialize_distributed: Initializing with below params:
+args.local_rank: 0
+args.world_size: 8
+args.rank: 0
+args.distributed_backend: hccl
+> initializing torch distributed ...
+[2024-04-24 07:22:06,173] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend hccl
+_initialize_distributed: Initializing with below params:
+args.local_rank: 2
+args.world_size: 8
+args.rank: 2
+args.distributed_backend: hccl
+> initializing tensor model parallel with size 2
+> initializing pipeline model parallel with size 2
+> setting random seeds to 1234 ...
+> initializing model parallel cuda seeds on global rank 0, model parallel rank 0, and data parallel rank 0 with model parallel seed: 3952 and data parallel seed: 1234
+============================= HABANA PT BRIDGE CONFIGURATION ===========================
+ PT_HPU_LAZY_MODE = 1
+ PT_RECIPE_CACHE_PATH =
+ PT_CACHE_FOLDER_DELETE = 0
+ PT_HPU_RECIPE_CACHE_CONFIG =
+ PT_HPU_MAX_COMPOUND_OP_SIZE = 9223372036854775807
+ PT_HPU_LAZY_ACC_PAR_MODE = 0
+ PT_HPU_ENABLE_REFINE_DYNAMIC_SHAPES = 0
+---------------------------: System Configuration :---------------------------
+Num CPU Cores : 160
+CPU RAM : 1056375244 KB
+------------------------------------------------------------------------------
+> compiling dataset index builder ...
+make: Entering directory '/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/data'
+make: Nothing to be done for 'default'.
+make: Leaving directory '/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/data'
+>>> done with dataset index builder. Compilation time: 0.180 seconds
+WARNING: constraints for invoking optimized fused softmax kernel are not met. We default back to unfused kernel invocations.
+> compiling and loading fused kernels ...
+>>> done with compiling and loading fused kernels. Compilation time: 0.004 seconds
+wandb: Currently logged in as: advaiddeepak0602 (bharatgpt). Use `wandb login --relogin` to force relogin
+wandb: Appending key for api.wandb.ai to your netrc file: /root/.netrc
+wandb: Tracking run with wandb version 0.16.6
+wandb: Run data is saved locally in /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/wandb/run-20240424_072213-mp0t9uy4
+wandb: Run `wandb offline` to turn off syncing.
+wandb: Syncing run youthful-wildflower-2012
+wandb: ⭐️ View project at https://wandb.ai/bharatgpt/llama_runs
+wandb: 🚀 View run at https://wandb.ai/bharatgpt/llama_runs/runs/mp0t9uy4
+wandb: Tracking run with wandb version 0.16.6
+wandb: Run data is saved locally in /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/wandb/run-20240424_072213-w9athpv9
+wandb: Run `wandb offline` to turn off syncing.
+wandb: Syncing run different-planet-2014
+wandb: ⭐️ View project at https://wandb.ai/bharatgpt/llama_runs
+wandb: 🚀 View run at https://wandb.ai/bharatgpt/llama_runs/runs/w9athpv9
+wandb: Tracking run with wandb version 0.16.6
+wandb: Run data is saved locally in /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/wandb/run-20240424_072213-211n5b2u
+wandb: Run `wandb offline` to turn off syncing.
+wandb: Syncing run jolly-pyramid-2013
+wandb: ⭐️ View project at https://wandb.ai/bharatgpt/llama_runs
+wandb: 🚀 View run at https://wandb.ai/bharatgpt/llama_runs/runs/211n5b2u
+wandb: Tracking run with wandb version 0.16.6
+wandb: Run data is saved locally in /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/wandb/run-20240424_072213-tbva9yik
+wandb: Run `wandb offline` to turn off syncing.
+wandb: Syncing run earthy-plasma-2014
+wandb: ⭐️ View project at https://wandb.ai/bharatgpt/llama_runs
+wandb: 🚀 View run at https://wandb.ai/bharatgpt/llama_runs/runs/tbva9yik
+wandb: Tracking run with wandb version 0.16.6
+wandb: Run data is saved locally in /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/wandb/run-20240424_072213-h27k7fos
+wandb: Run `wandb offline` to turn off syncing.
+wandb: Syncing run pleasant-glitter-2017
+wandb: ⭐️ View project at https://wandb.ai/bharatgpt/llama_runs
+wandb: 🚀 View run at https://wandb.ai/bharatgpt/llama_runs/runs/h27k7fos
+wandb: Tracking run with wandb version 0.16.6
+wandb: Run data is saved locally in /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/wandb/run-20240424_072213-pu9rwbfz
+wandb: Run `wandb offline` to turn off syncing.
+wandb: Syncing run zany-snow-2014
+wandb: ⭐️ View project at https://wandb.ai/bharatgpt/llama_runs
+wandb: 🚀 View run at https://wandb.ai/bharatgpt/llama_runs/runs/pu9rwbfz
+wandb: Tracking run with wandb version 0.16.6
+wandb: Run data is saved locally in /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/wandb/run-20240424_072213-ph1uqt0g
+wandb: Run `wandb offline` to turn off syncing.
+wandb: Syncing run hopeful-pine-2018
+wandb: ⭐️ View project at https://wandb.ai/bharatgpt/llama_runs
+wandb: 🚀 View run at https://wandb.ai/bharatgpt/llama_runs/runs/ph1uqt0g
+wandb: Tracking run with wandb version 0.16.6
+wandb: Run data is saved locally in /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/wandb/run-20240424_072213-amj6vf90
+wandb: Run `wandb offline` to turn off syncing.
+wandb: Syncing run swept-sunset-2019
+wandb: ⭐️ View project at https://wandb.ai/bharatgpt/llama_runs
+wandb: 🚀 View run at https://wandb.ai/bharatgpt/llama_runs/runs/amj6vf90
+time to initialize megatron (seconds): 38.936
+[after megatron is initialized] datetime: 2024-04-24 07:22:14
+building LLaMA model ...
+*************** Using FusedSDPA ******************
+/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/core/weight_sharing.py:53: UserWarning: "hpu:X" notation is not supported by Gaudi PyTorch intergration bridge. Please change to "hpu" without index (Triggered internally at /npu-stack/pytorch-integration/pytorch_helpers/lazy_to_backend.cpp:53.)
+ return super().__torch_function__(func, types, new_args, kwargs)
+ > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 1397964800
+ > number of parameters on (tensor, pipeline) model parallel rank (0, 1): 1397969920
+ > number of parameters on (tensor, pipeline) model parallel rank (1, 1): 1397969920
+[2024-04-24 07:22:15,116] [INFO] [utils.py:824:see_memory_usage] Before Building Model
+[2024-04-24 07:22:15,119] [INFO] [utils.py:825:see_memory_usage] MA 0.01 GB Max_MA 0.01 GB CA 0.0 GB Max_CA 0 GB
+[2024-04-24 07:22:15,120] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 369.82 GB, percent = 36.7%
+SEED_LAYERS=False BASE_SEED=1234 SEED_FN=None
+Using topology: {ProcessCoord(pipe=0, data=0, model=0): 0, ProcessCoord(pipe=0, data=0, model=1): 1, ProcessCoord(pipe=0, data=1, model=0): 2, ProcessCoord(pipe=0, data=1, model=1): 3, ProcessCoord(pipe=1, data=0, model=0): 4, ProcessCoord(pipe=1, data=0, model=1): 5, ProcessCoord(pipe=1, data=1, model=0): 6, ProcessCoord(pipe=1, data=1, model=1): 7}
+[2024-04-24 07:22:15,122] [INFO] [module.py:375:_partition_layers] Partitioning pipeline stages with method type:transformer
+stage=0 layers=11
+ 0: _to_float16
+ 1: EmbeddingPipe
+ 2:
+ 3: ParallelTransformerLayerPipe
+ 4: ParallelTransformerLayerPipe
+ 5: ParallelTransformerLayerPipe
+ 6: ParallelTransformerLayerPipe
+ 7: ParallelTransformerLayerPipe
+ 8: ParallelTransformerLayerPipe
+ 9: ParallelTransformerLayerPipe
+ 10: ParallelTransformerLayerPipe
+stage=1 layers=13
+ 11: ParallelTransformerLayerPipe
+ 12: ParallelTransformerLayerPipe
+ 13: ParallelTransformerLayerPipe
+ 14: ParallelTransformerLayerPipe
+ 15: ParallelTransformerLayerPipe
+ 16: ParallelTransformerLayerPipe
+ 17: ParallelTransformerLayerPipe
+ 18: ParallelTransformerLayerPipe
+ 19:
+ 20: WrapName
+ 21: WrapName
+ 22:
+ 23: float16_to_fp32
+ loss: CrossEntropy
+*************** Using FusedSDPA ******************
+[2024-04-24 07:22:15,303] [INFO] [utils.py:824:see_memory_usage] After Building Model
+[2024-04-24 07:22:15,306] [INFO] [utils.py:825:see_memory_usage] MA 0.01 GB Max_MA 0.01 GB CA 0.0 GB Max_CA 0 GB
+[2024-04-24 07:22:15,307] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 370.67 GB, percent = 36.8%
+ > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 1397964800
+> learning rate decay style: cosine
+DeepSpeed is enabled.
+[2024-04-24 07:22:15,310] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed info: version=0.12.4+hpu.synapse.v1.14.0, git-hash=fad45b2, git-branch=1.14.0 +[2024-04-24 07:22:16,123] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed Flops Profiler Enabled: False +[2024-04-24 07:22:16,123] [INFO] [logging.py:96:log_dist] [Rank 0] Using client Optimizer as basic optimizer +[2024-04-24 07:22:16,124] [INFO] [logging.py:96:log_dist] [Rank 0] Removing param_group that has no 'params' in the basic Optimizer +[2024-04-24 07:22:16,124] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed Basic Optimizer = AdamW +[2024-04-24 07:22:16,124] [INFO] [logging.py:96:log_dist] [Rank 0] Creating BF16 optimizer +[2024-04-24 07:22:16,199] [INFO] [engine.py:139:__init__] is_pipe_partitioned= False is_grad_partitioned= False +[2024-04-24 07:22:16,200] [INFO] [engine.py:139:__init__] is_pipe_partitioned= False is_grad_partitioned= False +[2024-04-24 07:22:16,201] [INFO] [engine.py:139:__init__] is_pipe_partitioned= False is_grad_partitioned= False +[2024-04-24 07:22:16,202] [INFO] [engine.py:139:__init__] is_pipe_partitioned= False is_grad_partitioned= False +[2024-04-24 07:22:16,204] [INFO] [engine.py:139:__init__] is_pipe_partitioned= False is_grad_partitioned= False +[2024-04-24 07:22:16,215] [INFO] [engine.py:139:__init__] is_pipe_partitioned= False is_grad_partitioned= False +[2024-04-24 07:22:16,274] [INFO] [utils.py:824:see_memory_usage] begin bf16_optimizer +[2024-04-24 07:22:16,278] [INFO] [utils.py:825:see_memory_usage] MA 2.62 GB Max_MA 2.63 GB CA 0.0 GB Max_CA 0 GB +[2024-04-24 07:22:16,278] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 371.0 GB, percent = 36.8% +[2024-04-24 07:22:16,325] [INFO] [engine.py:139:__init__] is_pipe_partitioned= False is_grad_partitioned= False +[2024-04-24 07:22:16,412] [INFO] [utils.py:824:see_memory_usage] before initializing group 0 +[2024-04-24 07:22:16,415] [INFO] [utils.py:825:see_memory_usage] MA 2.62 GB Max_MA 0.0 GB CA 0.0 GB Max_CA 0 GB +[2024-04-24 07:22:16,416] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 370.89 GB, percent = 36.8% +[2024-04-24 07:22:16,748] [INFO] [utils.py:824:see_memory_usage] after initializing group 0 +[2024-04-24 07:22:16,752] [INFO] [utils.py:825:see_memory_usage] MA 2.62 GB Max_MA 5.22 GB CA 0.0 GB Max_CA 0 GB +[2024-04-24 07:22:16,752] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 371.01 GB, percent = 36.8% +[2024-04-24 07:22:16,858] [INFO] [utils.py:824:see_memory_usage] before initializing group 1 +[2024-04-24 07:22:16,862] [INFO] [utils.py:825:see_memory_usage] MA 2.62 GB Max_MA 0.0 GB CA 0.0 GB Max_CA 0 GB +[2024-04-24 07:22:16,862] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 371.01 GB, percent = 36.8% +[2024-04-24 07:22:16,982] [INFO] [utils.py:824:see_memory_usage] after initializing group 1 +[2024-04-24 07:22:16,986] [INFO] [utils.py:825:see_memory_usage] MA 10.43 GB Max_MA 10.43 GB CA 0.0 GB Max_CA 0 GB +[2024-04-24 07:22:16,986] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 371.01 GB, percent = 36.8% +[2024-04-24 07:22:17,116] [INFO] [utils.py:824:see_memory_usage] before initialize_optimizer +[2024-04-24 07:22:17,120] [INFO] [utils.py:825:see_memory_usage] MA 10.43 GB Max_MA 0.0 GB CA 0.0 GB Max_CA 0 GB +[2024-04-24 07:22:17,120] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 371.03 GB, percent = 36.8% +[2024-04-24 07:22:17,251] [INFO] [utils.py:824:see_memory_usage] end initialize_optimizer +[2024-04-24 
07:22:17,255] [INFO] [utils.py:825:see_memory_usage] MA 10.43 GB Max_MA 0.0 GB CA 0.0 GB Max_CA 0 GB +[2024-04-24 07:22:17,255] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 371.04 GB, percent = 36.8% +[2024-04-24 07:22:17,388] [INFO] [utils.py:824:see_memory_usage] end bf16_optimizer +[2024-04-24 07:22:17,392] [INFO] [utils.py:825:see_memory_usage] MA 10.43 GB Max_MA 0.0 GB CA 0.0 GB Max_CA 0 GB +[2024-04-24 07:22:17,392] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 371.02 GB, percent = 36.8% +[2024-04-24 07:22:17,393] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed Final Optimizer = BF16_Optimizer +[2024-04-24 07:22:17,394] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed using client LR scheduler +[2024-04-24 07:22:17,394] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed LR Scheduler = +[2024-04-24 07:22:17,394] [INFO] [logging.py:96:log_dist] [Rank 0] step=0, skipped=0, lr=[0.0, 0.0], mom=[(0.9, 0.95), (0.9, 0.95)] +[2024-04-24 07:22:17,394] [INFO] [config.py:992:print] DeepSpeedEngine configuration: +[2024-04-24 07:22:17,394] [INFO] [config.py:996:print] activation_checkpointing_config { + "partition_activations": false, + "contiguous_memory_optimization": false, + "cpu_checkpointing": false, + "number_checkpoints": null, + "synchronize_checkpoint_boundary": false, + "profile": false +} +[2024-04-24 07:22:17,394] [INFO] [config.py:996:print] aio_config ................... {'block_size': 1048576, 'queue_depth': 8, 'thread_count': 1, 'single_submit': False, 'overlap_events': True} +[2024-04-24 07:22:17,395] [INFO] [config.py:996:print] amp_enabled .................. False +[2024-04-24 07:22:17,395] [INFO] [config.py:996:print] amp_params ................... False +[2024-04-24 07:22:17,395] [INFO] [config.py:996:print] autotuning_config ............ { + "enabled": false, + "start_step": null, + "end_step": null, + "metric_path": null, + "arg_mappings": null, + "metric": "throughput", + "model_info": null, + "results_dir": "autotuning_results", + "exps_dir": "autotuning_exps", + "overwrite": true, + "fast": true, + "start_profile_step": 3, + "end_profile_step": 5, + "tuner_type": "gridsearch", + "tuner_early_stopping": 5, + "tuner_num_trials": 50, + "model_info_path": null, + "mp_size": 1, + "max_train_batch_size": null, + "min_train_batch_size": 1, + "max_train_micro_batch_size_per_gpu": 1.024000e+03, + "min_train_micro_batch_size_per_gpu": 1, + "num_tuning_micro_batch_sizes": 3 +} +[2024-04-24 07:22:17,395] [INFO] [config.py:996:print] bfloat16_accumulate_grads_via_hooks True +[2024-04-24 07:22:17,395] [INFO] [config.py:996:print] bfloat16_enabled ............. True +[2024-04-24 07:22:17,395] [INFO] [config.py:996:print] checkpoint_parallel_write_pipeline False +[2024-04-24 07:22:17,395] [INFO] [config.py:996:print] checkpoint_tag_validation_enabled True +[2024-04-24 07:22:17,395] [INFO] [config.py:996:print] checkpoint_tag_validation_fail False +[2024-04-24 07:22:17,395] [INFO] [config.py:996:print] comms_config ................. +[2024-04-24 07:22:17,395] [INFO] [config.py:996:print] communication_data_type ...... None +[2024-04-24 07:22:17,395] [INFO] [config.py:996:print] compression_config ........... 
{'weight_quantization': {'shared_parameters': {'enabled': False, 'quantizer_kernel': False, 'schedule_offset': 0, 'quantize_groups': 1, 'quantize_verbose': False, 'quantization_type': 'symmetric', 'quantize_weight_in_forward': False, 'rounding': 'nearest', 'fp16_mixed_quantize': False, 'quantize_change_ratio': 0.001}, 'different_groups': {}}, 'activation_quantization': {'shared_parameters': {'enabled': False, 'quantization_type': 'symmetric', 'range_calibration': 'dynamic', 'schedule_offset': 1000}, 'different_groups': {}}, 'sparse_pruning': {'shared_parameters': {'enabled': False, 'method': 'l1', 'schedule_offset': 1000}, 'different_groups': {}}, 'row_pruning': {'shared_parameters': {'enabled': False, 'method': 'l1', 'schedule_offset': 1000}, 'different_groups': {}}, 'head_pruning': {'shared_parameters': {'enabled': False, 'method': 'topk', 'schedule_offset': 1000}, 'different_groups': {}}, 'channel_pruning': {'shared_parameters': {'enabled': False, 'method': 'l1', 'schedule_offset': 1000}, 'different_groups': {}}, 'layer_reduction': {'enabled': False}} +[2024-04-24 07:22:17,395] [INFO] [config.py:996:print] curriculum_enabled_legacy .... False +[2024-04-24 07:22:17,395] [INFO] [config.py:996:print] curriculum_params_legacy ..... False +[2024-04-24 07:22:17,395] [INFO] [config.py:996:print] data_efficiency_config ....... {'enabled': False, 'seed': 1234, 'data_sampling': {'enabled': False, 'num_epochs': 1000, 'num_workers': 0, 'curriculum_learning': {'enabled': False}}, 'data_routing': {'enabled': False, 'random_ltd': {'enabled': False, 'layer_token_lr_schedule': {'enabled': False}}}} +[2024-04-24 07:22:17,396] [INFO] [config.py:996:print] data_efficiency_enabled ...... False +[2024-04-24 07:22:17,396] [INFO] [config.py:996:print] dataloader_drop_last ......... False +[2024-04-24 07:22:17,396] [INFO] [config.py:996:print] disable_allgather ............ False +[2024-04-24 07:22:17,396] [INFO] [config.py:996:print] dump_state ................... False +[2024-04-24 07:22:17,396] [INFO] [config.py:996:print] dynamic_loss_scale_args ...... None +[2024-04-24 07:22:17,396] [INFO] [config.py:996:print] eigenvalue_enabled ........... False +[2024-04-24 07:22:17,396] [INFO] [config.py:996:print] eigenvalue_gas_boundary_resolution 1 +[2024-04-24 07:22:17,396] [INFO] [config.py:996:print] eigenvalue_layer_name ........ bert.encoder.layer +[2024-04-24 07:22:17,396] [INFO] [config.py:996:print] eigenvalue_layer_num ......... 0 +[2024-04-24 07:22:17,396] [INFO] [config.py:996:print] eigenvalue_max_iter .......... 100 +[2024-04-24 07:22:17,396] [INFO] [config.py:996:print] eigenvalue_stability ......... 1e-06 +[2024-04-24 07:22:17,396] [INFO] [config.py:996:print] eigenvalue_tol ............... 0.01 +[2024-04-24 07:22:17,396] [INFO] [config.py:996:print] eigenvalue_verbose ........... False +[2024-04-24 07:22:17,396] [INFO] [config.py:996:print] elasticity_enabled ........... False +[2024-04-24 07:22:17,396] [INFO] [config.py:996:print] flops_profiler_config ........ { + "enabled": false, + "recompute_fwd_factor": 0.0, + "profile_step": 1, + "module_depth": -1, + "top_modules": 1, + "detailed": true, + "output_file": null +} +[2024-04-24 07:22:17,396] [INFO] [config.py:996:print] fp16_auto_cast ............... None +[2024-04-24 07:22:17,396] [INFO] [config.py:996:print] fp16_enabled ................. False +[2024-04-24 07:22:17,397] [INFO] [config.py:996:print] fp16_master_weights_and_gradients False +[2024-04-24 07:22:17,397] [INFO] [config.py:996:print] global_rank .................. 
0
+[2024-04-24 07:22:17,397] [INFO] [config.py:996:print] grad_accum_dtype ............. None
+[2024-04-24 07:22:17,397] [INFO] [config.py:996:print] gradient_accumulation_steps .. 128
+[2024-04-24 07:22:17,397] [INFO] [config.py:996:print] gradient_clipping ............ 1.0
+[2024-04-24 07:22:17,397] [INFO] [config.py:996:print] gradient_predivide_factor .... 1.0
+[2024-04-24 07:22:17,397] [INFO] [config.py:996:print] hybrid_engine ................ enabled=False max_out_tokens=512 inference_tp_size=1 release_inference_cache=False pin_parameters=True tp_gather_partition_size=8
+[2024-04-24 07:22:17,397] [INFO] [config.py:996:print] initial_dynamic_scale ........ 1
+[2024-04-24 07:22:17,397] [INFO] [config.py:996:print] load_universal_checkpoint .... False
+[2024-04-24 07:22:17,397] [INFO] [config.py:996:print] loss_scale ................... 1.0
+[2024-04-24 07:22:17,397] [INFO] [config.py:996:print] memory_breakdown ............. False
+[2024-04-24 07:22:17,397] [INFO] [config.py:996:print] mics_hierarchial_params_gather False
+[2024-04-24 07:22:17,397] [INFO] [config.py:996:print] mics_shard_size .............. -1
+[2024-04-24 07:22:17,397] [INFO] [config.py:996:print] monitor_config ............... tensorboard=TensorBoardConfig(enabled=False, output_path='', job_name='DeepSpeedJobName') wandb=WandbConfig(enabled=False, group=None, team=None, project='deepspeed') csv_monitor=CSVConfig(enabled=False, output_path='', job_name='DeepSpeedJobName') enabled=False
+[2024-04-24 07:22:17,397] [INFO] [config.py:996:print] nebula_config ................ {
+ "enabled": false,
+ "persistent_storage_path": null,
+ "persistent_time_interval": 100,
+ "num_of_version_in_retention": 2,
+ "enable_nebula_load": true,
+ "load_path": null
+}
+[2024-04-24 07:22:17,397] [INFO] [config.py:996:print] optimizer_legacy_fusion ...... False
+[2024-04-24 07:22:17,397] [INFO] [config.py:996:print] optimizer_name ............... None
+[2024-04-24 07:22:17,398] [INFO] [config.py:996:print] optimizer_params ............. None
+[2024-04-24 07:22:17,398] [INFO] [config.py:996:print] pipeline ..................... {'stages': 'auto', 'partition': 'best', 'seed_layers': False, 'activation_checkpoint_interval': 0, 'pipe_partitioned': False, 'grad_partitioned': False}
+[2024-04-24 07:22:17,398] [INFO] [config.py:996:print] pld_enabled .................. False
+[2024-04-24 07:22:17,398] [INFO] [config.py:996:print] pld_params ................... False
+[2024-04-24 07:22:17,398] [INFO] [config.py:996:print] prescale_gradients ........... False
+[2024-04-24 07:22:17,398] [INFO] [config.py:996:print] scheduler_name ............... None
+[2024-04-24 07:22:17,398] [INFO] [config.py:996:print] scheduler_params ............. None
+[2024-04-24 07:22:17,398] [INFO] [config.py:996:print] seq_parallel_communication_data_type torch.float32
+[2024-04-24 07:22:17,398] [INFO] [config.py:996:print] sparse_attention ............. None
+[2024-04-24 07:22:17,398] [INFO] [config.py:996:print] sparse_gradients_enabled ..... False
+[2024-04-24 07:22:17,398] [INFO] [config.py:996:print] steps_per_print .............. 10
+[2024-04-24 07:22:17,398] [INFO] [config.py:996:print] train_batch_size ............. 256
+[2024-04-24 07:22:17,398] [INFO] [config.py:996:print] train_micro_batch_size_per_gpu 1
+[2024-04-24 07:22:17,398] [INFO] [config.py:996:print] use_data_before_expert_parallel_ False
+[2024-04-24 07:22:17,398] [INFO] [config.py:996:print] use_node_local_storage ....... False
+[2024-04-24 07:22:17,398] [INFO] [config.py:996:print] wall_clock_breakdown ......... False
+[2024-04-24 07:22:17,398] [INFO] [config.py:996:print] weight_quantization_config ... None
+[2024-04-24 07:22:17,398] [INFO] [config.py:996:print] world_size ................... 2
+[2024-04-24 07:22:17,399] [INFO] [config.py:996:print] zero_allow_comm_data_type_fp32 False
+[2024-04-24 07:22:17,399] [INFO] [config.py:996:print] zero_allow_untested_optimizer False
+[2024-04-24 07:22:17,399] [INFO] [config.py:996:print] zero_config .................. stage=0 contiguous_gradients=True reduce_scatter=False reduce_bucket_size=500,000,000 use_multi_rank_bucket_allreduce=True allgather_partitions=True allgather_bucket_size=500,000,000 overlap_comm=False load_from_fp32_weights=True elastic_checkpoint=False offload_param=None offload_optimizer=None sub_group_size=1,000,000,000 cpu_offload_param=None cpu_offload_use_pin_memory=None cpu_offload=None prefetch_bucket_size=50,000,000 param_persistence_threshold=100,000 model_persistence_threshold=sys.maxsize max_live_parameters=1,000,000,000 max_reuse_distance=1,000,000,000 gather_16bit_weights_on_model_save=False stage3_gather_fp16_weights_on_model_save=False use_all_reduce_for_fetch_params=False ignore_unused_parameters=True legacy_stage1=False round_robin_gradients=False zero_hpz_partition_size=1 zero_quantized_weights=False zero_quantized_nontrainable_weights=False zero_quantized_gradients=False mics_shard_size=-1 mics_hierarchical_params_gather=False memory_efficient_linear=True pipeline_loading_checkpoint=False override_module_apply=True
+[2024-04-24 07:22:17,399] [INFO] [config.py:996:print] zero_enabled ................. False
+[2024-04-24 07:22:17,399] [INFO] [config.py:996:print] zero_force_ds_cpu_optimizer .. True
+[2024-04-24 07:22:17,399] [INFO] [config.py:996:print] zero_optimization_stage ...... 0
+[2024-04-24 07:22:17,399] [INFO] [config.py:982:print_user_config] json = {
+ "train_batch_size": 256,
+ "train_micro_batch_size_per_gpu": 1,
+ "steps_per_print": 10,
+ "gradient_clipping": 1.0,
+ "zero_optimization": {
+ "stage": 0
+ },
+ "bf16": {
+ "enabled": true,
+ "accumulate_grads_via_hooks": true
+ },
+ "fp16": {
+ "enabled": false
+ },
+ "wall_clock_breakdown": false,
+ "pipeline": {
+ "pipe_partitioned": false,
+ "grad_partitioned": false
+ }
+}
+[2024-04-24 07:22:17,399] [INFO] [engine.py:99:__init__] CONFIG: micro_batches=128 micro_batch_size=1
+[2024-04-24 07:22:17,399] [INFO] [engine.py:139:__init__] is_pipe_partitioned= False is_grad_partitioned= False
+[2024-04-24 07:22:17,669] [INFO] [engine.py:180:__init__] RANK=0 STAGE=0 LAYERS=11 [0, 11) STAGE_PARAMS=1397964800 (1397.965M) TOTAL_PARAMS=5591869440 (5591.869M) UNIQUE_PARAMS=5591869440 (5591.869M)
+[2024-04-24 07:22:17,669] [INFO] [engine.py:180:__init__] RANK=1 STAGE=0 LAYERS=11 [0, 11) STAGE_PARAMS=1397964800 (1397.965M) TOTAL_PARAMS=5591869440 (5591.869M) UNIQUE_PARAMS=5591869440 (5591.869M)
+[2024-04-24 07:22:17,669] [INFO] [engine.py:180:__init__] RANK=4 STAGE=1 LAYERS=13 [11, 24) STAGE_PARAMS=1397969920 (1397.970M) TOTAL_PARAMS=5591869440 (5591.869M) UNIQUE_PARAMS=5591869440 (5591.869M)
+[2024-04-24 07:22:17,670] [INFO] [engine.py:180:__init__] RANK=5 STAGE=1 LAYERS=13 [11, 24) STAGE_PARAMS=1397969920 (1397.970M) TOTAL_PARAMS=5591869440 (5591.869M) UNIQUE_PARAMS=5591869440 (5591.869M)
+[2024-04-24 07:22:17,672] [WARNING] [engine.py:2763:load_checkpoint] Unable to find latest file at /data/output/llama13b_5M//checkpoints_zero_stage_2/latest, if trying to load latest checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint.
+[2024-04-24 07:22:17,673] [WARNING] [engine.py:2763:load_checkpoint] Unable to find latest file at /data/output/llama13b_5M//checkpoints_zero_stage_2/latest, if trying to load latest checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint.
+[2024-04-24 07:22:17,674] [WARNING] [engine.py:2763:load_checkpoint] Unable to find latest file at /data/output/llama13b_5M//checkpoints_zero_stage_2/latest, if trying to load latest checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint.
+[2024-04-24 07:22:17,674] [WARNING] [engine.py:2763:load_checkpoint] Unable to find latest file at /data/output/llama13b_5M//checkpoints_zero_stage_2/latest, if trying to load latest checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint.
+[2024-04-24 07:22:17,674] [WARNING] [engine.py:2763:load_checkpoint] Unable to find latest file at /data/output/llama13b_5M//checkpoints_zero_stage_2/latest, if trying to load latest checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint.
+[2024-04-24 07:22:17,674] [WARNING] [engine.py:2763:load_checkpoint] Unable to find latest file at /data/output/llama13b_5M//checkpoints_zero_stage_2/latest, if trying to load latest checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint.
+[2024-04-24 07:22:17,674] [WARNING] [engine.py:2763:load_checkpoint] Unable to find latest file at /data/output/llama13b_5M//checkpoints_zero_stage_2/latest, if trying to load latest checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint.
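The batch-size figures echoed above are mutually consistent: the accumulation depth follows from the batch settings and the data-parallel world size (the "world_size ................... 2" line). A minimal sketch of that arithmetic, assuming the usual relationship train_batch_size = micro_batch_size x grad_accum_steps x dp_world_size; the helper name is illustrative, not a DeepSpeed API:

    def expected_grad_accum_steps(train_batch_size: int,
                                  micro_batch_size: int,
                                  dp_world_size: int) -> int:
        # the global batch must factor exactly into per-device micro batches
        # replicated across the data-parallel group
        assert train_batch_size % (micro_batch_size * dp_world_size) == 0
        return train_batch_size // (micro_batch_size * dp_world_size)

    # 256 // (1 * 2) = 128, matching "gradient_accumulation_steps .. 128"
    # and the engine line "CONFIG: micro_batches=128 micro_batch_size=1"
    print(expected_grad_accum_steps(256, 1, 2))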
+WARNING: could not find the metadata file /data/output/llama13b_5M//checkpoints_zero_stage_2
+[2024-04-24 07:22:17,674] [WARNING] [engine.py:2763:load_checkpoint] Unable to find latest file at /data/output/llama13b_5M//checkpoints_zero_stage_2/latest, if trying to load latest checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint.
+ will not load any checkpoints and will start from random
+time (ms) | load-checkpoint: 2.70
+[after model, optimizer, and learning rate scheduler are built] datetime: 2024-04-24 07:22:17
+> building train, validation, and test datasets ...
+ > datasets target sizes (minimum size):
+ train: 2560000
+ validation: 1282560
+ test: 2560
+> building train, validation, and test datasets for GPT ...
+Single data path provided for train, valid & test
+ > building dataset index ...
+ reading sizes...
+ reading pointers...
+ reading document index...
+ creating numpy buffer of mmap...
+ creating memory view of numpy buffer...
+ > finished creating indexed dataset in 0.010700 seconds
+ number of documents: 1558306
+ > dataset split:
+ train:
+ document indices in [0, 1509999) total of 1509999 documents
+ validation:
+ document indices in [1509999, 1556748) total of 46749 documents
+ test:
+ document indices in [1556748, 1558306) total of 1558 documents
+Loading dataset index file from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_doc_idx.npyLoading dataset index file from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_doc_idx.npy
+
+ > loaded doc-idx mapping from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_doc_idx.npy
+Loading dataset index file from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_sample_idx.npyLoading dataset index file from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_sample_idx.npy
+
+Loading dataset index file from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_shuffle_idx.npy > loaded sample-idx mapping from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_sample_idx.npy
+
+Loading dataset index file from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_shuffle_idx.npy
+ > loaded shuffle-idx mapping from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_shuffle_idx.npy
+ loaded indexed file in 0.004 seconds
+ total number of samples: 15244235
+ total number of epochs: 1
+Loading dataset index file from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_doc_idx.npy
+Loading dataset index file from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_doc_idx.npy
+Loading dataset index file from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_sample_idx.npyLoading dataset index file from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_sample_idx.npy
+
+Loading dataset index file from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_shuffle_idx.npyLoading dataset index file from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_shuffle_idx.npy
+
+Loading dataset index file from /data/arxiv//tokenized_text_document_valid_indexmap_1282560ns_2048sl_1234s_doc_idx.npyLoading dataset index file from /data/arxiv//tokenized_text_document_valid_indexmap_1282560ns_2048sl_1234s_doc_idx.npy
+Loading dataset index file from /data/arxiv//tokenized_text_document_valid_indexmap_1282560ns_2048sl_1234s_doc_idx.npy
+Loading dataset index file from /data/arxiv//tokenized_text_document_valid_indexmap_1282560ns_2048sl_1234s_doc_idx.npy
+
+Loading dataset index file from /data/arxiv//tokenized_text_document_valid_indexmap_1282560ns_2048sl_1234s_sample_idx.npy
+Loading dataset index file from /data/arxiv//tokenized_text_document_valid_indexmap_1282560ns_2048sl_1234s_sample_idx.npy > loaded doc-idx mapping from /data/arxiv//tokenized_text_document_valid_indexmap_1282560ns_2048sl_1234s_doc_idx.npyLoading dataset index file from /data/arxiv//tokenized_text_document_valid_indexmap_1282560ns_2048sl_1234s_sample_idx.npy
+
+
+Loading dataset index file from /data/arxiv//tokenized_text_document_valid_indexmap_1282560ns_2048sl_1234s_sample_idx.npy
+ > loaded sample-idx mapping from /data/arxiv//tokenized_text_document_valid_indexmap_1282560ns_2048sl_1234s_sample_idx.npyLoading dataset index file from /data/arxiv//tokenized_text_document_valid_indexmap_1282560ns_2048sl_1234s_shuffle_idx.npyLoading dataset index file from /data/arxiv//tokenized_text_document_valid_indexmap_1282560ns_2048sl_1234s_shuffle_idx.npyLoading dataset index file from /data/arxiv//tokenized_text_document_valid_indexmap_1282560ns_2048sl_1234s_shuffle_idx.npy
+
+
+
+Loading dataset index file from /data/arxiv//tokenized_text_document_valid_indexmap_1282560ns_2048sl_1234s_shuffle_idx.npy
+ > loaded shuffle-idx mapping from /data/arxiv//tokenized_text_document_valid_indexmap_1282560ns_2048sl_1234s_shuffle_idx.npy
+ loaded indexed file in 0.003 seconds
+ total number of samples: 1443484
+ total number of epochs: 3
+Loading dataset index file from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_doc_idx.npyLoading dataset index file from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_doc_idx.npy
+
+Loading dataset index file from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_doc_idx.npy
+Loading dataset index file from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_doc_idx.npy
+Loading dataset index file from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_sample_idx.npyLoading dataset index file from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_sample_idx.npyLoading dataset index file from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_sample_idx.npy
+
+
+ > loaded doc-idx mapping from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_doc_idx.npy
+Loading dataset index file from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_sample_idx.npy
+Loading dataset index file from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_shuffle_idx.npyLoading dataset index file from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_shuffle_idx.npyLoading dataset index file from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_shuffle_idx.npy
+
+
+ > loaded sample-idx mapping from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_sample_idx.npy
+Loading dataset index file from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_shuffle_idx.npy
+ > loaded shuffle-idx mapping from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_shuffle_idx.npy
+ loaded indexed file in 0.032 seconds
+ total number of samples: 16581
+ total number of epochs: 1
+> finished creating GPT datasets ...
+[after dataloaders are built] datetime: 2024-04-24 07:22:19
+done with setup ...
+training ...
+time (ms) | model-and-optimizer-setup: 2739.98 | train/valid/test-data-iterators-setup: 1518.83
+[before the start of training step] datetime: 2024-04-24 07:22:19
+******************INIT_SAVING_20************************************INIT_SAVING_20******************
+
+******************INIT_SAVING_20************************************INIT_SAVING_20************************************INIT_SAVING_20************************************INIT_SAVING_20******************
+******************INIT_SAVING_20******************
+
+
+
+ iteration 10/ 10000 | consumed samples: 2560 | consumed tokens: 5242880 | elapsed time per iteration (ms): 0.5 | learning rate: 0.000E+00 | global batch size: 256 | loss scale: 0.0 | grad norm: 1.000 | num zeros: 1.0 | number of skipped iterations: 10 | number of nan iterations: 0 | samples per second: 514096.440 | TFLOPs: 4392335.75 |
+ iteration 20/ 10000 | consumed samples: 5120 | consumed tokens: 10485760 | elapsed time per iteration (ms): 0.6 | learning rate: 0.000E+00 | global batch size: 256 | loss scale: 0.0 | grad norm: 1.000 | num zeros: 1.0 | number of skipped iterations: 10 | number of nan iterations: 0 | samples per second: 431793.873 | TFLOPs: 3689159.30 |
+******************INIT_SAVING_20******************
+saving checkpoint at iteration 20 to /data/output/llama13b_5M//checkpoints_zero_stage_2
+[2024-04-24 07:22:19,375] [INFO] [logging.py:96:log_dist] [Rank 0] [Torch] Checkpoint global_step0 is about to be saved!
+[2024-04-24 07:22:19,569] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_01-model_00-model_states.pt...
+[2024-04-24 07:22:19,574] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_01-model_01-model_states.pt...
+[2024-04-24 07:22:19,615] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_11-model_01-model_states.pt...
+[2024-04-24 07:22:19,624] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_11-model_00-model_states.pt...
+[2024-04-24 07:22:19,814] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_01-model_00-model_states.pt.
+[2024-04-24 07:22:19,835] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_01-model_01-model_states.pt.
+[2024-04-24 07:22:19,910] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_11-model_01-model_states.pt.
+[2024-04-24 07:22:19,914] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_03-model_00-model_states.pt...
+[2024-04-24 07:22:19,938] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_03-model_01-model_states.pt...
+[2024-04-24 07:22:19,944] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_11-model_00-model_states.pt.
+[2024-04-24 07:22:20,001] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_12-model_01-model_states.pt...
+[2024-04-24 07:22:20,035] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_12-model_00-model_states.pt...
+[2024-04-24 07:22:20,259] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_03-model_00-model_states.pt.
+[2024-04-24 07:22:20,276] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_03-model_01-model_states.pt.
+[2024-04-24 07:22:20,292] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_12-model_01-model_states.pt.
+[2024-04-24 07:22:20,326] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_12-model_00-model_states.pt.
+[2024-04-24 07:22:20,332] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_04-model_00-model_states.pt...
+[2024-04-24 07:22:20,367] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_04-model_01-model_states.pt...
+[2024-04-24 07:22:20,376] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_13-model_01-model_states.pt...
+[2024-04-24 07:22:20,426] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_13-model_00-model_states.pt...
+[2024-04-24 07:22:20,617] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_04-model_00-model_states.pt.
+[2024-04-24 07:22:20,682] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_04-model_01-model_states.pt.
+[2024-04-24 07:22:20,686] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_05-model_00-model_states.pt...
+[2024-04-24 07:22:20,696] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_13-model_01-model_states.pt.
+[2024-04-24 07:22:20,727] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_13-model_00-model_states.pt.
+[2024-04-24 07:22:20,770] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_05-model_01-model_states.pt...
+[2024-04-24 07:22:20,783] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_14-model_01-model_states.pt...
+[2024-04-24 07:22:20,822] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_14-model_00-model_states.pt...
+[2024-04-24 07:22:20,986] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_05-model_00-model_states.pt.
+[2024-04-24 07:22:21,055] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_06-model_00-model_states.pt...
+[2024-04-24 07:22:21,090] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_05-model_01-model_states.pt.
+[2024-04-24 07:22:21,116] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_14-model_00-model_states.pt.
+[2024-04-24 07:22:21,148] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_14-model_01-model_states.pt.
+[2024-04-24 07:22:21,171] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_06-model_01-model_states.pt...
+[2024-04-24 07:22:21,198] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_15-model_00-model_states.pt...
+[2024-04-24 07:22:21,236] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_15-model_01-model_states.pt...
+[2024-04-24 07:22:21,341] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_06-model_00-model_states.pt.
+[2024-04-24 07:22:21,414] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_07-model_00-model_states.pt...
+[2024-04-24 07:22:21,475] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_06-model_01-model_states.pt.
+[2024-04-24 07:22:21,508] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_15-model_00-model_states.pt.
+[2024-04-24 07:22:21,549] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_15-model_01-model_states.pt.
+[2024-04-24 07:22:21,563] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_07-model_01-model_states.pt...
+[2024-04-24 07:22:21,593] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_16-model_00-model_states.pt...
+[2024-04-24 07:22:21,639] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_16-model_01-model_states.pt...
+[2024-04-24 07:22:21,695] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_07-model_00-model_states.pt.
+[2024-04-24 07:22:21,769] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_08-model_00-model_states.pt...
+[2024-04-24 07:22:21,866] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_07-model_01-model_states.pt.
+[2024-04-24 07:22:21,881] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_16-model_00-model_states.pt.
+[2024-04-24 07:22:21,946] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_08-model_01-model_states.pt...
+[2024-04-24 07:22:21,958] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_17-model_00-model_states.pt...
+[2024-04-24 07:22:21,969] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_16-model_01-model_states.pt.
+[2024-04-24 07:22:22,044] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_17-model_01-model_states.pt...
+[2024-04-24 07:22:22,067] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_08-model_00-model_states.pt.
+[2024-04-24 07:22:22,142] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_09-model_00-model_states.pt...
+[2024-04-24 07:22:22,259] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_08-model_01-model_states.pt.
+[2024-04-24 07:22:22,264] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_17-model_00-model_states.pt.
+[2024-04-24 07:22:22,337] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_09-model_01-model_states.pt...
+[2024-04-24 07:22:22,346] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_18-model_00-model_states.pt...
+[2024-04-24 07:22:22,348] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_17-model_01-model_states.pt.
+[2024-04-24 07:22:22,418] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_09-model_00-model_states.pt.
+[2024-04-24 07:22:22,429] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_18-model_01-model_states.pt...
+[2024-04-24 07:22:22,488] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_10-model_00-model_states.pt...
+[2024-04-24 07:22:22,628] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_09-model_01-model_states.pt.
+[2024-04-24 07:22:22,643] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_18-model_00-model_states.pt.
+[2024-04-24 07:22:22,647] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_20-model_00-model_states.pt...
+[2024-04-24 07:22:22,652] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_20-model_00-model_states.pt.
+[2024-04-24 07:22:22,708] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_10-model_01-model_states.pt...
+[2024-04-24 07:22:22,723] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_18-model_01-model_states.pt.
+[2024-04-24 07:22:22,726] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_20-model_01-model_states.pt...
+[2024-04-24 07:22:22,729] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_21-model_00-model_states.pt...
+[2024-04-24 07:22:22,738] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_20-model_01-model_states.pt.
+[2024-04-24 07:22:22,756] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_10-model_00-model_states.pt.
+[2024-04-24 07:22:22,758] [INFO] [logging.py:96:log_dist] [Rank 0] Saving model checkpoint: /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/mp_rank_00_model_states.pt
+[2024-04-24 07:22:22,758] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/mp_rank_00_model_states.pt...
+[2024-04-24 07:22:22,780] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/mp_rank_00_model_states.pt.
+[2024-04-24 07:22:22,783] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt...
+[2024-04-24 07:22:22,785] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt...
+[2024-04-24 07:22:22,807] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_21-model_01-model_states.pt...
+[2024-04-24 07:22:22,983] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_21-model_00-model_states.pt.
+[2024-04-24 07:22:22,985] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/mp_rank_02_model_states.pt...
+[2024-04-24 07:22:23,009] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/mp_rank_02_model_states.pt.
+[2024-04-24 07:22:23,013] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_02_optim_states.pt...
+[2024-04-24 07:22:23,014] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_02_optim_states.pt...
+[2024-04-24 07:22:23,016] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_10-model_01-model_states.pt.
+[2024-04-24 07:22:23,018] [INFO] [logging.py:96:log_dist] [Rank 1] Saving model checkpoint: /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/mp_rank_01_model_states.pt
+[2024-04-24 07:22:23,018] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/mp_rank_01_model_states.pt...
+[2024-04-24 07:22:23,036] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/mp_rank_01_model_states.pt.
+[2024-04-24 07:22:23,039] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_01_optim_states.pt...
+[2024-04-24 07:22:23,040] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_01_optim_states.pt...
+[2024-04-24 07:22:23,059] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_21-model_01-model_states.pt.
+[2024-04-24 07:22:23,061] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/mp_rank_03_model_states.pt...
+[2024-04-24 07:22:23,081] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/mp_rank_03_model_states.pt.
+[2024-04-24 07:22:23,083] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_03_optim_states.pt...
+[2024-04-24 07:22:23,085] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_03_optim_states.pt...
+[2024-04-24 07:22:36,301] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt.
+[2024-04-24 07:22:36,302] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt...
+[2024-04-24 07:22:36,438] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_01_optim_states.pt.
+[2024-04-24 07:22:36,439] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_01_optim_states.pt...
+[2024-04-24 07:22:36,440] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_03_optim_states.pt.
+[2024-04-24 07:22:36,441] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_03_optim_states.pt...
+[2024-04-24 07:22:36,796] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_03_optim_states.pt.
+[2024-04-24 07:22:36,797] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_03_optim_states.pt...
+[2024-04-24 07:22:36,821] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_01_optim_states.pt.
+[2024-04-24 07:22:36,822] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_01_optim_states.pt...
+[2024-04-24 07:22:38,082] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_02_optim_states.pt.
+[2024-04-24 07:22:38,083] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_02_optim_states.pt...
+[2024-04-24 07:22:38,152] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_02_optim_states.pt.
+[2024-04-24 07:22:38,152] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_02_optim_states.pt...
+[2024-04-24 07:22:38,158] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt.
+[2024-04-24 07:22:38,158] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt...
+[2024-04-24 07:22:51,480] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt.
+[2024-04-24 07:22:56,262] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_03_optim_states.pt.
+[2024-04-24 07:22:56,262] [INFO] [engine.py:3481:_save_zero_checkpoint] bf16_zero checkpoint saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_03_optim_states.pt
+[2024-04-24 07:22:56,262] [INFO] [torch_checkpoint_engine.py:34:commit] [Torch] Checkpoint global_step0 is ready now!
+[2024-04-24 07:22:56,904] [INFO] [engine.py:3481:_save_zero_checkpoint] bf16_zero checkpoint saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
+[2024-04-24 07:22:56,904] [INFO] [torch_checkpoint_engine.py:34:commit] [Torch] Checkpoint global_step0 is ready now!
+[2024-04-24 07:22:57,019] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_03_optim_states.pt.
+[2024-04-24 07:22:57,020] [INFO] [engine.py:3481:_save_zero_checkpoint] bf16_zero checkpoint saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_03_optim_states.pt
+[2024-04-24 07:22:57,020] [INFO] [torch_checkpoint_engine.py:34:commit] [Torch] Checkpoint global_step0 is ready now!
+[2024-04-24 07:22:57,693] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_01_optim_states.pt.
+[2024-04-24 07:22:57,694] [INFO] [engine.py:3481:_save_zero_checkpoint] bf16_zero checkpoint saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_01_optim_states.pt
+[2024-04-24 07:22:57,694] [INFO] [torch_checkpoint_engine.py:34:commit] [Torch] Checkpoint global_step0 is ready now!
+[2024-04-24 07:22:57,834] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt.
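The shard names in this save sequence follow one fixed scheme: layer_NN-model_MM-model_states.pt for per-layer weights, mp_rank_MM_model_states.pt for the rest of each model-parallel rank's state, and bf16_zero_pp_rank_D_mp_rank_MM_optim_states.pt for optimizer state. Reading the ranks off this log, D appears to be the ZeRO data-parallel rank (0-1) and MM the model-parallel rank (00-03, i.e. tp=2 x pp=2). A sketch that regenerates the expected optimizer-shard names under those assumptions (illustrative only, not DeepSpeed code):

    from itertools import product

    def zero_optim_shard_names(dp_ranks: int = 2, mp_ranks: int = 4) -> list:
        # one bf16 ZeRO optimizer-state file per (data-parallel, model-parallel)
        # rank pair; the "pp_rank" in these names seems to be the ZeRO partition
        # (data-parallel) rank rather than the pipeline stage
        return [f"bf16_zero_pp_rank_{d}_mp_rank_{m:02d}_optim_states.pt"
                for d, m in product(range(dp_ranks), range(mp_ranks))]

    # prints the 8 optim_states.pt names that appear in the save/commit lines
    print(zero_optim_shard_names())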
+[2024-04-24 07:22:57,834] [INFO] [engine.py:3481:_save_zero_checkpoint] bf16_zero checkpoint saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
+[2024-04-24 07:22:57,834] [INFO] [torch_checkpoint_engine.py:34:commit] [Torch] Checkpoint global_step0 is ready now!
+[2024-04-24 07:22:57,852] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_01_optim_states.pt.
+[2024-04-24 07:22:57,853] [INFO] [engine.py:3481:_save_zero_checkpoint] bf16_zero checkpoint saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_01_optim_states.pt
+[2024-04-24 07:22:57,853] [INFO] [torch_checkpoint_engine.py:34:commit] [Torch] Checkpoint global_step0 is ready now!
+[2024-04-24 07:23:02,350] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_02_optim_states.pt.
+[2024-04-24 07:23:02,351] [INFO] [engine.py:3481:_save_zero_checkpoint] bf16_zero checkpoint saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_02_optim_states.pt
+[2024-04-24 07:23:02,351] [INFO] [torch_checkpoint_engine.py:34:commit] [Torch] Checkpoint global_step0 is ready now!
+[2024-04-24 07:23:03,032] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_02_optim_states.pt.
+[2024-04-24 07:23:03,032] [INFO] [engine.py:3481:_save_zero_checkpoint] bf16_zero checkpoint saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_02_optim_states.pt
+[2024-04-24 07:23:03,033] [INFO] [torch_checkpoint_engine.py:34:commit] [Torch] Checkpoint global_step0 is ready now!
+Couldn't save model in huggingface format
+Couldn't save model in huggingface formatCouldn't save model in huggingface formatCouldn't save model in huggingface formatCouldn't save model in huggingface formatCouldn't save model in huggingface format
+
+Couldn't save model in huggingface format
+Couldn't save model in huggingface format
+
+
+
+ successfully saved checkpoint at iteration 20 to /data/output/llama13b_5M//checkpoints_zero_stage_2
+Checkpoint Save GB: 78.286, GB/Sec: 1.79, Latency(second): 43.674
+time (ms) | save-checkpoint: 43676.29
+/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+ warnings.warn(
+[2024-04-24 07:23:07,111] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect)
+Convert DeepSpeed Checkpoint to Universal Checkpoint
+args = Namespace(input_folder='/data/output/llama13b_5M//checkpoints_zero_stage_2', output_folder='/data/output/univ_ckpt_new', num_extract_workers=4, num_merge_workers=2)
+Converting DeepSpeed checkpoint in /data/output/llama13b_5M//checkpoints_zero_stage_2 to Universal checkpoint in /data/output/univ_ckpt_new
+*** 1. Extracting ZeRO fragments
+ 0%| | 0/2 [00:00
+ main()
+ File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/tools/convert_checkpoint/universal_to_huggingface.py", line 329, in main
+ model = create_model(args, config)
+ File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/tools/convert_checkpoint/universal_to_huggingface.py", line 80, in create_model
+ model_config.vocab_size = config['MODEL']['vocab_size']
+KeyError: 'vocab_size'
+Using device 'hpu'
+/usr/local/lib/python3.10/dist-packages/torch/_utils.py:842: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+/usr/local/lib/python3.10/dist-packages/datasets/load.py:1486: FutureWarning: The repository for hellaswag contains custom code which must be executed to correctly load the dataset. You can inspect the repository content at https://hf.co/datasets/hellaswag
+You can avoid this message in future by passing the argument `trust_remote_code=True`.
+Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`.
+ warnings.warn(
+Task: hellaswag; number of docs: 10042
+Task: hellaswag; document 0; context prompt (starting on next line):
+Personal Care and Style: How to increase breast size with a bra. Check your bra size. Wearing a bra that is too big will not make your breasts look larger. That is why it is important to wear the right size bra for you.
+(end of prompt on previous line)
+Requests: [Req_loglikelihood('Personal Care and Style: How to increase breast size with a bra. Check your bra size. Wearing a bra that is too big will not make your breasts look larger. That is why it is important to wear the right size bra for you.', ' You can visit a lingerie shop and have them measure you to help you fit a bra to your size, or measure yourself before you shop for a new bra to ensure that you get a good fit. Use a flexible tape measure, like one found in a sewing kit.')[0]
+, Req_loglikelihood('Personal Care and Style: How to increase breast size with a bra. Check your bra size. Wearing a bra that is too big will not make your breasts look larger. That is why it is important to wear the right size bra for you.', ' This is why it is important to keep your breasts under protection when in the shower and only wear bras that are larger than your breast size. If you are not wearing a bra, try wearing something that is a little bigger.')[0]
+, Req_loglikelihood('Personal Care and Style: How to increase breast size with a bra. Check your bra size. Wearing a bra that is too big will not make your breasts look larger. That is why it is important to wear the right size bra for you.', ' For a girl, a bra with a support strap will be easier for her, because most women are unable to pull through bra straps and bras that are too small will not be able to support breasts from side-to-side. Many bras have even been created that cover the breast side, and can be sent to other women in the world to make them look bigger.')[0]
+, Req_loglikelihood('Personal Care and Style: How to increase breast size with a bra. Check your bra size. Wearing a bra that is too big will not make your breasts look larger. That is why it is important to wear the right size bra for you.', ' Choose a color that is flattering to your breast type and specific event, in addition to those that make you uncomfortable. Look for sports bras made from natural material, such as spandex or lycra, as this is a more breathable bra.')[0]
+]
+Running loglikelihood requests
+ 0%| | 0/40145 [00:00
+ main()
+ File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/tools/convert_checkpoint/universal_to_huggingface.py", line 329, in main
+ model = create_model(args, config)
+ File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/tools/convert_checkpoint/universal_to_huggingface.py", line 80, in create_model
+ model_config.vocab_size = config['MODEL']['vocab_size']
+KeyError: 'vocab_size'
+Using device 'hpu'
+/usr/local/lib/python3.10/dist-packages/datasets/load.py:1486: FutureWarning: The repository for hellaswag contains custom code which must be executed to correctly load the dataset. You can inspect the repository content at https://hf.co/datasets/hellaswag
+You can avoid this message in future by passing the argument `trust_remote_code=True`.
+Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`.
+ warnings.warn(
+Task: hellaswag; number of docs: 10042
+Task: hellaswag; document 0; context prompt (starting on next line):
+Personal Care and Style: How to increase breast size with a bra. Check your bra size. Wearing a bra that is too big will not make your breasts look larger. That is why it is important to wear the right size bra for you.
+(end of prompt on previous line)
+Requests: [Req_loglikelihood('Personal Care and Style: How to increase breast size with a bra. Check your bra size. Wearing a bra that is too big will not make your breasts look larger. That is why it is important to wear the right size bra for you.', ' You can visit a lingerie shop and have them measure you to help you fit a bra to your size, or measure yourself before you shop for a new bra to ensure that you get a good fit. Use a flexible tape measure, like one found in a sewing kit.')[0]
+, Req_loglikelihood('Personal Care and Style: How to increase breast size with a bra. Check your bra size. Wearing a bra that is too big will not make your breasts look larger. That is why it is important to wear the right size bra for you.', ' This is why it is important to keep your breasts under protection when in the shower and only wear bras that are larger than your breast size. If you are not wearing a bra, try wearing something that is a little bigger.')[0]
+, Req_loglikelihood('Personal Care and Style: How to increase breast size with a bra. Check your bra size. Wearing a bra that is too big will not make your breasts look larger. That is why it is important to wear the right size bra for you.', ' For a girl, a bra with a support strap will be easier for her, because most women are unable to pull through bra straps and bras that are too small will not be able to support breasts from side-to-side. Many bras have even been created that cover the breast side, and can be sent to other women in the world to make them look bigger.')[0]
+, Req_loglikelihood('Personal Care and Style: How to increase breast size with a bra. Check your bra size. Wearing a bra that is too big will not make your breasts look larger. That is why it is important to wear the right size bra for you.', ' Choose a color that is flattering to your breast type and specific event, in addition to those that make you uncomfortable. Look for sports bras made from natural material, such as spandex or lycra, as this is a more breathable bra.')[0]
+]
+Running loglikelihood requests
+ 0%| | 0/40145 [00:00, {'100.83.134.132': [0, 1, 2, 3, 4, 5, 6, 7]})
+[2024-04-24 07:22:00,960] [INFO] [launch.py:164:main] dist_world_size=8
+[2024-04-24 07:22:00,960] [INFO] [launch.py:166:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
+/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+ warnings.warn(
+[2024-04-24 07:22:02,851] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect)
+/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+ warnings.warn(
+[2024-04-24 07:22:02,852] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect)
+/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+ warnings.warn(
+[2024-04-24 07:22:02,876] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect)
+/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+ warnings.warn(
+[2024-04-24 07:22:02,908] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect)
+/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+ warnings.warn(
+[2024-04-24 07:22:02,920] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect)
+/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+ warnings.warn(
+[2024-04-24 07:22:02,921] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect)
+/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+ warnings.warn(
+[2024-04-24 07:22:02,925] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect)
+/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
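Both conversion attempts above die the same way: universal_to_huggingface.py line 80 reads config['MODEL']['vocab_size'] and the supplied conversion config has no such key, so create_model raises a bare KeyError. A defensive variant of that lookup, as a standalone sketch (the helper name and error message are hypothetical, not part of the tool):

    def require_vocab_size(config: dict) -> int:
        # fail with an actionable message instead of a bare KeyError when the
        # conversion config is missing the MODEL.vocab_size entry
        try:
            return config['MODEL']['vocab_size']
        except KeyError as err:
            raise KeyError(
                "conversion config has no MODEL.vocab_size entry; add the "
                "tokenizer vocabulary size under MODEL in the config passed "
                "to universal_to_huggingface.py and re-run") from err

Either adding such a guard or simply supplying vocab_size in the config file would turn this crash into an actionable error at startup.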
+ warnings.warn(
+[2024-04-24 07:22:02,931] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect)
+--------------------------------------------------
+DeepSpeed C++/CUDA extension op report
+--------------------------------------------------
+NOTE: Ops not installed will be just-in-time (JIT) compiled at
+ runtime if needed. Op compatibility means that your system
+ meet the required dependencies to JIT install the op.
+--------------------------------------------------
+JIT compiled ops requires ninja
+ninja .................. [OKAY]
+--------------------------------------------------
+op name ................ installed .. compatible
+--------------------------------------------------
+cpu_adam ............... [NO] ....... [OKAY]
+fused_adam ............. [NO] ....... [OKAY]
+deepspeed_not_implemented [NO] ....... [OKAY]
+transformer_inference .. [NO] ....... [OKAY]
+--------------------------------------------------
+DeepSpeed general environment info:
+torch install path ............... ['/usr/local/lib/python3.10/dist-packages/torch']
+torch version .................... 2.1.1a0+gitb51c9f6
+deepspeed install path ........... ['/usr/local/lib/python3.10/dist-packages/deepspeed']
+deepspeed info ................... 0.12.4+hpu.synapse.v1.14.0, fad45b2, 1.14.0
+deepspeed wheel compiled w. ...... torch 2.1
+shared memory (/dev/shm) size .... 503.72 GB
+--------------------------------------------------
+DeepSpeed C++/CUDA extension op report
+--------------------------------------------------
+NOTE: Ops not installed will be just-in-time (JIT) compiled at
+ runtime if needed. Op compatibility means that your system
+ meet the required dependencies to JIT install the op.
+--------------------------------------------------
+JIT compiled ops requires ninja
+ninja .................. [OKAY]
+--------------------------------------------------
+op name ................ installed .. compatible
+--------------------------------------------------
+cpu_adam ............... [NO] ....... [OKAY]
+fused_adam ............. [NO] ....... [OKAY]
+deepspeed_not_implemented [NO] ....... [OKAY]
+transformer_inference .. [NO] ....... [OKAY]
+--------------------------------------------------
+DeepSpeed general environment info:
+torch install path ............... ['/usr/local/lib/python3.10/dist-packages/torch']
+torch version .................... 2.1.1a0+gitb51c9f6
+deepspeed install path ........... ['/usr/local/lib/python3.10/dist-packages/deepspeed']
+deepspeed info ................... 0.12.4+hpu.synapse.v1.14.0, fad45b2, 1.14.0
+deepspeed wheel compiled w. ...... torch 2.1
+shared memory (/dev/shm) size .... 503.72 GB
+--------------------------------------------------
+DeepSpeed C++/CUDA extension op report
+--------------------------------------------------
+NOTE: Ops not installed will be just-in-time (JIT) compiled at
+ runtime if needed. Op compatibility means that your system
+ meet the required dependencies to JIT install the op.
+--------------------------------------------------
+JIT compiled ops requires ninja
+ninja .................. [OKAY]
+--------------------------------------------------
+op name ................ installed .. compatible
+--------------------------------------------------
+cpu_adam ............... [NO] ....... [OKAY]
+fused_adam ............. [NO] ....... [OKAY]
+deepspeed_not_implemented [NO] ....... [OKAY]
+transformer_inference .. [NO] ....... [OKAY]
+--------------------------------------------------
+DeepSpeed general environment info:
+torch install path ............... ['/usr/local/lib/python3.10/dist-packages/torch']
+torch version .................... 2.1.1a0+gitb51c9f6
+deepspeed install path ........... ['/usr/local/lib/python3.10/dist-packages/deepspeed']
+deepspeed info ................... 0.12.4+hpu.synapse.v1.14.0, fad45b2, 1.14.0
+deepspeed wheel compiled w. ...... torch 2.1
+shared memory (/dev/shm) size .... 503.72 GB
+fatal: not a git repository (or any parent up to mount point /)
+Stopping at filesystem boundary (GIT_DISCOVERY_ACROSS_FILESYSTEM not set).
+**** Git info for Megatron: git_hash=unknown git_branch=unknown ****
+fatal: not a git repository (or any parent up to mount point /)
+Stopping at filesystem boundary (GIT_DISCOVERY_ACROSS_FILESYSTEM not set).
+fatal: not a git repository (or any parent up to mount point /)
+Stopping at filesystem boundary (GIT_DISCOVERY_ACROSS_FILESYSTEM not set).
+**** Git info for Megatron: git_hash=unknown git_branch=unknown ****
+**** Git info for Megatron: git_hash=unknown git_branch=unknown ****
+--------------------------------------------------
+DeepSpeed C++/CUDA extension op report
+--------------------------------------------------
+NOTE: Ops not installed will be just-in-time (JIT) compiled at
+ runtime if needed. Op compatibility means that your system
+ meet the required dependencies to JIT install the op.
+--------------------------------------------------
+JIT compiled ops requires ninja
+ninja .................. [OKAY]
+--------------------------------------------------
+op name ................ installed .. compatible
+--------------------------------------------------
+cpu_adam ............... [NO] ....... [OKAY]
+fused_adam ............. [NO] ....... [OKAY]
+deepspeed_not_implemented [NO] ....... [OKAY]
+transformer_inference .. [NO] ....... [OKAY]
+--------------------------------------------------
+DeepSpeed general environment info:
+torch install path ............... ['/usr/local/lib/python3.10/dist-packages/torch']
+torch version .................... 2.1.1a0+gitb51c9f6
+deepspeed install path ........... ['/usr/local/lib/python3.10/dist-packages/deepspeed']
+deepspeed info ................... 0.12.4+hpu.synapse.v1.14.0, fad45b2, 1.14.0
+deepspeed wheel compiled w. ...... torch 2.1
+shared memory (/dev/shm) size .... 503.72 GB
+fatal: not a git repository (or any parent up to mount point /)
+Stopping at filesystem boundary (GIT_DISCOVERY_ACROSS_FILESYSTEM not set).
+**** Git info for Megatron: git_hash=unknown git_branch=unknown ****
+--------------------------------------------------
+DeepSpeed C++/CUDA extension op report
+--------------------------------------------------
+NOTE: Ops not installed will be just-in-time (JIT) compiled at
+ runtime if needed. Op compatibility means that your system
+ meet the required dependencies to JIT install the op.
+--------------------------------------------------
+JIT compiled ops requires ninja
+ninja .................. [OKAY]
+--------------------------------------------------
+op name ................ installed .. compatible
+--------------------------------------------------
+cpu_adam ............... [NO] ....... [OKAY]
+fused_adam ............. [NO] ....... [OKAY]
+deepspeed_not_implemented [NO] ....... [OKAY]
+transformer_inference .. [NO] ....... [OKAY]
+--------------------------------------------------
+DeepSpeed general environment info:
+torch install path ............... ['/usr/local/lib/python3.10/dist-packages/torch']
+torch version .................... 2.1.1a0+gitb51c9f6
+deepspeed install path ........... ['/usr/local/lib/python3.10/dist-packages/deepspeed']
+deepspeed info ................... 0.12.4+hpu.synapse.v1.14.0, fad45b2, 1.14.0
+deepspeed wheel compiled w. ...... torch 2.1
+shared memory (/dev/shm) size .... 503.72 GB
+_initialize_distributed: Initializing with below params:
+args.local_rank: 5
+args.world_size: 8
+args.rank: 5
+args.distributed_backend: hccl
+--------------------------------------------------
+DeepSpeed C++/CUDA extension op report
+--------------------------------------------------
+NOTE: Ops not installed will be just-in-time (JIT) compiled at
+ runtime if needed. Op compatibility means that your system
+ meet the required dependencies to JIT install the op.
+--------------------------------------------------
+JIT compiled ops requires ninja
+ninja .................. [OKAY]
+--------------------------------------------------
+op name ................ installed .. compatible
+--------------------------------------------------
+cpu_adam ............... [NO] ....... [OKAY]
+fused_adam ............. [NO] ....... [OKAY]
+deepspeed_not_implemented [NO] ....... [OKAY]
+transformer_inference .. [NO] ....... [OKAY]
+--------------------------------------------------
+DeepSpeed general environment info:
+torch install path ............... ['/usr/local/lib/python3.10/dist-packages/torch']
+torch version .................... 2.1.1a0+gitb51c9f6
+deepspeed install path ........... ['/usr/local/lib/python3.10/dist-packages/deepspeed']
+deepspeed info ................... 0.12.4+hpu.synapse.v1.14.0, fad45b2, 1.14.0
+deepspeed wheel compiled w. ...... torch 2.1
+shared memory (/dev/shm) size .... 503.72 GB
+_initialize_distributed: Initializing with below params:
+args.local_rank: 4
+args.world_size: 8
+args.rank: 4
+args.distributed_backend: hccl
+_initialize_distributed: Initializing with below params:
+args.local_rank: 1
+args.world_size: 8
+args.rank: 1
+args.distributed_backend: hccl
+fatal: not a git repository (or any parent up to mount point /)
+Stopping at filesystem boundary (GIT_DISCOVERY_ACROSS_FILESYSTEM not set).
+**** Git info for Megatron: git_hash=unknown git_branch=unknown ****
+--------------------------------------------------
+DeepSpeed C++/CUDA extension op report
+--------------------------------------------------
+NOTE: Ops not installed will be just-in-time (JIT) compiled at
+ runtime if needed. Op compatibility means that your system
+ meet the required dependencies to JIT install the op.
+--------------------------------------------------
+JIT compiled ops requires ninja
+ninja .................. [OKAY]
+--------------------------------------------------
+op name ................ installed .. compatible
+--------------------------------------------------
+cpu_adam ............... [NO] ....... [OKAY]
+fused_adam ............. [NO] ....... [OKAY]
+deepspeed_not_implemented [NO] ....... [OKAY]
+transformer_inference .. [NO] ....... [OKAY]
+--------------------------------------------------
+DeepSpeed general environment info:
+torch install path ............... ['/usr/local/lib/python3.10/dist-packages/torch']
+torch version .................... 2.1.1a0+gitb51c9f6
+deepspeed install path ........... ['/usr/local/lib/python3.10/dist-packages/deepspeed']
+deepspeed info ................... 0.12.4+hpu.synapse.v1.14.0, fad45b2, 1.14.0
+deepspeed wheel compiled w. ...... torch 2.1
+shared memory (/dev/shm) size .... 503.72 GB
+fatal: not a git repository (or any parent up to mount point /)
+Stopping at filesystem boundary (GIT_DISCOVERY_ACROSS_FILESYSTEM not set).
+**** Git info for Megatron: git_hash=unknown git_branch=unknown ****
+_initialize_distributed: Initializing with below params:
+args.local_rank: 3
+args.world_size: 8
+args.rank: 3
+args.distributed_backend: hccl
+fatal: not a git repository (or any parent up to mount point /)
+Stopping at filesystem boundary (GIT_DISCOVERY_ACROSS_FILESYSTEM not set).
+**** Git info for Megatron: git_hash=unknown git_branch=unknown ****
+using world size: 8, data-parallel-size: 2, tensor-model-parallel size: 2, pipeline-model-parallel size: 2
+accumulate and all-reduce gradients in fp32 for bfloat16 data type.
+using torch.bfloat16 for parameters ...
+------------------------ arguments ------------------------
+ accumulate_allreduce_grads_in_fp32 .............. True
+ activation_func_type ............................ swiglu
+ adam_beta1 ...................................... 0.9
+ adam_beta2 ...................................... 0.95
+ adam_eps ........................................ 1e-06
+ adlr_autoresume ................................. False
+ adlr_autoresume_interval ........................ 1000
+ aml_data_download_path .......................... None
+ apply_layernorm_weight_plus_one ................. False
+ apply_query_key_layer_scaling ................... True
+ apply_residual_connection_post_layernorm ........ False
+ attention_dropout ............................... 0.1
+ attention_softmax_in_fp32 ....................... False
+ bert_binary_head ................................ True
+ bert_load ....................................... None
+ bf16 ............................................ True
+ bias_dropout_fusion ............................. False
+ bias_gelu_fusion ................................ False
+ biencoder_projection_dim ........................ 0
+ biencoder_shared_query_context_model ............ False
+ block_data_path ................................. None
+ cache_fp8_weight ................................ False
+ cache_fp8_weight_fwd ............................ True
+ checkpoint_activations .......................... False
+ checkpoint_activations_granularity .............. full
+ checkpoint_in_cpu ............................... False
+ checkpoint_num_layers ........................... 1
+ clearml_config_path ............................. None
+ clearml_continue_exp ............................ False
+ clearml_exp_name ................................ None
+ clip_grad ....................................... 1.0
+ compression_training ............................ False
+ consumed_train_samples .......................... 0
+ consumed_train_tokens ........................... 0
+ consumed_valid_samples .......................... 0
+ contigious_checkpointing ........................ False
+ cpu_optimizer ................................... False
+ cpu_torch_adam .................................. False
+ create_moe_param_group .......................... False
+ curriculum_learning ............................. False
+ data_idx_path ................................... None
+ data_impl ....................................... infer
+ data_parallel_size .............................. 2
+ data_path ....................................... ['/data/arxiv//tokenized_text_document']
+ data_sharding ................................... True
+ dataloader_type ................................. single
+ DDP_impl ........................................ local
+ decoder_seq_length .............................. None
+ deepscale ....................................... False
+ deepscale_config ................................ None
+ deepspeed ....................................... True
+ deepspeed_activation_checkpointing .............. False
+ deepspeed_config ................................ /data/output/llama13b_5M//ds_config.json
+ deepspeed_mpi ................................... False
+ distribute_checkpointed_activations ............. False
+ distributed_backend ............................. hccl
+ do_layernorm_bias_weight_decay .................. False
+ do_pretrain_validation .......................... False
+ ds_inference .................................... False
+ ds_pipeline_enabled ............................. True
+ embed_layernorm ................................. False
+ embedding_path .................................. None
+ enable_expert_tensor_parallelism ................ False
+ encoder_seq_length .............................. 2048
+ eod_mask_loss ................................... False
+ eval_interval ................................... 20
+ eval_iters ...................................... 10
+ eval_loss_exit_value ............................ None
+ eval_micro_batch_size ........................... 1
+ evidence_data_path .............................. None
+ exit_duration_in_mins ........................... None
+ exit_interval ................................... 0
+ expert_interval ................................. 2
+ ffn_hidden_coeff ................................ 2.6666666666666665
+ ffn_hidden_size ................................. 13824
+ finetune ........................................ False
+ fix_position_emb_redundant_alloc ................ False
+ flatten_linear_operands ......................... False
+ fp16 ............................................ False
+ fp16_lm_cross_entropy ........................... False
+ fp32_residual_connection ........................ False
+ global_batch_size ............................... 256
+ hidden_dropout .................................. 0.1
+ hidden_size ..................................... 5120
+ hidden_size_teacher ............................. None
+ hpu_deterministic ............................... True
+ hpu_fp8_format .................................. e5m2
+ hpu_fp8_measure_interval ........................ 10
+ hysteresis ...................................... 2
+ ict_head_size ................................... None
+ ict_load ........................................ None
+ img_dim ......................................... 224
+ indexer_batch_size .............................. 128
+ indexer_log_interval ............................ 1000
+ inference ....................................... False
+ init_method_std ................................. 0.02
+ init_method_xavier_uniform ...................... False
+ initial_loss_scale .............................. 4294967296
+ kd .............................................. False
+ kd_alpha_ce ..................................... 1
+ kd_beta_ce ...................................... 1
+ kd_temp ......................................... 1.0
+ kill_switch_path ................................ None
+ kv_channels ..................................... 128
+ layernorm_epsilon ............................... 1e-06
+ layernorm_type .................................. rmsnorm
+ lazy_mpu_init ...................................
None + load ............................................ /data/output/llama13b_5M//checkpoints_zero_stage_2 + load_teacher .................................... None + local_rank ...................................... 0 + log_batch_size_to_tensorboard ................... True + log_bwd_grads ................................... False + log_fwd_activations ............................. False + log_interval .................................... 10 + log_learning_rate_to_tensorboard ................ True + log_loss_scale_to_tensorboard ................... True + log_model_inputs ................................ False + log_num_zeros_in_grad ........................... False + log_optimizer_states_to_tensorboard ............. False + log_params_norm ................................. False + log_timers_to_tensorboard ....................... True + log_validation_ppl_to_tensorboard ............... True + loss_scale ...................................... None + loss_scale_window ............................... 1000 + lr .............................................. 0.0003 + lr_decay_iters .................................. None + lr_decay_samples ................................ None + lr_decay_style .................................. cosine + lr_decay_tokens ................................. None + lr_warmup_fraction .............................. None + lr_warmup_iters ................................. 2000 + lr_warmup_samples ............................... 0 + lr_warmup_tokens ................................ None + make_vocab_size_divisible_by .................... 128 + mask_prob ....................................... 0.15 + mask_tensor_adding .............................. False + masked_softmax_fusion ........................... False + max_position_embeddings ......................... None + memory_centric_tiled_linear ..................... False + merge_file ...................................... /data/arxiv//gpt2-merges.txt + micro_batch_size ................................ 1 + min_loss_scale .................................. 1.0 + min_lr .......................................... 0.0 + mlp_type ........................................ standard + mmap_warmup ..................................... False + moe_eval_capacity_factor ........................ 1.0 + moe_expert_parallel_size ........................ 1 + moe_loss_coeff .................................. 0.1 + moe_min_capacity ................................ 4 + moe_token_dropping .............................. True + moe_train_capacity_factor ....................... 1.0 + mos ............................................. False + no_bias ......................................... True + no_cuda ......................................... False + no_load_lr_state ................................ False + no_load_optim ................................... None + no_load_rng ..................................... None + no_pipeline_parallel ............................ False + no_save_optim ................................... None + no_save_rng ..................................... None + no_scaled_init .................................. False + num_attention_heads ............................. 40 + num_attention_heads_teacher ..................... None + num_channels .................................... 3 + num_classes ..................................... 1000 + num_experts ..................................... [1] + num_experts_teacher ............................. [1] + num_key_value_heads ............................. 
40 + num_layers ...................................... 16 + num_layers_per_virtual_pipeline_stage ........... None + num_layers_teacher .............................. None + num_workers ..................................... 2 + onnx_safe ....................................... None + openai_gelu ..................................... False + optimizer ....................................... adamw + override_lr_scheduler ........................... False + params_dtype .................................... torch.bfloat16 + partition_activations ........................... False + patch_dim ....................................... 16 + pipeline_model_parallel_size .................... 2 + position_embedding_type ......................... PositionEmbeddingType.rotary + profile ......................................... None + profile_backward ................................ False + profile_steps ................................... 2,3 + query_in_block_prob ............................. 0.1 + rampup_batch_size ............................... None + rank ............................................ 0 + remote_device ................................... none + reset_attention_mask ............................ False + reset_iteration ................................. False + reset_position_ids .............................. False + retriever_report_topk_accuracies ................ [] + retriever_score_scaling ......................... False + retriever_seq_length ............................ 256 + sample_rate ..................................... 1.0 + save ............................................ /data/output/llama13b_5M//checkpoints_zero_stage_2 + save_interval ................................... 20 + scatter_gather_tensors_in_pipeline .............. True + scattered_embeddings ............................ False + seed ............................................ 1234 + seq_length ...................................... 2048 + sequence_parallel ............................... True + sgd_momentum .................................... 0.9 + short_seq_prob .................................. 0.1 + skip_train ...................................... False + split ........................................... 969, 30, 1 + split_transformers .............................. False + synchronize_each_layer .......................... False + tensor_logger_max_iter .......................... 0 + tensor_logger_path .............................. None + tensor_model_parallel_size ...................... 2 + tensorboard_dir ................................. /data/output/llama13b_5M//tensorboard + tensorboard_log_interval ........................ 1 + tensorboard_queue_size .......................... 1000 + test_data_path .................................. None + tile_factor ..................................... 1 + titles_data_path ................................ None + tokenizer_eod_id ................................ None + tokenizer_model_file ............................ None + tokenizer_type .................................. GPT2BPETokenizer + topk ............................................ 1 + train_data_path ................................. None + train_iters ..................................... 10000 + train_samples ................................... None + train_tokens .................................... None + universal_checkpoint ............................ False + use_checkpoint_lr_scheduler ..................... False + use_contiguous_buffers_in_ddp ................... 
True + use_cpu_initialization .......................... None + use_fused_sdpa .................................. True + use_fused_sdpa_with_recompute ................... False + use_hpu ......................................... True + use_hpu_fp8_transformer_engine .................. False + use_hpu_graphs .................................. False + use_one_sent_docs ............................... False + use_pin_memory .................................. False + use_rotary_v2 ................................... False + use_seq_len_plus_one_tokens ..................... True + use_torch_compile ............................... False + use_tutel ....................................... False + valid_data_path ................................. None + verify_checkpoint ............................... True + verify_checkpoint_model_type .................... LLAMA + verify_tp_workers ............................... False + verify_tp_workers_hash .......................... False + virtual_pipeline_model_parallel_size ............ None + vocab_extra_ids ................................. 0 + vocab_file ...................................... /data/arxiv//gpt2-vocab.json + weight_decay .................................... 0.1 + world_size ...................................... 8 + zero_allgather_bucket_size ...................... 0.0 + zero_contigious_gradients ....................... False + zero_reduce_bucket_size ......................... 0.0 + zero_reduce_scatter ............................. False + zero_stage ...................................... 0 +-------------------- end of arguments --------------------- +setting number of micro-batches to constant 128 +setting number of micro-batches to constant 128 +> building GPT2BPETokenizer tokenizer ... +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. + warnings.warn( +hccl device_count: 8 +[2024-04-24 07:22:06,063] [WARNING] [comm.py:163:init_deepspeed_backend] HCCL backend in DeepSpeed not yet implemented +[2024-04-24 07:22:06,063] [INFO] [comm.py:637:init_distributed] cdb=None +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. + warnings.warn( +hccl device_count: 8 +[2024-04-24 07:22:06,069] [WARNING] [comm.py:163:init_deepspeed_backend] HCCL backend in DeepSpeed not yet implemented +[2024-04-24 07:22:06,069] [INFO] [comm.py:637:init_distributed] cdb=None +_initialize_distributed: Initializing with below params: +args.local_rank: 6 +args.world_size: 8 +args.rank: 6 +args.distributed_backend: hccl +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. + warnings.warn( +hccl device_count: 8 +[2024-04-24 07:22:06,086] [WARNING] [comm.py:163:init_deepspeed_backend] HCCL backend in DeepSpeed not yet implemented +[2024-04-24 07:22:06,086] [INFO] [comm.py:637:init_distributed] cdb=None +> setting tensorboard ... 
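The "setting number of micro-batches to constant 128" lines above follow directly from the batch arguments echoed in this dump. A quick arithmetic check, using only values that appear in the log:

    # micro-batch count per data-parallel rank, from values echoed in this log
    world_size = 8
    tensor_parallel = 2
    pipeline_parallel = 2
    data_parallel = world_size // (tensor_parallel * pipeline_parallel)  # = 2

    global_batch = 256   # global_batch_size / train_batch_size
    micro_batch = 1      # train_micro_batch_size_per_gpu
    num_micro_batches = global_batch // (micro_batch * data_parallel)    # = 128
    assert num_micro_batches == 128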
+_initialize_distributed: Initializing with below params: +args.local_rank: 7 +args.world_size: 8 +args.rank: 7 +args.distributed_backend: hccl + > padded vocab (size: 50257) with 175 dummy tokens (new size: 50432) +_initialize_distributed: Initializing with below params: +args.local_rank: 0 +args.world_size: 8 +args.rank: 0 +args.distributed_backend: hccl +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. + warnings.warn( +hccl device_count: 8 +[2024-04-24 07:22:06,101] [WARNING] [comm.py:163:init_deepspeed_backend] HCCL backend in DeepSpeed not yet implemented +[2024-04-24 07:22:06,101] [INFO] [comm.py:637:init_distributed] cdb=None +-------------------------------------------------- +DeepSpeed C++/CUDA extension op report +-------------------------------------------------- +NOTE: Ops not installed will be just-in-time (JIT) compiled at + runtime if needed. Op compatibility means that your system + meet the required dependencies to JIT install the op. +-------------------------------------------------- +JIT compiled ops requires ninja +ninja .................. [OKAY] +-------------------------------------------------- +op name ................ installed .. compatible +-------------------------------------------------- +cpu_adam ............... [NO] ....... [OKAY] +fused_adam ............. [NO] ....... [OKAY] +deepspeed_not_implemented [NO] ....... [OKAY] +transformer_inference .. [NO] ....... [OKAY] +-------------------------------------------------- +DeepSpeed general environment info: +torch install path ............... ['/usr/local/lib/python3.10/dist-packages/torch'] +torch version .................... 2.1.1a0+gitb51c9f6 +deepspeed install path ........... ['/usr/local/lib/python3.10/dist-packages/deepspeed'] +deepspeed info ................... 0.12.4+hpu.synapse.v1.14.0, fad45b2, 1.14.0 +deepspeed wheel compiled w. ...... torch 2.1 +shared memory (/dev/shm) size .... 503.72 GB +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. + warnings.warn( +hccl device_count: 8 +[2024-04-24 07:22:06,136] [WARNING] [comm.py:163:init_deepspeed_backend] HCCL backend in DeepSpeed not yet implemented +[2024-04-24 07:22:06,136] [INFO] [comm.py:637:init_distributed] cdb=None +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. + warnings.warn( +hccl device_count: 8 +[2024-04-24 07:22:06,152] [WARNING] [comm.py:163:init_deepspeed_backend] HCCL backend in DeepSpeed not yet implemented +[2024-04-24 07:22:06,152] [INFO] [comm.py:637:init_distributed] cdb=None +fatal: not a git repository (or any parent up to mount point /) +Stopping at filesystem boundary (GIT_DISCOVERY_ACROSS_FILESYSTEM not set). +**** Git info for Megatron: git_hash=unknown git_branch=unknown **** +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. + warnings.warn( +hccl device_count: 8 +> initializing torch distributed ... 
+[2024-04-24 07:22:06,173] [WARNING] [comm.py:163:init_deepspeed_backend] HCCL backend in DeepSpeed not yet implemented +[2024-04-24 07:22:06,173] [INFO] [comm.py:637:init_distributed] cdb=None +[2024-04-24 07:22:06,173] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend hccl +_initialize_distributed: Initializing with below params: +args.local_rank: 2 +args.world_size: 8 +args.rank: 2 +args.distributed_backend: hccl +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. + warnings.warn( +hccl device_count: 8 +[2024-04-24 07:22:06,282] [WARNING] [comm.py:163:init_deepspeed_backend] HCCL backend in DeepSpeed not yet implemented +[2024-04-24 07:22:06,282] [INFO] [comm.py:637:init_distributed] cdb=None +> initializing tensor model parallel with size 2 +> initializing pipeline model parallel with size 2 +> setting random seeds to 1234 ... +> initializing model parallel cuda seeds on global rank 0, model parallel rank 0, and data parallel rank 0 with model parallel seed: 3952 and data parallel seed: 1234 +============================= HABANA PT BRIDGE CONFIGURATION =========================== + PT_HPU_LAZY_MODE = 1 + PT_RECIPE_CACHE_PATH = + PT_CACHE_FOLDER_DELETE = 0 + PT_HPU_RECIPE_CACHE_CONFIG = + PT_HPU_MAX_COMPOUND_OP_SIZE = 9223372036854775807 + PT_HPU_LAZY_ACC_PAR_MODE = 0 + PT_HPU_ENABLE_REFINE_DYNAMIC_SHAPES = 0 +---------------------------: System Configuration :--------------------------- +Num CPU Cores : 160 +CPU RAM : 1056375244 KB +------------------------------------------------------------------------------ +> compiling dataset index builder ... +make: Entering directory '/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/data' +make: Nothing to be done for 'default'. +make: Leaving directory '/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/data' +>>> done with dataset index builder. Compilation time: 0.180 seconds +WARNING: constraints for invoking optimized fused softmax kernel are not met. We default back to unfused kernel invocations. +> compiling and loading fused kernels ... +>>> done with compiling and loading fused kernels. Compilation time: 0.004 seconds +wandb: Currently logged in as: advaiddeepak0602 (bharatgpt). Use `wandb login --relogin` to force relogin +wandb: Currently logged in as: advaiddeepak0602 (bharatgpt). Use `wandb login --relogin` to force relogin +wandb: Appending key for api.wandb.ai to your netrc file: /root/.netrc +wandb: Appending key for api.wandb.ai to your netrc file: /root/.netrc +wandb: Currently logged in as: advaiddeepak0602 (bharatgpt). Use `wandb login --relogin` to force relogin +wandb: Appending key for api.wandb.ai to your netrc file: /root/.netrc +wandb: Currently logged in as: advaiddeepak0602 (bharatgpt). Use `wandb login --relogin` to force relogin +wandb: Appending key for api.wandb.ai to your netrc file: /root/.netrc +wandb: Currently logged in as: advaiddeepak0602 (bharatgpt). Use `wandb login --relogin` to force relogin +wandb: Appending key for api.wandb.ai to your netrc file: /root/.netrc +wandb: Currently logged in as: advaiddeepak0602 (bharatgpt). Use `wandb login --relogin` to force relogin +wandb: Appending key for api.wandb.ai to your netrc file: /root/.netrc +wandb: Currently logged in as: advaiddeepak0602 (bharatgpt). 
Use `wandb login --relogin` to force relogin +wandb: Appending key for api.wandb.ai to your netrc file: /root/.netrc +wandb: Currently logged in as: advaiddeepak0602 (bharatgpt). Use `wandb login --relogin` to force relogin +wandb: Appending key for api.wandb.ai to your netrc file: /root/.netrc +wandb: Tracking run with wandb version 0.16.6 +wandb: Run data is saved locally in /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/wandb/run-20240424_072213-mp0t9uy4 +wandb: Run `wandb offline` to turn off syncing. +wandb: Syncing run youthful-wildflower-2012 +wandb: ⭐️ View project at https://wandb.ai/bharatgpt/llama_runs +wandb: 🚀 View run at https://wandb.ai/bharatgpt/llama_runs/runs/mp0t9uy4 +wandb: Tracking run with wandb version 0.16.6 +wandb: Run data is saved locally in /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/wandb/run-20240424_072213-w9athpv9 +wandb: Run `wandb offline` to turn off syncing. +wandb: Syncing run different-planet-2014 +wandb: ⭐️ View project at https://wandb.ai/bharatgpt/llama_runs +wandb: 🚀 View run at https://wandb.ai/bharatgpt/llama_runs/runs/w9athpv9 +wandb: Tracking run with wandb version 0.16.6 +wandb: Run data is saved locally in /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/wandb/run-20240424_072213-211n5b2u +wandb: Run `wandb offline` to turn off syncing. +wandb: Syncing run jolly-pyramid-2013 +wandb: ⭐️ View project at https://wandb.ai/bharatgpt/llama_runs +wandb: 🚀 View run at https://wandb.ai/bharatgpt/llama_runs/runs/211n5b2u +wandb: Tracking run with wandb version 0.16.6 +wandb: Run data is saved locally in /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/wandb/run-20240424_072213-tbva9yik +wandb: Run `wandb offline` to turn off syncing. +wandb: Syncing run earthy-plasma-2014 +wandb: ⭐️ View project at https://wandb.ai/bharatgpt/llama_runs +wandb: 🚀 View run at https://wandb.ai/bharatgpt/llama_runs/runs/tbva9yik +wandb: Tracking run with wandb version 0.16.6 +wandb: Run data is saved locally in /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/wandb/run-20240424_072213-h27k7fos +wandb: Run `wandb offline` to turn off syncing. +wandb: Tracking run with wandb version 0.16.6 +wandb: Syncing run pleasant-glitter-2017 +wandb: ⭐️ View project at https://wandb.ai/bharatgpt/llama_runs +wandb: 🚀 View run at https://wandb.ai/bharatgpt/llama_runs/runs/h27k7fos +wandb: Run data is saved locally in /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/wandb/run-20240424_072213-pu9rwbfz +wandb: Run `wandb offline` to turn off syncing. +wandb: Syncing run zany-snow-2014 +wandb: ⭐️ View project at https://wandb.ai/bharatgpt/llama_runs +wandb: 🚀 View run at https://wandb.ai/bharatgpt/llama_runs/runs/pu9rwbfz +wandb: Tracking run with wandb version 0.16.6 +wandb: Run data is saved locally in /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/wandb/run-20240424_072213-ph1uqt0g +wandb: Run `wandb offline` to turn off syncing. +wandb: Syncing run hopeful-pine-2018 +wandb: ⭐️ View project at https://wandb.ai/bharatgpt/llama_runs +wandb: 🚀 View run at https://wandb.ai/bharatgpt/llama_runs/runs/ph1uqt0g +wandb: Tracking run with wandb version 0.16.6 +wandb: Run data is saved locally in /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/wandb/run-20240424_072213-amj6vf90 +wandb: Run `wandb offline` to turn off syncing. 
+wandb: Syncing run swept-sunset-2019 +wandb: ⭐️ View project at https://wandb.ai/bharatgpt/llama_runs +wandb: 🚀 View run at https://wandb.ai/bharatgpt/llama_runs/runs/amj6vf90 +time to initialize megatron (seconds): 38.936 +[after megatron is initialized] datetime: 2024-04-24 07:22:14 +building LLaMA model ... +*************** Using FusedSDPA ****************** +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/core/weight_sharing.py:53: UserWarning: "hpu:X" notation is not supported by Gaudi PyTorch intergration bridge. Please change to "hpu" without index (Triggered internally at /npu-stack/pytorch-integration/pytorch_helpers/lazy_to_backend.cpp:53.)
+ return super().__torch_function__(func, types, new_args, kwargs) + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 1397964800 + > number of parameters on (tensor, pipeline) model parallel rank (0, 1): 1397969920 + > number of parameters on (tensor, pipeline) model parallel rank (1, 1): 1397969920 +[2024-04-24 07:22:15,116] [INFO] [utils.py:824:see_memory_usage] Before Building Model +[2024-04-24 07:22:15,119] [INFO] [utils.py:825:see_memory_usage] MA 0.01 GB Max_MA 0.01 GB CA 0.0 GB Max_CA 0 GB +[2024-04-24 07:22:15,120] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 369.82 GB, percent = 36.7% +SEED_LAYERS=False BASE_SEED=1234 SEED_FN=None +Using topology: {ProcessCoord(pipe=0, data=0, model=0): 0, ProcessCoord(pipe=0, data=0, model=1): 1, ProcessCoord(pipe=0, data=1, model=0): 2, ProcessCoord(pipe=0, data=1, model=1): 3, ProcessCoord(pipe=1, data=0, model=0): 4, ProcessCoord(pipe=1, data=0, model=1): 5, ProcessCoord(pipe=1, data=1, model=0): 6, ProcessCoord(pipe=1, data=1, model=1): 7} +[2024-04-24 07:22:15,122] [INFO] [module.py:375:_partition_layers] Partitioning pipeline stages with method type:transformer +stage=0 layers=11 + 0: _to_float16 + 1: EmbeddingPipe + 2: + 3: ParallelTransformerLayerPipe + 4: ParallelTransformerLayerPipe + 5: ParallelTransformerLayerPipe + 6: ParallelTransformerLayerPipe + 7: ParallelTransformerLayerPipe + 8: ParallelTransformerLayerPipe + 9: ParallelTransformerLayerPipe + 10: ParallelTransformerLayerPipe +stage=1 layers=13 + 11: ParallelTransformerLayerPipe + 12: ParallelTransformerLayerPipe + 13: ParallelTransformerLayerPipe + 14: ParallelTransformerLayerPipe + 15: ParallelTransformerLayerPipe + 16: ParallelTransformerLayerPipe + 17: ParallelTransformerLayerPipe + 18: ParallelTransformerLayerPipe + 19: + 20: WrapName + 21: WrapName + 22: + 23: float16_to_fp32 + loss: CrossEntropy +*************** Using FusedSDPA ****************** +*************** Using FusedSDPA ****************** +*************** Using FusedSDPA ****************** +*************** Using FusedSDPA ****************** +*************** Using FusedSDPA ****************** +*************** Using FusedSDPA ****************** +*************** Using FusedSDPA ****************** +*************** Using FusedSDPA ****************** +[2024-04-24 07:22:15,303] [INFO] [utils.py:824:see_memory_usage] After Building Model +[2024-04-24 07:22:15,306] [INFO] [utils.py:825:see_memory_usage] MA 0.01 GB Max_MA 0.01 GB CA 0.0 GB Max_CA 0 GB +[2024-04-24 07:22:15,307] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 370.67 GB, percent = 36.8% + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 1397964800 +> learning rate decay style: cosine +DeepSpeed is enabled. 
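The per-rank parameter counts reported above (1,397,964,800 on the embedding stage, 1,397,969,920 on the head stage) are reproducible from the argument dump, assuming the usual no-bias LLaMA layout: fused QKV plus output projection, SwiGLU MLP, two RMSNorms per layer, and a separate (untied) input embedding and LM head, which is consistent with UNIQUE_PARAMS equalling TOTAL_PARAMS later in the log. A back-of-envelope sketch under that assumption:

    # Rough reconstruction of the per-stage parameter counts printed above.
    h, ffn, tp = 5120, 13824, 2
    # vocab 50257 padded to a multiple of 128 (make-vocab-size-divisible-by) * TP,
    # matching the "175 dummy tokens (new size: 50432)" line earlier in the log
    vocab_padded = -(-50257 // (128 * tp)) * (128 * tp)   # = 50432

    attn  = 4 * h * h // tp          # QKV + output projection, TP-sharded
    mlp   = 3 * h * ffn // tp        # SwiGLU: gate, up and down projections
    norms = 2 * h                    # two RMSNorms per layer, replicated
    per_layer = attn + mlp + norms   # 158,607,360 per TP rank

    embed  = vocab_padded * h // tp  # 129,105,920 per TP rank
    stage0 = 8 * per_layer + embed            # embedding + 8 transformer layers
    stage1 = 8 * per_layer + h + embed        # 8 layers + final norm + LM head
    assert (stage0, stage1) == (1_397_964_800, 1_397_969_920)
    assert tp * (stage0 + stage1) == 5_591_869_440   # matches TOTAL_PARAMS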
+[2024-04-24 07:22:15,310] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed info: version=0.12.4+hpu.synapse.v1.14.0, git-hash=fad45b2, git-branch=1.14.0 +[2024-04-24 07:22:16,123] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed Flops Profiler Enabled: False +[2024-04-24 07:22:16,123] [INFO] [logging.py:96:log_dist] [Rank 0] Using client Optimizer as basic optimizer +[2024-04-24 07:22:16,124] [INFO] [logging.py:96:log_dist] [Rank 0] Removing param_group that has no 'params' in the basic Optimizer +[2024-04-24 07:22:16,124] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed Basic Optimizer = AdamW +[2024-04-24 07:22:16,124] [INFO] [logging.py:96:log_dist] [Rank 0] Creating BF16 optimizer +[2024-04-24 07:22:16,199] [INFO] [engine.py:139:__init__] is_pipe_partitioned= False is_grad_partitioned= False +[2024-04-24 07:22:16,200] [INFO] [engine.py:139:__init__] is_pipe_partitioned= False is_grad_partitioned= False +[2024-04-24 07:22:16,201] [INFO] [engine.py:139:__init__] is_pipe_partitioned= False is_grad_partitioned= False +[2024-04-24 07:22:16,202] [INFO] [engine.py:139:__init__] is_pipe_partitioned= False is_grad_partitioned= False +[2024-04-24 07:22:16,204] [INFO] [engine.py:139:__init__] is_pipe_partitioned= False is_grad_partitioned= False +[2024-04-24 07:22:16,215] [INFO] [engine.py:139:__init__] is_pipe_partitioned= False is_grad_partitioned= False +[2024-04-24 07:22:16,274] [INFO] [utils.py:824:see_memory_usage] begin bf16_optimizer +[2024-04-24 07:22:16,278] [INFO] [utils.py:825:see_memory_usage] MA 2.62 GB Max_MA 2.63 GB CA 0.0 GB Max_CA 0 GB +[2024-04-24 07:22:16,278] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 371.0 GB, percent = 36.8% +[2024-04-24 07:22:16,325] [INFO] [engine.py:139:__init__] is_pipe_partitioned= False is_grad_partitioned= False +[2024-04-24 07:22:16,412] [INFO] [utils.py:824:see_memory_usage] before initializing group 0 +[2024-04-24 07:22:16,415] [INFO] [utils.py:825:see_memory_usage] MA 2.62 GB Max_MA 0.0 GB CA 0.0 GB Max_CA 0 GB +[2024-04-24 07:22:16,416] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 370.89 GB, percent = 36.8% +[2024-04-24 07:22:16,748] [INFO] [utils.py:824:see_memory_usage] after initializing group 0 +[2024-04-24 07:22:16,752] [INFO] [utils.py:825:see_memory_usage] MA 2.62 GB Max_MA 5.22 GB CA 0.0 GB Max_CA 0 GB +[2024-04-24 07:22:16,752] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 371.01 GB, percent = 36.8% +[2024-04-24 07:22:16,858] [INFO] [utils.py:824:see_memory_usage] before initializing group 1 +[2024-04-24 07:22:16,862] [INFO] [utils.py:825:see_memory_usage] MA 2.62 GB Max_MA 0.0 GB CA 0.0 GB Max_CA 0 GB +[2024-04-24 07:22:16,862] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 371.01 GB, percent = 36.8% +[2024-04-24 07:22:16,982] [INFO] [utils.py:824:see_memory_usage] after initializing group 1 +[2024-04-24 07:22:16,986] [INFO] [utils.py:825:see_memory_usage] MA 10.43 GB Max_MA 10.43 GB CA 0.0 GB Max_CA 0 GB +[2024-04-24 07:22:16,986] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 371.01 GB, percent = 36.8% +[2024-04-24 07:22:17,116] [INFO] [utils.py:824:see_memory_usage] before initialize_optimizer +[2024-04-24 07:22:17,120] [INFO] [utils.py:825:see_memory_usage] MA 10.43 GB Max_MA 0.0 GB CA 0.0 GB Max_CA 0 GB +[2024-04-24 07:22:17,120] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 371.03 GB, percent = 36.8% +[2024-04-24 07:22:17,251] [INFO] [utils.py:824:see_memory_usage] end initialize_optimizer +[2024-04-24 
07:22:17,255] [INFO] [utils.py:825:see_memory_usage] MA 10.43 GB Max_MA 0.0 GB CA 0.0 GB Max_CA 0 GB +[2024-04-24 07:22:17,255] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 371.04 GB, percent = 36.8% +[2024-04-24 07:22:17,388] [INFO] [utils.py:824:see_memory_usage] end bf16_optimizer +[2024-04-24 07:22:17,392] [INFO] [utils.py:825:see_memory_usage] MA 10.43 GB Max_MA 0.0 GB CA 0.0 GB Max_CA 0 GB +[2024-04-24 07:22:17,392] [INFO] [utils.py:832:see_memory_usage] CPU Virtual Memory: used = 371.02 GB, percent = 36.8% +[2024-04-24 07:22:17,393] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed Final Optimizer = BF16_Optimizer +[2024-04-24 07:22:17,394] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed using client LR scheduler +[2024-04-24 07:22:17,394] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed LR Scheduler = +[2024-04-24 07:22:17,394] [INFO] [logging.py:96:log_dist] [Rank 0] step=0, skipped=0, lr=[0.0, 0.0], mom=[(0.9, 0.95), (0.9, 0.95)] +[2024-04-24 07:22:17,394] [INFO] [config.py:992:print] DeepSpeedEngine configuration: +[2024-04-24 07:22:17,394] [INFO] [config.py:996:print] activation_checkpointing_config { + "partition_activations": false, + "contiguous_memory_optimization": false, + "cpu_checkpointing": false, + "number_checkpoints": null, + "synchronize_checkpoint_boundary": false, + "profile": false +} +[2024-04-24 07:22:17,394] [INFO] [config.py:996:print] aio_config ................... {'block_size': 1048576, 'queue_depth': 8, 'thread_count': 1, 'single_submit': False, 'overlap_events': True} +[2024-04-24 07:22:17,395] [INFO] [config.py:996:print] amp_enabled .................. False +[2024-04-24 07:22:17,395] [INFO] [config.py:996:print] amp_params ................... False +[2024-04-24 07:22:17,395] [INFO] [config.py:996:print] autotuning_config ............ { + "enabled": false, + "start_step": null, + "end_step": null, + "metric_path": null, + "arg_mappings": null, + "metric": "throughput", + "model_info": null, + "results_dir": "autotuning_results", + "exps_dir": "autotuning_exps", + "overwrite": true, + "fast": true, + "start_profile_step": 3, + "end_profile_step": 5, + "tuner_type": "gridsearch", + "tuner_early_stopping": 5, + "tuner_num_trials": 50, + "model_info_path": null, + "mp_size": 1, + "max_train_batch_size": null, + "min_train_batch_size": 1, + "max_train_micro_batch_size_per_gpu": 1.024000e+03, + "min_train_micro_batch_size_per_gpu": 1, + "num_tuning_micro_batch_sizes": 3 +} +[2024-04-24 07:22:17,395] [INFO] [config.py:996:print] bfloat16_accumulate_grads_via_hooks True +[2024-04-24 07:22:17,395] [INFO] [config.py:996:print] bfloat16_enabled ............. True +[2024-04-24 07:22:17,395] [INFO] [config.py:996:print] checkpoint_parallel_write_pipeline False +[2024-04-24 07:22:17,395] [INFO] [config.py:996:print] checkpoint_tag_validation_enabled True +[2024-04-24 07:22:17,395] [INFO] [config.py:996:print] checkpoint_tag_validation_fail False +[2024-04-24 07:22:17,395] [INFO] [config.py:996:print] comms_config ................. +[2024-04-24 07:22:17,395] [INFO] [config.py:996:print] communication_data_type ...... None +[2024-04-24 07:22:17,395] [INFO] [config.py:996:print] compression_config ........... 
{'weight_quantization': {'shared_parameters': {'enabled': False, 'quantizer_kernel': False, 'schedule_offset': 0, 'quantize_groups': 1, 'quantize_verbose': False, 'quantization_type': 'symmetric', 'quantize_weight_in_forward': False, 'rounding': 'nearest', 'fp16_mixed_quantize': False, 'quantize_change_ratio': 0.001}, 'different_groups': {}}, 'activation_quantization': {'shared_parameters': {'enabled': False, 'quantization_type': 'symmetric', 'range_calibration': 'dynamic', 'schedule_offset': 1000}, 'different_groups': {}}, 'sparse_pruning': {'shared_parameters': {'enabled': False, 'method': 'l1', 'schedule_offset': 1000}, 'different_groups': {}}, 'row_pruning': {'shared_parameters': {'enabled': False, 'method': 'l1', 'schedule_offset': 1000}, 'different_groups': {}}, 'head_pruning': {'shared_parameters': {'enabled': False, 'method': 'topk', 'schedule_offset': 1000}, 'different_groups': {}}, 'channel_pruning': {'shared_parameters': {'enabled': False, 'method': 'l1', 'schedule_offset': 1000}, 'different_groups': {}}, 'layer_reduction': {'enabled': False}} +[2024-04-24 07:22:17,395] [INFO] [config.py:996:print] curriculum_enabled_legacy .... False +[2024-04-24 07:22:17,395] [INFO] [config.py:996:print] curriculum_params_legacy ..... False +[2024-04-24 07:22:17,395] [INFO] [config.py:996:print] data_efficiency_config ....... {'enabled': False, 'seed': 1234, 'data_sampling': {'enabled': False, 'num_epochs': 1000, 'num_workers': 0, 'curriculum_learning': {'enabled': False}}, 'data_routing': {'enabled': False, 'random_ltd': {'enabled': False, 'layer_token_lr_schedule': {'enabled': False}}}} +[2024-04-24 07:22:17,396] [INFO] [config.py:996:print] data_efficiency_enabled ...... False +[2024-04-24 07:22:17,396] [INFO] [config.py:996:print] dataloader_drop_last ......... False +[2024-04-24 07:22:17,396] [INFO] [config.py:996:print] disable_allgather ............ False +[2024-04-24 07:22:17,396] [INFO] [config.py:996:print] dump_state ................... False +[2024-04-24 07:22:17,396] [INFO] [config.py:996:print] dynamic_loss_scale_args ...... None +[2024-04-24 07:22:17,396] [INFO] [config.py:996:print] eigenvalue_enabled ........... False +[2024-04-24 07:22:17,396] [INFO] [config.py:996:print] eigenvalue_gas_boundary_resolution 1 +[2024-04-24 07:22:17,396] [INFO] [config.py:996:print] eigenvalue_layer_name ........ bert.encoder.layer +[2024-04-24 07:22:17,396] [INFO] [config.py:996:print] eigenvalue_layer_num ......... 0 +[2024-04-24 07:22:17,396] [INFO] [config.py:996:print] eigenvalue_max_iter .......... 100 +[2024-04-24 07:22:17,396] [INFO] [config.py:996:print] eigenvalue_stability ......... 1e-06 +[2024-04-24 07:22:17,396] [INFO] [config.py:996:print] eigenvalue_tol ............... 0.01 +[2024-04-24 07:22:17,396] [INFO] [config.py:996:print] eigenvalue_verbose ........... False +[2024-04-24 07:22:17,396] [INFO] [config.py:996:print] elasticity_enabled ........... False +[2024-04-24 07:22:17,396] [INFO] [config.py:996:print] flops_profiler_config ........ { + "enabled": false, + "recompute_fwd_factor": 0.0, + "profile_step": 1, + "module_depth": -1, + "top_modules": 1, + "detailed": true, + "output_file": null +} +[2024-04-24 07:22:17,396] [INFO] [config.py:996:print] fp16_auto_cast ............... None +[2024-04-24 07:22:17,396] [INFO] [config.py:996:print] fp16_enabled ................. False +[2024-04-24 07:22:17,397] [INFO] [config.py:996:print] fp16_master_weights_and_gradients False +[2024-04-24 07:22:17,397] [INFO] [config.py:996:print] global_rank .................. 
0 +[2024-04-24 07:22:17,397] [INFO] [config.py:996:print] grad_accum_dtype ............. None +[2024-04-24 07:22:17,397] [INFO] [config.py:996:print] gradient_accumulation_steps .. 128 +[2024-04-24 07:22:17,397] [INFO] [config.py:996:print] gradient_clipping ............ 1.0 +[2024-04-24 07:22:17,397] [INFO] [config.py:996:print] gradient_predivide_factor .... 1.0 +[2024-04-24 07:22:17,397] [INFO] [config.py:996:print] hybrid_engine ................ enabled=False max_out_tokens=512 inference_tp_size=1 release_inference_cache=False pin_parameters=True tp_gather_partition_size=8 +[2024-04-24 07:22:17,397] [INFO] [config.py:996:print] initial_dynamic_scale ........ 1 +[2024-04-24 07:22:17,397] [INFO] [config.py:996:print] load_universal_checkpoint .... False +[2024-04-24 07:22:17,397] [INFO] [config.py:996:print] loss_scale ................... 1.0 +[2024-04-24 07:22:17,397] [INFO] [config.py:996:print] memory_breakdown ............. False +[2024-04-24 07:22:17,397] [INFO] [config.py:996:print] mics_hierarchial_params_gather False +[2024-04-24 07:22:17,397] [INFO] [config.py:996:print] mics_shard_size .............. -1 +[2024-04-24 07:22:17,397] [INFO] [config.py:996:print] monitor_config ............... tensorboard=TensorBoardConfig(enabled=False, output_path='', job_name='DeepSpeedJobName') wandb=WandbConfig(enabled=False, group=None, team=None, project='deepspeed') csv_monitor=CSVConfig(enabled=False, output_path='', job_name='DeepSpeedJobName') enabled=False +[2024-04-24 07:22:17,397] [INFO] [config.py:996:print] nebula_config ................ { + "enabled": false, + "persistent_storage_path": null, + "persistent_time_interval": 100, + "num_of_version_in_retention": 2, + "enable_nebula_load": true, + "load_path": null +} +[2024-04-24 07:22:17,397] [INFO] [config.py:996:print] optimizer_legacy_fusion ...... False +[2024-04-24 07:22:17,397] [INFO] [config.py:996:print] optimizer_name ............... None +[2024-04-24 07:22:17,398] [INFO] [config.py:996:print] optimizer_params ............. None +[2024-04-24 07:22:17,398] [INFO] [config.py:996:print] pipeline ..................... {'stages': 'auto', 'partition': 'best', 'seed_layers': False, 'activation_checkpoint_interval': 0, 'pipe_partitioned': False, 'grad_partitioned': False} +[2024-04-24 07:22:17,398] [INFO] [config.py:996:print] pld_enabled .................. False +[2024-04-24 07:22:17,398] [INFO] [config.py:996:print] pld_params ................... False +[2024-04-24 07:22:17,398] [INFO] [config.py:996:print] prescale_gradients ........... False +[2024-04-24 07:22:17,398] [INFO] [config.py:996:print] scheduler_name ............... None +[2024-04-24 07:22:17,398] [INFO] [config.py:996:print] scheduler_params ............. None +[2024-04-24 07:22:17,398] [INFO] [config.py:996:print] seq_parallel_communication_data_type torch.float32 +[2024-04-24 07:22:17,398] [INFO] [config.py:996:print] sparse_attention ............. None +[2024-04-24 07:22:17,398] [INFO] [config.py:996:print] sparse_gradients_enabled ..... False +[2024-04-24 07:22:17,398] [INFO] [config.py:996:print] steps_per_print .............. 10 +[2024-04-24 07:22:17,398] [INFO] [config.py:996:print] train_batch_size ............. 256 +[2024-04-24 07:22:17,398] [INFO] [config.py:996:print] train_micro_batch_size_per_gpu 1 +[2024-04-24 07:22:17,398] [INFO] [config.py:996:print] use_data_before_expert_parallel_ False +[2024-04-24 07:22:17,398] [INFO] [config.py:996:print] use_node_local_storage ....... 
False +[2024-04-24 07:22:17,398] [INFO] [config.py:996:print] wall_clock_breakdown ......... False +[2024-04-24 07:22:17,398] [INFO] [config.py:996:print] weight_quantization_config ... None +[2024-04-24 07:22:17,398] [INFO] [config.py:996:print] world_size ................... 2 +[2024-04-24 07:22:17,399] [INFO] [config.py:996:print] zero_allow_comm_data_type_fp32 False +[2024-04-24 07:22:17,399] [INFO] [config.py:996:print] zero_allow_untested_optimizer False +[2024-04-24 07:22:17,399] [INFO] [config.py:996:print] zero_config .................. stage=0 contiguous_gradients=True reduce_scatter=False reduce_bucket_size=500,000,000 use_multi_rank_bucket_allreduce=True allgather_partitions=True allgather_bucket_size=500,000,000 overlap_comm=False load_from_fp32_weights=True elastic_checkpoint=False offload_param=None offload_optimizer=None sub_group_size=1,000,000,000 cpu_offload_param=None cpu_offload_use_pin_memory=None cpu_offload=None prefetch_bucket_size=50,000,000 param_persistence_threshold=100,000 model_persistence_threshold=sys.maxsize max_live_parameters=1,000,000,000 max_reuse_distance=1,000,000,000 gather_16bit_weights_on_model_save=False stage3_gather_fp16_weights_on_model_save=False use_all_reduce_for_fetch_params=False ignore_unused_parameters=True legacy_stage1=False round_robin_gradients=False zero_hpz_partition_size=1 zero_quantized_weights=False zero_quantized_nontrainable_weights=False zero_quantized_gradients=False mics_shard_size=-1 mics_hierarchical_params_gather=False memory_efficient_linear=True pipeline_loading_checkpoint=False override_module_apply=True +[2024-04-24 07:22:17,399] [INFO] [config.py:996:print] zero_enabled ................. False +[2024-04-24 07:22:17,399] [INFO] [config.py:996:print] zero_force_ds_cpu_optimizer .. True +[2024-04-24 07:22:17,399] [INFO] [config.py:996:print] zero_optimization_stage ...... 
0 +[2024-04-24 07:22:17,399] [INFO] [config.py:982:print_user_config] json = { + "train_batch_size": 256, + "train_micro_batch_size_per_gpu": 1, + "steps_per_print": 10, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": 0 + }, + "bf16": { + "enabled": true, + "accumulate_grads_via_hooks": true + }, + "fp16": { + "enabled": false + }, + "wall_clock_breakdown": false, + "pipeline": { + "pipe_partitioned": false, + "grad_partitioned": false + } +} +[2024-04-24 07:22:17,399] [INFO] [engine.py:99:__init__] CONFIG: micro_batches=128 micro_batch_size=1 +[2024-04-24 07:22:17,399] [INFO] [engine.py:139:__init__] is_pipe_partitioned= False is_grad_partitioned= False +[2024-04-24 07:22:17,669] [INFO] [engine.py:180:__init__] RANK=0 STAGE=0 LAYERS=11 [0, 11) STAGE_PARAMS=1397964800 (1397.965M) TOTAL_PARAMS=5591869440 (5591.869M) UNIQUE_PARAMS=5591869440 (5591.869M) +[2024-04-24 07:22:17,669] [INFO] [engine.py:180:__init__] RANK=1 STAGE=0 LAYERS=11 [0, 11) STAGE_PARAMS=1397964800 (1397.965M) TOTAL_PARAMS=5591869440 (5591.869M) UNIQUE_PARAMS=5591869440 (5591.869M) +[2024-04-24 07:22:17,669] [INFO] [engine.py:180:__init__] RANK=4 STAGE=1 LAYERS=13 [11, 24) STAGE_PARAMS=1397969920 (1397.970M) TOTAL_PARAMS=5591869440 (5591.869M) UNIQUE_PARAMS=5591869440 (5591.869M) +[2024-04-24 07:22:17,670] [INFO] [engine.py:180:__init__] RANK=5 STAGE=1 LAYERS=13 [11, 24) STAGE_PARAMS=1397969920 (1397.970M) TOTAL_PARAMS=5591869440 (5591.869M) UNIQUE_PARAMS=5591869440 (5591.869M) +[2024-04-24 07:22:17,672] [WARNING] [engine.py:2763:load_checkpoint] Unable to find latest file at /data/output/llama13b_5M//checkpoints_zero_stage_2/latest, if trying to load latest checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint. +[2024-04-24 07:22:17,673] [WARNING] [engine.py:2763:load_checkpoint] Unable to find latest file at /data/output/llama13b_5M//checkpoints_zero_stage_2/latest, if trying to load latest checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint. +[2024-04-24 07:22:17,674] [WARNING] [engine.py:2763:load_checkpoint] Unable to find latest file at /data/output/llama13b_5M//checkpoints_zero_stage_2/latest, if trying to load latest checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint. +[2024-04-24 07:22:17,674] [WARNING] [engine.py:2763:load_checkpoint] Unable to find latest file at /data/output/llama13b_5M//checkpoints_zero_stage_2/latest, if trying to load latest checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint. +[2024-04-24 07:22:17,674] [WARNING] [engine.py:2763:load_checkpoint] Unable to find latest file at /data/output/llama13b_5M//checkpoints_zero_stage_2/latest, if trying to load latest checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint. +[2024-04-24 07:22:17,674] [WARNING] [engine.py:2763:load_checkpoint] Unable to find latest file at /data/output/llama13b_5M//checkpoints_zero_stage_2/latest, if trying to load latest checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint. +[2024-04-24 07:22:17,674] [WARNING] [engine.py:2763:load_checkpoint] Unable to find latest file at /data/output/llama13b_5M//checkpoints_zero_stage_2/latest, if trying to load latest checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint. 
+WARNING: could not find the metadata file /data/output/llama13b_5M//checkpoints_zero_stage_2
+[2024-04-24 07:22:17,674] [WARNING] [engine.py:2763:load_checkpoint] Unable to find latest file at /data/output/llama13b_5M//checkpoints_zero_stage_2/latest, if trying to load latest checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint.
+ will not load any checkpoints and will start from random
+time (ms) | load-checkpoint: 2.70
+[after model, optimizer, and learning rate scheduler are built] datetime: 2024-04-24 07:22:17
+> building train, validation, and test datasets ...
+ > datasets target sizes (minimum size):
+ train: 2560000
+ validation: 1282560
+ test: 2560
+> building train, validation, and test datasets for GPT ...
+Single data path provided for train, valid & test
+ > building dataset index ...
+ reading sizes...
+ reading pointers...
+ reading document index...
+ creating numpy buffer of mmap...
+ creating memory view of numpy buffer...
+ > finished creating indexed dataset in 0.010700 seconds
+ number of documents: 1558306
+ > dataset split:
+ train:
+ document indices in [0, 1509999) total of 1509999 documents
+ validation:
+ document indices in [1509999, 1556748) total of 46749 documents
+ test:
+ document indices in [1556748, 1558306) total of 1558 documents
+Loading dataset index file from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_doc_idx.npy
+ > loaded doc-idx mapping from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_doc_idx.npy
+Loading dataset index file from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_sample_idx.npy
+ > loaded sample-idx mapping from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_sample_idx.npy
+Loading dataset index file from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_shuffle_idx.npy
+ > loaded shuffle-idx mapping from /data/arxiv//tokenized_text_document_train_indexmap_2560000ns_2048sl_1234s_shuffle_idx.npy
+ loaded indexed file in 0.004 seconds
+ total number of samples: 15244235
+ total number of epochs: 1
+Loading dataset index file from /data/arxiv//tokenized_text_document_valid_indexmap_1282560ns_2048sl_1234s_doc_idx.npy
+ > loaded doc-idx mapping from /data/arxiv//tokenized_text_document_valid_indexmap_1282560ns_2048sl_1234s_doc_idx.npy
+Loading dataset index file from /data/arxiv//tokenized_text_document_valid_indexmap_1282560ns_2048sl_1234s_sample_idx.npy
+ > loaded sample-idx mapping from /data/arxiv//tokenized_text_document_valid_indexmap_1282560ns_2048sl_1234s_sample_idx.npy
+Loading dataset index file from /data/arxiv//tokenized_text_document_valid_indexmap_1282560ns_2048sl_1234s_shuffle_idx.npy
+ > loaded shuffle-idx mapping from /data/arxiv//tokenized_text_document_valid_indexmap_1282560ns_2048sl_1234s_shuffle_idx.npy
+ loaded indexed file in 0.003 seconds
+ total number of samples: 1443484
+ total number of epochs: 3
+Loading dataset index file from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_doc_idx.npy
+ > loaded doc-idx mapping from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_doc_idx.npy
+Loading dataset index file from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_sample_idx.npy
+ > loaded sample-idx mapping from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_sample_idx.npy
+Loading dataset index file from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_shuffle_idx.npy
+ > loaded shuffle-idx mapping from /data/arxiv//tokenized_text_document_test_indexmap_2560ns_2048sl_1234s_shuffle_idx.npy
+ loaded indexed file in 0.032 seconds
+ total number of samples: 16581
+ total number of epochs: 1
+> finished creating GPT datasets ...
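The dataset target sizes above can be reproduced from the run's batch settings; only the eval interval has to be back-solved, since it is not visible in this excerpt. A short cross-check, with the inferred value clearly marked:

# Sketch of Megatron's target-size arithmetic for this run.
train_iters = 10000
global_batch_size = 256
eval_iters = 10

assert train_iters * global_batch_size == 2_560_000   # "train: 2560000"
assert eval_iters * global_batch_size == 2_560        # "test: 2560"

# Megatron sizes validation as (train_iters // eval_interval + 1) eval runs.
# 1,282,560 / (10 * 256) = 501 runs, which back-solves to an eval interval of 20.
eval_interval = 20                                     # inferred, not shown in this log
assert (train_iters // eval_interval + 1) * eval_iters * global_batch_size == 1_282_560

The epoch counts are consistent too: 2,560,000 train samples fit inside the 15,244,235 available (1 epoch), while the small validation split needs 3 passes to cover its 1,282,560-sample target.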
+[after dataloaders are built] datetime: 2024-04-24 07:22:19
+done with setup ...
+training ...
+time (ms) | model-and-optimizer-setup: 2739.98 | train/valid/test-data-iterators-setup: 1518.83
+[before the start of training step] datetime: 2024-04-24 07:22:19
+******************INIT_SAVING_20******************
+******************INIT_SAVING_20******************
+******************INIT_SAVING_20******************
+******************INIT_SAVING_20******************
+******************INIT_SAVING_20******************
+******************INIT_SAVING_20******************
+******************INIT_SAVING_20******************
+ iteration 10/ 10000 | consumed samples: 2560 | consumed tokens: 5242880 | elapsed time per iteration (ms): 0.5 | learning rate: 0.000E+00 | global batch size: 256 | loss scale: 0.0 | grad norm: 1.000 | num zeros: 1.0 | number of skipped iterations: 10 | number of nan iterations: 0 | samples per second: 514096.440 | TFLOPs: 4392335.75 |
+ iteration 20/ 10000 | consumed samples: 5120 | consumed tokens: 10485760 | elapsed time per iteration (ms): 0.6 | learning rate: 0.000E+00 | global batch size: 256 | loss scale: 0.0 | grad norm: 1.000 | num zeros: 1.0 | number of skipped iterations: 10 | number of nan iterations: 0 | samples per second: 431793.873 | TFLOPs: 3689159.30 |
+******************INIT_SAVING_20******************
+saving checkpoint at iteration 20 to /data/output/llama13b_5M//checkpoints_zero_stage_2
+[2024-04-24 07:22:19,375] [INFO] [logging.py:96:log_dist] [Rank 0] [Torch] Checkpoint global_step0 is about to be saved!
+[2024-04-24 07:22:19,569] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_01-model_00-model_states.pt...
+[2024-04-24 07:22:19,574] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_01-model_01-model_states.pt...
+[2024-04-24 07:22:19,615] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_11-model_01-model_states.pt...
+[2024-04-24 07:22:19,624] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_11-model_00-model_states.pt...
+[2024-04-24 07:22:19,814] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_01-model_00-model_states.pt.
+[2024-04-24 07:22:19,835] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_01-model_01-model_states.pt.
+[2024-04-24 07:22:19,910] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_11-model_01-model_states.pt.
+[2024-04-24 07:22:19,914] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_03-model_00-model_states.pt...
+[2024-04-24 07:22:19,938] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_03-model_01-model_states.pt...
+[2024-04-24 07:22:19,944] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_11-model_00-model_states.pt.
+[2024-04-24 07:22:20,001] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_12-model_01-model_states.pt... +[2024-04-24 07:22:20,035] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_12-model_00-model_states.pt... +[2024-04-24 07:22:20,259] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_03-model_00-model_states.pt. +[2024-04-24 07:22:20,276] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_03-model_01-model_states.pt. +[2024-04-24 07:22:20,292] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_12-model_01-model_states.pt. +[2024-04-24 07:22:20,326] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_12-model_00-model_states.pt. +[2024-04-24 07:22:20,332] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_04-model_00-model_states.pt... +[2024-04-24 07:22:20,367] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_04-model_01-model_states.pt... +[2024-04-24 07:22:20,376] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_13-model_01-model_states.pt... +[2024-04-24 07:22:20,426] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_13-model_00-model_states.pt... +[2024-04-24 07:22:20,617] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_04-model_00-model_states.pt. +[2024-04-24 07:22:20,682] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_04-model_01-model_states.pt. +[2024-04-24 07:22:20,686] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_05-model_00-model_states.pt... +[2024-04-24 07:22:20,696] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_13-model_01-model_states.pt. +[2024-04-24 07:22:20,727] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_13-model_00-model_states.pt. +[2024-04-24 07:22:20,770] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_05-model_01-model_states.pt... +[2024-04-24 07:22:20,783] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_14-model_01-model_states.pt... +[2024-04-24 07:22:20,822] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_14-model_00-model_states.pt... +[2024-04-24 07:22:20,986] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_05-model_00-model_states.pt. 
+[2024-04-24 07:22:21,055] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_06-model_00-model_states.pt... +[2024-04-24 07:22:21,090] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_05-model_01-model_states.pt. +[2024-04-24 07:22:21,116] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_14-model_00-model_states.pt. +[2024-04-24 07:22:21,148] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_14-model_01-model_states.pt. +[2024-04-24 07:22:21,171] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_06-model_01-model_states.pt... +[2024-04-24 07:22:21,198] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_15-model_00-model_states.pt... +[2024-04-24 07:22:21,236] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_15-model_01-model_states.pt... +[2024-04-24 07:22:21,341] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_06-model_00-model_states.pt. +[2024-04-24 07:22:21,414] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_07-model_00-model_states.pt... +[2024-04-24 07:22:21,475] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_06-model_01-model_states.pt. +[2024-04-24 07:22:21,508] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_15-model_00-model_states.pt. +[2024-04-24 07:22:21,549] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_15-model_01-model_states.pt. +[2024-04-24 07:22:21,563] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_07-model_01-model_states.pt... +[2024-04-24 07:22:21,593] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_16-model_00-model_states.pt... +[2024-04-24 07:22:21,639] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_16-model_01-model_states.pt... +[2024-04-24 07:22:21,695] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_07-model_00-model_states.pt. +[2024-04-24 07:22:21,769] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_08-model_00-model_states.pt... +[2024-04-24 07:22:21,866] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_07-model_01-model_states.pt. +[2024-04-24 07:22:21,881] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_16-model_00-model_states.pt. 
+[2024-04-24 07:22:21,946] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_08-model_01-model_states.pt... +[2024-04-24 07:22:21,958] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_17-model_00-model_states.pt... +[2024-04-24 07:22:21,969] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_16-model_01-model_states.pt. +[2024-04-24 07:22:22,044] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_17-model_01-model_states.pt... +[2024-04-24 07:22:22,067] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_08-model_00-model_states.pt. +[2024-04-24 07:22:22,142] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_09-model_00-model_states.pt... +[2024-04-24 07:22:22,259] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_08-model_01-model_states.pt. +[2024-04-24 07:22:22,264] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_17-model_00-model_states.pt. +[2024-04-24 07:22:22,337] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_09-model_01-model_states.pt... +[2024-04-24 07:22:22,346] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_18-model_00-model_states.pt... +[2024-04-24 07:22:22,348] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_17-model_01-model_states.pt. +[2024-04-24 07:22:22,418] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_09-model_00-model_states.pt. +[2024-04-24 07:22:22,429] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_18-model_01-model_states.pt... +[2024-04-24 07:22:22,488] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_10-model_00-model_states.pt... +[2024-04-24 07:22:22,628] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_09-model_01-model_states.pt. +[2024-04-24 07:22:22,643] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_18-model_00-model_states.pt. +[2024-04-24 07:22:22,647] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_20-model_00-model_states.pt... +[2024-04-24 07:22:22,652] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_20-model_00-model_states.pt. +[2024-04-24 07:22:22,708] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_10-model_01-model_states.pt... 
+[2024-04-24 07:22:22,723] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_18-model_01-model_states.pt. +[2024-04-24 07:22:22,726] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_20-model_01-model_states.pt... +[2024-04-24 07:22:22,729] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_21-model_00-model_states.pt... +[2024-04-24 07:22:22,738] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_20-model_01-model_states.pt. +[2024-04-24 07:22:22,756] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_10-model_00-model_states.pt. +[2024-04-24 07:22:22,758] [INFO] [logging.py:96:log_dist] [Rank 0] Saving model checkpoint: /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/mp_rank_00_model_states.pt +[2024-04-24 07:22:22,758] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/mp_rank_00_model_states.pt... +[2024-04-24 07:22:22,780] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/mp_rank_00_model_states.pt. +[2024-04-24 07:22:22,783] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt... +[2024-04-24 07:22:22,785] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt... +[2024-04-24 07:22:22,807] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_21-model_01-model_states.pt... +[2024-04-24 07:22:22,983] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_21-model_00-model_states.pt. +[2024-04-24 07:22:22,985] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/mp_rank_02_model_states.pt... +[2024-04-24 07:22:23,009] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/mp_rank_02_model_states.pt. +[2024-04-24 07:22:23,013] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_02_optim_states.pt... +[2024-04-24 07:22:23,014] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_02_optim_states.pt... +[2024-04-24 07:22:23,016] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_10-model_01-model_states.pt. +[2024-04-24 07:22:23,018] [INFO] [logging.py:96:log_dist] [Rank 1] Saving model checkpoint: /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/mp_rank_01_model_states.pt +[2024-04-24 07:22:23,018] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/mp_rank_01_model_states.pt... 
+[2024-04-24 07:22:23,036] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/mp_rank_01_model_states.pt. +[2024-04-24 07:22:23,039] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_01_optim_states.pt... +[2024-04-24 07:22:23,040] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_01_optim_states.pt... +[2024-04-24 07:22:23,059] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/layer_21-model_01-model_states.pt. +[2024-04-24 07:22:23,061] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/mp_rank_03_model_states.pt... +[2024-04-24 07:22:23,081] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/mp_rank_03_model_states.pt. +[2024-04-24 07:22:23,083] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_03_optim_states.pt... +[2024-04-24 07:22:23,085] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_03_optim_states.pt... +[2024-04-24 07:22:36,301] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt. +[2024-04-24 07:22:36,302] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt... +[2024-04-24 07:22:36,438] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_01_optim_states.pt. +[2024-04-24 07:22:36,439] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_01_optim_states.pt... +[2024-04-24 07:22:36,440] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_03_optim_states.pt. +[2024-04-24 07:22:36,441] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_03_optim_states.pt... +[2024-04-24 07:22:36,796] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_03_optim_states.pt. +[2024-04-24 07:22:36,797] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_03_optim_states.pt... +[2024-04-24 07:22:36,821] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_01_optim_states.pt. +[2024-04-24 07:22:36,822] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_01_optim_states.pt... 
+[2024-04-24 07:22:38,082] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_02_optim_states.pt. +[2024-04-24 07:22:38,083] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_02_optim_states.pt... +[2024-04-24 07:22:38,152] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_02_optim_states.pt. +[2024-04-24 07:22:38,152] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_02_optim_states.pt... +[2024-04-24 07:22:38,158] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt. +[2024-04-24 07:22:38,158] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt... +[2024-04-24 07:22:51,480] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt. +[2024-04-24 07:22:56,262] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_03_optim_states.pt. +[2024-04-24 07:22:56,262] [INFO] [engine.py:3481:_save_zero_checkpoint] bf16_zero checkpoint saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_03_optim_states.pt +[2024-04-24 07:22:56,262] [INFO] [torch_checkpoint_engine.py:34:commit] [Torch] Checkpoint global_step0 is ready now! +[2024-04-24 07:22:56,904] [INFO] [engine.py:3481:_save_zero_checkpoint] bf16_zero checkpoint saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt +[2024-04-24 07:22:56,904] [INFO] [torch_checkpoint_engine.py:34:commit] [Torch] Checkpoint global_step0 is ready now! +[2024-04-24 07:22:57,019] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_03_optim_states.pt. +[2024-04-24 07:22:57,020] [INFO] [engine.py:3481:_save_zero_checkpoint] bf16_zero checkpoint saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_03_optim_states.pt +[2024-04-24 07:22:57,020] [INFO] [torch_checkpoint_engine.py:34:commit] [Torch] Checkpoint global_step0 is ready now! +[2024-04-24 07:22:57,693] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_01_optim_states.pt. +[2024-04-24 07:22:57,694] [INFO] [engine.py:3481:_save_zero_checkpoint] bf16_zero checkpoint saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_01_optim_states.pt +[2024-04-24 07:22:57,694] [INFO] [torch_checkpoint_engine.py:34:commit] [Torch] Checkpoint global_step0 is ready now! +[2024-04-24 07:22:57,834] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt. 
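The commit lines that follow close out a save whose reported volume ("Checkpoint Save GB: 78.286" a few lines below) lines up with the TOTAL_PARAMS reported at engine init, assuming bf16 weights plus an fp32 master copy and two fp32 Adam moments per parameter. A back-of-envelope sketch under those assumptions, not read from the log:

# Sketch: expected checkpoint volume for this run.
params = 5_591_869_440               # TOTAL_PARAMS at engine init
bytes_per_param = 2 + 4 + 4 + 4      # bf16 weight + fp32 master + Adam exp_avg + exp_avg_sq
total_gb = params * bytes_per_param / 1e9

assert abs(total_gb - 78.286) < 0.001   # matches "Checkpoint Save GB: 78.286"
# 78.286 GB / 43.674 s is roughly 1.79 GB/s, matching the reported GB/Sec.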
+[2024-04-24 07:22:57,834] [INFO] [engine.py:3481:_save_zero_checkpoint] bf16_zero checkpoint saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
+[2024-04-24 07:22:57,834] [INFO] [torch_checkpoint_engine.py:34:commit] [Torch] Checkpoint global_step0 is ready now!
+[2024-04-24 07:22:57,852] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_01_optim_states.pt.
+[2024-04-24 07:22:57,853] [INFO] [engine.py:3481:_save_zero_checkpoint] bf16_zero checkpoint saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_01_optim_states.pt
+[2024-04-24 07:22:57,853] [INFO] [torch_checkpoint_engine.py:34:commit] [Torch] Checkpoint global_step0 is ready now!
+[2024-04-24 07:23:02,350] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_02_optim_states.pt.
+[2024-04-24 07:23:02,351] [INFO] [engine.py:3481:_save_zero_checkpoint] bf16_zero checkpoint saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_1_mp_rank_02_optim_states.pt
+[2024-04-24 07:23:02,351] [INFO] [torch_checkpoint_engine.py:34:commit] [Torch] Checkpoint global_step0 is ready now!
+[2024-04-24 07:23:03,032] [INFO] [torch_checkpoint_engine.py:24:save] [Torch] Saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_02_optim_states.pt.
+[2024-04-24 07:23:03,032] [INFO] [engine.py:3481:_save_zero_checkpoint] bf16_zero checkpoint saved /data/output/llama13b_5M//checkpoints_zero_stage_2/global_step0/bf16_zero_pp_rank_0_mp_rank_02_optim_states.pt
+[2024-04-24 07:23:03,033] [INFO] [torch_checkpoint_engine.py:34:commit] [Torch] Checkpoint global_step0 is ready now!
+Couldn't save model in huggingface format
+Couldn't save model in huggingface format
+Couldn't save model in huggingface format
+Couldn't save model in huggingface format
+Couldn't save model in huggingface format
+Couldn't save model in huggingface format
+Couldn't save model in huggingface format
+Couldn't save model in huggingface format
+ successfully saved checkpoint at iteration 20 to /data/output/llama13b_5M//checkpoints_zero_stage_2
+Checkpoint Save GB: 78.286, GB/Sec: 1.79, Latency(second): 43.674
+time (ms) | save-checkpoint: 43676.29
+/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+ warnings.warn(
+[2024-04-24 07:23:07,111] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect)
+Convert DeepSpeed Checkpoint to Universal Checkpoint
+args = Namespace(input_folder='/data/output/llama13b_5M//checkpoints_zero_stage_2', output_folder='/data/output/univ_ckpt_new', num_extract_workers=4, num_merge_workers=2)
+Converting DeepSpeed checkpoint in /data/output/llama13b_5M//checkpoints_zero_stage_2 to Universal checkpoint in /data/output/univ_ckpt_new
+*** 1.
Extracting ZeRO fragments + 0%| | 0/2 [00:00 + main() + File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/tools/convert_checkpoint/universal_to_huggingface.py", line 329, in main + model = create_model(args, config) + File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/tools/convert_checkpoint/universal_to_huggingface.py", line 80, in create_model + model_config.vocab_size = config['MODEL']['vocab_size'] +KeyError: 'vocab_size' +Using device 'hpu' +/usr/local/lib/python3.10/dist-packages/torch/_utils.py:842: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +/usr/local/lib/python3.10/dist-packages/datasets/load.py:1486: FutureWarning: The repository for hellaswag contains custom code which must be executed to correctly load the dataset. You can inspect the repository content at https://hf.co/datasets/hellaswag +You can avoid this message in future by passing the argument `trust_remote_code=True`. +Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`. + warnings.warn( +Task: hellaswag; number of docs: 10042 +Task: hellaswag; document 0; context prompt (starting on next line): +Personal Care and Style: How to increase breast size with a bra. Check your bra size. Wearing a bra that is too big will not make your breasts look larger. That is why it is important to wear the right size bra for you. +(end of prompt on previous line) +Requests: [Req_loglikelihood('Personal Care and Style: How to increase breast size with a bra. Check your bra size. Wearing a bra that is too big will not make your breasts look larger. That is why it is important to wear the right size bra for you.', ' You can visit a lingerie shop and have them measure you to help you fit a bra to your size, or measure yourself before you shop for a new bra to ensure that you get a good fit. Use a flexible tape measure, like one found in a sewing kit.')[0] +, Req_loglikelihood('Personal Care and Style: How to increase breast size with a bra. Check your bra size. Wearing a bra that is too big will not make your breasts look larger. That is why it is important to wear the right size bra for you.', ' This is why it is important to keep your breasts under protection when in the shower and only wear bras that are larger than your breast size. If you are not wearing a bra, try wearing something that is a little bigger.')[0] +, Req_loglikelihood('Personal Care and Style: How to increase breast size with a bra. Check your bra size. Wearing a bra that is too big will not make your breasts look larger. That is why it is important to wear the right size bra for you.', ' For a girl, a bra with a support strap will be easier for her, because most women are unable to pull through bra straps and bras that are too small will not be able to support breasts from side-to-side. Many bras have even been created that cover the breast side, and can be sent to other women in the world to make them look bigger.')[0] +, Req_loglikelihood('Personal Care and Style: How to increase breast size with a bra. Check your bra size. Wearing a bra that is too big will not make your breasts look larger. 
That is why it is important to wear the right size bra for you.', ' Choose a color that is flattering to your breast type and specific event, in addition to those that make you uncomfortable. Look for sports bras made from natural material, such as spandex or lycra, as this is a more breathable bra.')[0] +] +Running loglikelihood requests + 0%| | 0/40145 [00:00 + main() + File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/tools/convert_checkpoint/universal_to_huggingface.py", line 329, in main + model = create_model(args, config) + File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/tools/convert_checkpoint/universal_to_huggingface.py", line 80, in create_model + model_config.vocab_size = config['MODEL']['vocab_size'] +KeyError: 'vocab_size' +Using device 'hpu' +/usr/local/lib/python3.10/dist-packages/datasets/load.py:1486: FutureWarning: The repository for hellaswag contains custom code which must be executed to correctly load the dataset. You can inspect the repository content at https://hf.co/datasets/hellaswag +You can avoid this message in future by passing the argument `trust_remote_code=True`. +Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`. + warnings.warn( +Task: hellaswag; number of docs: 10042 +Task: hellaswag; document 0; context prompt (starting on next line): +Personal Care and Style: How to increase breast size with a bra. Check your bra size. Wearing a bra that is too big will not make your breasts look larger. That is why it is important to wear the right size bra for you. +(end of prompt on previous line) +Requests: [Req_loglikelihood('Personal Care and Style: How to increase breast size with a bra. Check your bra size. Wearing a bra that is too big will not make your breasts look larger. That is why it is important to wear the right size bra for you.', ' You can visit a lingerie shop and have them measure you to help you fit a bra to your size, or measure yourself before you shop for a new bra to ensure that you get a good fit. Use a flexible tape measure, like one found in a sewing kit.')[0] +, Req_loglikelihood('Personal Care and Style: How to increase breast size with a bra. Check your bra size. Wearing a bra that is too big will not make your breasts look larger. That is why it is important to wear the right size bra for you.', ' This is why it is important to keep your breasts under protection when in the shower and only wear bras that are larger than your breast size. If you are not wearing a bra, try wearing something that is a little bigger.')[0] +, Req_loglikelihood('Personal Care and Style: How to increase breast size with a bra. Check your bra size. Wearing a bra that is too big will not make your breasts look larger. That is why it is important to wear the right size bra for you.', ' For a girl, a bra with a support strap will be easier for her, because most women are unable to pull through bra straps and bras that are too small will not be able to support breasts from side-to-side. Many bras have even been created that cover the breast side, and can be sent to other women in the world to make them look bigger.')[0] +, Req_loglikelihood('Personal Care and Style: How to increase breast size with a bra. Check your bra size. Wearing a bra that is too big will not make your breasts look larger. 
That is why it is important to wear the right size bra for you.', ' Choose a color that is flattering to your breast type and specific event, in addition to those that make you uncomfortable. Look for sports bras made from natural material, such as spandex or lycra, as this is a more breathable bra.')[0] +] +Running loglikelihood requests + 0%| | 0/40145 [00:00, {'100.83.37.175': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.37.188': [8, 9, 10, 11, 12, 13, 14, 15]}) +100.83.37.188: [2024-05-13 09:15:39,161] [INFO] [launch.py:164:main] dist_world_size=16 +100.83.37.188: [2024-05-13 09:15:39,161] [INFO] [launch.py:166:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: ...done. +100.83.37.188: ...done. +100.83.37.188: ...done. +100.83.37.188: ...done. +100.83.37.188: ...done. +100.83.37.188: ...done. +100.83.37.188: ...done. +100.83.37.188: ...done. +100.83.37.175: [2024-05-13 09:15:39,324] [INFO] [launch.py:146:main] WORLD INFO DICT: {'100.83.37.175': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.37.188': [0, 1, 2, 3, 4, 5, 6, 7]} +100.83.37.175: [2024-05-13 09:15:39,324] [INFO] [launch.py:152:main] nnodes=2, num_local_procs=8, node_rank=0 +100.83.37.175: [2024-05-13 09:15:39,324] [INFO] [launch.py:163:main] global_rank_mapping=defaultdict(, {'100.83.37.175': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.37.188': [8, 9, 10, 11, 12, 13, 14, 15]}) +100.83.37.175: [2024-05-13 09:15:39,324] [INFO] [launch.py:164:main] dist_world_size=16 +100.83.37.175: [2024-05-13 09:15:39,324] [INFO] [launch.py:166:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: ...done. +100.83.37.175: ...done. +100.83.37.175: ...done. +100.83.37.175: ...done. +100.83.37.175: ...done. +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: ...done. +100.83.37.175: ...done. +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: ...done. +100.83.37.188: [2024-05-13 09:15:40,973] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.188: warnings.warn( +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. 
+100.83.37.188: warnings.warn( +100.83.37.188: [2024-05-13 09:15:40,973] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.188: warnings.warn( +100.83.37.188: [2024-05-13 09:15:40,982] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.188: [2024-05-13 09:15:41,032] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.188: warnings.warn( +100.83.37.175: [2024-05-13 09:15:41,048] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.175: warnings.warn( +100.83.37.175: [2024-05-13 09:15:41,067] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.175: warnings.warn( +100.83.37.175: [2024-05-13 09:15:41,099] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.175: warnings.warn( +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.175: warnings.warn( +100.83.37.175: [2024-05-13 09:15:41,106] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.188: [2024-05-13 09:15:41,108] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.188: warnings.warn( +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. 
+100.83.37.188: warnings.warn( +100.83.37.188: [2024-05-13 09:15:41,111] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.188: warnings.warn( +100.83.37.188: [2024-05-13 09:15:41,117] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.175: [2024-05-13 09:15:41,139] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.175: warnings.warn( +100.83.37.175: [2024-05-13 09:15:41,145] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.175: warnings.warn( +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.175: warnings.warn( +100.83.37.175: [2024-05-13 09:15:41,147] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.175: [2024-05-13 09:15:41,152] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.175: warnings.warn( +100.83.37.188: [2024-05-13 09:15:41,183] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. 
+100.83.37.188: warnings.warn(
+100.83.37.188: Traceback (most recent call last):
+100.83.37.188: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 25, in <module>
+100.83.37.188: from megatron.model import LLaMAModel, LLaMAModelPipe
+100.83.37.188: ImportError: cannot import name 'LLaMAModel' from 'megatron.model' (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__init__.py)
+100.83.37.188: Traceback (most recent call last):
+100.83.37.188: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 25, in <module>
+100.83.37.188: from megatron.model import LLaMAModel, LLaMAModelPipe
+100.83.37.188: ImportError: cannot import name 'LLaMAModel' from 'megatron.model' (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__init__.py)
+100.83.37.188: Traceback (most recent call last):
+100.83.37.188: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 25, in <module>
+100.83.37.188: from megatron.model import LLaMAModel, LLaMAModelPipe
+100.83.37.188: ImportError: cannot import name 'LLaMAModel' from 'megatron.model' (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__init__.py)
+100.83.37.188: Traceback (most recent call last):
+100.83.37.188: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 25, in <module>
+100.83.37.188: from megatron.model import LLaMAModel, LLaMAModelPipe
+100.83.37.188: ImportError: cannot import name 'LLaMAModel' from 'megatron.model' (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__init__.py)
+100.83.37.188: Traceback (most recent call last):
+100.83.37.188: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 25, in <module>
+100.83.37.188: from megatron.model import LLaMAModel, LLaMAModelPipe
+100.83.37.188: ImportError: cannot import name 'LLaMAModel' from 'megatron.model' (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__init__.py)
+100.83.37.188: Traceback (most recent call last):
+100.83.37.188: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 25, in <module>
+100.83.37.188: from megatron.model import LLaMAModel, LLaMAModelPipe
+100.83.37.188: ImportError: cannot import name 'LLaMAModel' from 'megatron.model' (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__init__.py)
+100.83.37.188: Traceback (most recent call last):
+100.83.37.188: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 25, in <module>
+100.83.37.188: from megatron.model import LLaMAModel, LLaMAModelPipe
+100.83.37.188: ImportError: cannot import name 'LLaMAModel' from 'megatron.model' (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__init__.py)
+100.83.37.188: Traceback (most recent call last):
+100.83.37.188: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 25, in <module>
+100.83.37.188: from megatron.model import LLaMAModel, LLaMAModelPipe
+100.83.37.188: ImportError: cannot import name 'LLaMAModel' from 'megatron.model' (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__init__.py)
+100.83.37.175: Traceback (most recent call last):
+100.83.37.175: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 25, in <module>
+100.83.37.175: from megatron.model import LLaMAModel, LLaMAModelPipe
+100.83.37.175: ImportError: cannot import name 'LLaMAModel' from 'megatron.model' (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__init__.py)
+100.83.37.175: Traceback (most recent call last):
+100.83.37.175: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 25, in <module>
+100.83.37.175: from megatron.model import LLaMAModel, LLaMAModelPipe
+100.83.37.175: ImportError: cannot import name 'LLaMAModel' from 'megatron.model' (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__init__.py)
+100.83.37.175: Traceback (most recent call last):
+100.83.37.175: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 25, in <module>
+100.83.37.175: from megatron.model import LLaMAModel, LLaMAModelPipe
+100.83.37.175: ImportError: cannot import name 'LLaMAModel' from 'megatron.model' (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__init__.py)
+100.83.37.175: Traceback (most recent call last):
+100.83.37.175: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 25, in <module>
+100.83.37.175: from megatron.model import LLaMAModel, LLaMAModelPipe
+100.83.37.175: ImportError: cannot import name 'LLaMAModel' from 'megatron.model' (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__init__.py)
+100.83.37.188: [2024-05-13 09:15:43,168] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 115299
+100.83.37.188: [2024-05-13 09:15:43,170] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 115300
+100.83.37.188: [2024-05-13 09:15:43,170] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 115301
+100.83.37.188: [2024-05-13 09:15:43,170] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 115302
+100.83.37.188: [2024-05-13 09:15:43,171] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 115303
+100.83.37.188: [2024-05-13 09:15:43,171] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 115304
+100.83.37.175: Traceback (most recent call last):
+100.83.37.175: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 25, in <module>
+100.83.37.175: from megatron.model import LLaMAModel, LLaMAModelPipe
+100.83.37.175: ImportError: cannot import name 'LLaMAModel' from 'megatron.model' (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__init__.py)
+100.83.37.188: [2024-05-13 09:15:43,198] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 115305
+100.83.37.175: Traceback (most recent call last):
+100.83.37.175: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 25, in <module>
+100.83.37.175: from megatron.model import LLaMAModel, LLaMAModelPipe
+100.83.37.175: ImportError: cannot import name 'LLaMAModel' from 'megatron.model' (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__init__.py)
+100.83.37.175: Traceback (most recent call last):
+100.83.37.175: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 25, in <module>
+100.83.37.175: from megatron.model import LLaMAModel, LLaMAModelPipe
+100.83.37.175: ImportError: cannot import name 'LLaMAModel' from 'megatron.model' (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__init__.py)
+100.83.37.175: Traceback (most recent call last):
+100.83.37.175: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 25, in <module>
+100.83.37.175: from megatron.model import LLaMAModel, LLaMAModelPipe
+100.83.37.175: ImportError: cannot import name 'LLaMAModel' from 'megatron.model' (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__init__.py)
+100.83.37.188: [2024-05-13 09:15:43,251] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 115306
+100.83.37.188: [2024-05-13 09:15:43,252] [ERROR] [launch.py:322:sigkill_handler] ['/usr/bin/bash', '-c', ' cd /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed && python3 -u ./pretrain_llama.py --deepspeed --tensor-model-parallel-size 1 --pipeline-model-parallel-size 1 --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --layernorm-epsilon 1e-6 --num-layers 24 --hidden-size 2048 --ffn-hidden-size 4096 --num-attention-heads 32 --seq-length 2048 --micro-batch-size 1 --global-batch-size 256 --train-iters 10000 --log-interval 10 --eval-iters 10 --eval-interval 500 --data-path /data/hineng/tokenizer//_raw_content_document --vocab-file /data/hineng/tokenizer//gpt2-vocab.json --merge-file /data/hineng/tokenizer//gpt2-merges.txt --optimizer adamw --adam-beta1 0.9 --adam-beta2 0.95 --adam-eps 1e-6 --lr 3e-4 --lr-decay-style cosine --lr-warmup-iters 2000 --clip-grad 1.0 --weight-decay 0.1 --attention-dropout 0.1 --hidden-dropout 0.1 --tensorboard-dir /data/output/llama13b_multiling_800M/13-05-2024-09:15:33/tensorboard --log-validation-ppl-to-tensorboard --log-batch-size-to-tensorboard --log-timers-to-tensorboard --load /data/output/llama13b_multiling_800M/13-05-2024-09:15:33/checkpoints_zero_stage_2 --deepspeed_config=/data/output/llama13b_multiling_800M/13-05-2024-09:15:33/ds_config.json --zero-stage=0 --exit-interval 0 --no-masked-softmax-fusion --no-bias-gelu-fusion --no-bias-dropout-fusion --use-torch-compile false --use-fused-sdpa true --use-fused-sdpa-with-recompute false --bf16 --use_hpu --distributed-backend=hccl --hpu-deterministic --sequence-parallel --save /data/output/llama13b_multiling_800M/13-05-2024-09:15:33/checkpoints_zero_stage_2 --hf-save /data/output/llama13b_multiling_800M/13-05-2024-09:15:33/hf_ckpt --save-interval 500 --verify-checkpoint --verify-checkpoint-model-type LLAMA'] exits with return code = 1
+100.83.37.175: [2024-05-13 09:15:43,331] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 19210
+100.83.37.175: [2024-05-13 09:15:43,386] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 19211
+100.83.37.175: [2024-05-13 09:15:43,386] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 19212
+100.83.37.175: [2024-05-13 09:15:43,413] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 19213
+100.83.37.175: [2024-05-13 09:15:43,414] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 19214
+100.83.37.175: [2024-05-13 09:15:43,442] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 19218
+100.83.37.175: [2024-05-13 09:15:43,494] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 19221
+100.83.37.175: [2024-05-13 09:15:43,495] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 19225
+100.83.37.175: [2024-05-13 09:15:43,522] [ERROR] [launch.py:322:sigkill_handler] ['/usr/bin/bash', '-c', ' cd
/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed && python3 -u ./pretrain_llama.py --deepspeed --tensor-model-parallel-size 1 --pipeline-model-parallel-size 1 --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --layernorm-epsilon 1e-6 --num-layers 24 --hidden-size 2048 --ffn-hidden-size 4096 --num-attention-heads 32 --seq-length 2048 --micro-batch-size 1 --global-batch-size 256 --train-iters 10000 --log-interval 10 --eval-iters 10 --eval-interval 500 --data-path /data/hineng/tokenizer//_raw_content_document --vocab-file /data/hineng/tokenizer//gpt2-vocab.json --merge-file /data/hineng/tokenizer//gpt2-merges.txt --optimizer adamw --adam-beta1 0.9 --adam-beta2 0.95 --adam-eps 1e-6 --lr 3e-4 --lr-decay-style cosine --lr-warmup-iters 2000 --clip-grad 1.0 --weight-decay 0.1 --attention-dropout 0.1 --hidden-dropout 0.1 --tensorboard-dir /data/output/llama13b_multiling_800M/13-05-2024-09:15:33/tensorboard --log-validation-ppl-to-tensorboard --log-batch-size-to-tensorboard --log-timers-to-tensorboard --load /data/output/llama13b_multiling_800M/13-05-2024-09:15:33/checkpoints_zero_stage_2 --deepspeed_config=/data/output/llama13b_multiling_800M/13-05-2024-09:15:33/ds_config.json --zero-stage=0 --exit-interval 0 --no-masked-softmax-fusion --no-bias-gelu-fusion --no-bias-dropout-fusion --use-torch-compile false --use-fused-sdpa true --use-fused-sdpa-with-recompute false --bf16 --use_hpu --distributed-backend=hccl --hpu-deterministic --sequence-parallel --save /data/output/llama13b_multiling_800M/13-05-2024-09:15:33/checkpoints_zero_stage_2 --hf-save /data/output/llama13b_multiling_800M/13-05-2024-09:15:33/hf_ckpt --save-interval 500 --verify-checkpoint --verify-checkpoint-model-type LLAMA'] exits with return code = 1 +pdsh@vizzhy-150-3: 100.83.37.188: ssh exited with exit code 1 +pdsh@vizzhy-150-3: 100.83.37.175: ssh exited with exit code 1 diff --git a/llama13b_multiling_800M/13-05-2024-09:15:33/mds_to_hf_llama_custom.json b/llama13b_multiling_800M/13-05-2024-09:15:33/mds_to_hf_llama_custom.json new file mode 100644 index 0000000000000000000000000000000000000000..984df1b8e167dc47c40209bd7e305c2e697f43a2 --- /dev/null +++ b/llama13b_multiling_800M/13-05-2024-09:15:33/mds_to_hf_llama_custom.json @@ -0,0 +1,40 @@ +{ + "MODEL": { + "num_hidden_layers": 24, + "hidden_size": 2048, + "num_attention_heads": 32, + "intermediate_size": 4096, + "vocab_size":VOCAB_SIZE + }, + "LAYER_MAPPINGS" : { + "word_embeddings": 1, + "transformer": [3, 26], + "final_layernorm": 28, + "final_word_embeddings": 29 + }, + "FULL_NAME_MAPPINGS": { + }, + "PARTIAL_NAME_MAPPINGS": { + "final_word_embeddings": { + "vocab_parallel_projection": "lm_head" + }, + "final_layernorm": { + "final_rmsnorm": "model.norm" + }, + "word_embeddings": { + "word_embeddings": "model.embed_tokens" + }, + "transformer": { + "dense_h_to_4h": "mlp.gate_proj", + "dense_4h_to_h": "mlp.down_proj", + "dense_h_to_4h_swiglu": "mlp.up_proj", + "post_attention_layernorm": "post_attention_layernorm", + "input_layernorm": "input_layernorm", + "dense": "self_attn.o_proj", + "query_key_value": {"query": "self_attn.q_proj", "key": "self_attn.k_proj", "value": "self_attn.v_proj"} + } + }, + "SPECIAL": { + "query_key_value": "attention_qkv" + } +} diff --git a/llama13b_multiling_800M/13-05-2024-09:17:37/ds_config.json b/llama13b_multiling_800M/13-05-2024-09:17:37/ds_config.json new file mode 100644 index 0000000000000000000000000000000000000000..d0ee47e9cda5f537a1be4430e2806d723acd2356 --- /dev/null +++ 
b/llama13b_multiling_800M/13-05-2024-09:17:37/ds_config.json @@ -0,0 +1,19 @@ +{ + "train_batch_size" : 256, + "train_micro_batch_size_per_gpu": 1, + "steps_per_print": 10, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": 0 + }, + "bf16": { + "enabled": true, + "accumulate_grads_via_hooks": true + }, + "fp16": {"enabled": false}, + "wall_clock_breakdown": false, + "pipeline": { + "pipe_partitioned": false, + "grad_partitioned": false + } +} diff --git a/llama13b_multiling_800M/13-05-2024-09:17:37/log.txt b/llama13b_multiling_800M/13-05-2024-09:17:37/log.txt new file mode 100644 index 0000000000000000000000000000000000000000..b54df2a1901fcd8af1569a72f4b51fb36cd2f1ca --- /dev/null +++ b/llama13b_multiling_800M/13-05-2024-09:17:37/log.txt @@ -0,0 +1,192 @@ +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. + warnings.warn( +[2024-05-13 09:17:39,563] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +[2024-05-13 09:17:40,878] [INFO] [runner.py:574:main] deepspeed_env file = /root/.deepspeed_env +[2024-05-13 09:17:40,878] [INFO] [multinode_runner.py:72:get_cmd] Running on the following workers: 100.83.37.175,100.83.37.188 +[2024-05-13 09:17:40,878] [INFO] [runner.py:585:main] cmd = pdsh -S -f 1024 -w 100.83.37.175,100.83.37.188 export PYTHONPATH=/Model-References/PyTorch/common:/usr/lib/habanalabs/:$PYTHONPATH; export PYTHON=/usr/bin/python3; export PT_HPU_LAZY_ACC_PAR_MODE=0; export PT_HPU_ENABLE_REFINE_DYNAMIC_SHAPES=0; export PT_HPU_ENABLE_WEIGHT_CPU_PERMUTE=0; export MODEL_REFERENCES_ROOT=/Model-References; export ENABLE_CONSOLE=false; export LOG_LEVEL_ALL=4; cd /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed; /usr/bin/python3 -u -m deepspeed.launcher.launch --world_info=eyIxMDAuODMuMzcuMTc1IjogWzAsIDEsIDIsIDMsIDQsIDUsIDYsIDddLCAiMTAwLjgzLjM3LjE4OCI6IFswLCAxLCAyLCAzLCA0LCA1LCA2LCA3XX0= --node_rank=%n --master_addr=100.83.37.175 --master_port=29500 --no_python --no_local_rank /usr/bin/bash -c ' cd /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed && python3 -u ./pretrain_llama.py --deepspeed --tensor-model-parallel-size 1 --pipeline-model-parallel-size 1 --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --layernorm-epsilon 1e-6 --num-layers 24 --hidden-size 2048 --ffn-hidden-size 4096 --num-attention-heads 32 --seq-length 2048 --micro-batch-size 1 --global-batch-size 256 --train-iters 10000 --log-interval 10 --eval-iters 10 --eval-interval 500 --data-path /data/hineng/tokenizer//_raw_content_document --vocab-file /data/hineng/tokenizer//gpt2-vocab.json --merge-file /data/hineng/tokenizer//gpt2-merges.txt --optimizer adamw --adam-beta1 0.9 --adam-beta2 0.95 --adam-eps 1e-6 --lr 3e-4 --lr-decay-style cosine --lr-warmup-iters 2000 --clip-grad 1.0 --weight-decay 0.1 --attention-dropout 0.1 --hidden-dropout 0.1 --tensorboard-dir /data/output/llama13b_multiling_800M/13-05-2024-09:17:37/tensorboard --log-validation-ppl-to-tensorboard --log-batch-size-to-tensorboard --log-timers-to-tensorboard --load /data/output/llama13b_multiling_800M/13-05-2024-09:17:37/checkpoints_zero_stage_2 --deepspeed_config=/data/output/llama13b_multiling_800M/13-05-2024-09:17:37/ds_config.json --zero-stage=0 --exit-interval 0 --no-masked-softmax-fusion --no-bias-gelu-fusion --no-bias-dropout-fusion 
--use-torch-compile false --use-fused-sdpa true --use-fused-sdpa-with-recompute false --bf16 --use_hpu --distributed-backend=hccl --hpu-deterministic --sequence-parallel --save /data/output/llama13b_multiling_800M/13-05-2024-09:17:37/checkpoints_zero_stage_2 --hf-save /data/output/llama13b_multiling_800M/13-05-2024-09:17:37/hf_ckpt --save-interval 500 --verify-checkpoint --verify-checkpoint-model-type LLAMA'
+100.83.37.188: Warning: Permanently added '[100.83.37.188]:3122' (ED25519) to the list of known hosts.
+100.83.37.175: Warning: Permanently added '[100.83.37.175]:3122' (ED25519) to the list of known hosts.
+100.83.37.175: * Starting OpenBSD Secure Shell server sshd
+100.83.37.175: ...done.
+100.83.37.188: * Starting OpenBSD Secure Shell server sshd
+100.83.37.188: ...done.
+100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+100.83.37.188: warnings.warn(
+100.83.37.188: [2024-05-13 09:17:42,581] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect)
+100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+100.83.37.175: warnings.warn(
+100.83.37.175: [2024-05-13 09:17:42,604] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect)
+100.83.37.188: [2024-05-13 09:17:43,708] [INFO] [launch.py:146:main] WORLD INFO DICT: {'100.83.37.175': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.37.188': [0, 1, 2, 3, 4, 5, 6, 7]}
+100.83.37.188: [2024-05-13 09:17:43,709] [INFO] [launch.py:152:main] nnodes=2, num_local_procs=8, node_rank=1
+100.83.37.188: [2024-05-13 09:17:43,709] [INFO] [launch.py:163:main] global_rank_mapping=defaultdict(<class 'list'>, {'100.83.37.175': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.37.188': [8, 9, 10, 11, 12, 13, 14, 15]})
+100.83.37.188: [2024-05-13 09:17:43,709] [INFO] [launch.py:164:main] dist_world_size=16
+100.83.37.188: [2024-05-13 09:17:43,709] [INFO] [launch.py:166:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
+100.83.37.188: * Starting OpenBSD Secure Shell server sshd
+100.83.37.188: * Starting OpenBSD Secure Shell server sshd
+100.83.37.188: * Starting OpenBSD Secure Shell server sshd
+100.83.37.188: * Starting OpenBSD Secure Shell server sshd
+100.83.37.188: * Starting OpenBSD Secure Shell server sshd
+100.83.37.188: * Starting OpenBSD Secure Shell server sshd
+100.83.37.188: * Starting OpenBSD Secure Shell server sshd
+100.83.37.188: * Starting OpenBSD Secure Shell server sshd
+100.83.37.188: ...done.
+100.83.37.188: ...done.
+100.83.37.188: ...done.
+100.83.37.188: ...done.
+100.83.37.188: ...done.
+100.83.37.188: ...done.
+100.83.37.188: ...done.
+100.83.37.188: ...done.
+100.83.37.175: [2024-05-13 09:17:44,192] [INFO] [launch.py:146:main] WORLD INFO DICT: {'100.83.37.175': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.37.188': [0, 1, 2, 3, 4, 5, 6, 7]}
+100.83.37.175: [2024-05-13 09:17:44,192] [INFO] [launch.py:152:main] nnodes=2, num_local_procs=8, node_rank=0
+100.83.37.175: [2024-05-13 09:17:44,192] [INFO] [launch.py:163:main] global_rank_mapping=defaultdict(<class 'list'>, {'100.83.37.175': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.37.188': [8, 9, 10, 11, 12, 13, 14, 15]})
+100.83.37.175: [2024-05-13 09:17:44,192] [INFO] [launch.py:164:main] dist_world_size=16
+100.83.37.175: [2024-05-13 09:17:44,192] [INFO] [launch.py:166:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
+100.83.37.175: * Starting OpenBSD Secure Shell server sshd
+100.83.37.175: * Starting OpenBSD Secure Shell server sshd
+100.83.37.175: * Starting OpenBSD Secure Shell server sshd
+100.83.37.175: ...done.
+100.83.37.175: ...done.
+100.83.37.175: * Starting OpenBSD Secure Shell server sshd
+100.83.37.175: ...done.
+100.83.37.175: ...done.
+100.83.37.175: * Starting OpenBSD Secure Shell server sshd
+100.83.37.175: * Starting OpenBSD Secure Shell server sshd
+100.83.37.175: * Starting OpenBSD Secure Shell server sshd
+100.83.37.175: * Starting OpenBSD Secure Shell server sshd
+100.83.37.175: ...done.
+100.83.37.175: ...done.
+100.83.37.175: ...done.
+100.83.37.175: ...done.
+100.83.37.188: [2024-05-13 09:17:45,563] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect)
+100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+100.83.37.188: warnings.warn(
+100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+100.83.37.188: warnings.warn(
+100.83.37.188: [2024-05-13 09:17:45,570] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect)
+100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+100.83.37.188: warnings.warn(
+100.83.37.188: [2024-05-13 09:17:45,591] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect)
+100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+100.83.37.188: warnings.warn(
+100.83.37.188: [2024-05-13 09:17:45,601] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect)
+100.83.37.188: [2024-05-13 09:17:45,602] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect)
+100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+100.83.37.188: warnings.warn( +100.83.37.188: [2024-05-13 09:17:45,653] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.188: warnings.warn( +100.83.37.188: [2024-05-13 09:17:45,657] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.188: warnings.warn( +100.83.37.188: [2024-05-13 09:17:45,693] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.188: warnings.warn( +100.83.37.175: [2024-05-13 09:17:45,897] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.175: warnings.warn( +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.175: warnings.warn( +100.83.37.175: [2024-05-13 09:17:45,898] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.175: warnings.warn( +100.83.37.175: [2024-05-13 09:17:45,908] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.175: [2024-05-13 09:17:45,958] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.175: warnings.warn( +100.83.37.175: [2024-05-13 09:17:46,008] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. 
+100.83.37.175: warnings.warn(
+100.83.37.175: [2024-05-13 09:17:46,041] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect)
+100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+100.83.37.175: warnings.warn(
+100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+100.83.37.175: warnings.warn(
+100.83.37.175: [2024-05-13 09:17:46,042] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect)
+100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+100.83.37.175: warnings.warn(
+100.83.37.175: [2024-05-13 09:17:46,045] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect)
+100.83.37.188: Traceback (most recent call last):
+100.83.37.188: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 25, in <module>
+100.83.37.188: from megatron.model import LLaMAModel, LLaMAModelPipe
+100.83.37.188: ImportError: cannot import name 'LLaMAModel' from 'megatron.model' (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__init__.py)
[... identical traceback repeated by the remaining ranks on 100.83.37.188 ...]
+100.83.37.175: Traceback (most recent call last):
+100.83.37.175: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 25, in <module>
+100.83.37.175: from megatron.model import LLaMAModel, LLaMAModelPipe
+100.83.37.175: ImportError: cannot import name 'LLaMAModel' from 'megatron.model' (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__init__.py)
[... identical traceback repeated by the remaining ranks on 100.83.37.175 ...]
+100.83.37.188: [2024-05-13 09:17:47,715] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 116640
+100.83.37.188: [2024-05-13 09:17:47,717] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 116641
+100.83.37.188: [2024-05-13 09:17:47,744] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 116642
+100.83.37.188: [2024-05-13 09:17:47,771] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 116643
+100.83.37.188: [2024-05-13 09:17:47,771] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 116644
+100.83.37.188: [2024-05-13 09:17:47,772] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 116645
+100.83.37.188: [2024-05-13 09:17:47,799] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 116646
+100.83.37.188: [2024-05-13 09:17:47,799] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 116647
+100.83.37.188: [2024-05-13 09:17:47,826] [ERROR] [launch.py:322:sigkill_handler] ['/usr/bin/bash', '-c', ' cd /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed && python3 -u ./pretrain_llama.py --deepspeed --tensor-model-parallel-size 1 --pipeline-model-parallel-size 1 --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --layernorm-epsilon 1e-6 --num-layers 24 --hidden-size 2048 --ffn-hidden-size 4096 --num-attention-heads 32 --seq-length 2048 --micro-batch-size 1 --global-batch-size 256 --train-iters 10000 --log-interval 10 --eval-iters 10 --eval-interval 500 --data-path /data/hineng/tokenizer//_raw_content_document --vocab-file /data/hineng/tokenizer//gpt2-vocab.json --merge-file /data/hineng/tokenizer//gpt2-merges.txt --optimizer adamw --adam-beta1 0.9 --adam-beta2 0.95 --adam-eps 1e-6 --lr 3e-4 --lr-decay-style cosine --lr-warmup-iters 2000 --clip-grad 1.0 --weight-decay 0.1 --attention-dropout 0.1 --hidden-dropout 0.1 --tensorboard-dir /data/output/llama13b_multiling_800M/13-05-2024-09:17:37/tensorboard --log-validation-ppl-to-tensorboard --log-batch-size-to-tensorboard --log-timers-to-tensorboard --load /data/output/llama13b_multiling_800M/13-05-2024-09:17:37/checkpoints_zero_stage_2 --deepspeed_config=/data/output/llama13b_multiling_800M/13-05-2024-09:17:37/ds_config.json --zero-stage=0 --exit-interval 0 --no-masked-softmax-fusion --no-bias-gelu-fusion --no-bias-dropout-fusion --use-torch-compile false --use-fused-sdpa true --use-fused-sdpa-with-recompute false --bf16 --use_hpu --distributed-backend=hccl --hpu-deterministic --sequence-parallel --save /data/output/llama13b_multiling_800M/13-05-2024-09:17:37/checkpoints_zero_stage_2 --hf-save /data/output/llama13b_multiling_800M/13-05-2024-09:17:37/hf_ckpt --save-interval 500 --verify-checkpoint --verify-checkpoint-model-type LLAMA'] exits with return code = 1
+100.83.37.175: [2024-05-13 09:17:48,200] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 20742
+100.83.37.175: [2024-05-13 09:17:48,201] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 20743
+pdsh@vizzhy-150-3: 100.83.37.188: ssh exited with exit code 1
+100.83.37.175: [2024-05-13 09:17:48,229] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 20744
+100.83.37.175: [2024-05-13 09:17:48,229] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 20745
+100.83.37.175: [2024-05-13 09:17:48,229] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 20749
+100.83.37.175: [2024-05-13 09:17:48,230] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 20753
+100.83.37.175: [2024-05-13 09:17:48,230] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 20755
+100.83.37.175: [2024-05-13 09:17:48,257] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 20760
+100.83.37.175: [2024-05-13 09:17:48,310] [ERROR] [launch.py:322:sigkill_handler] ['/usr/bin/bash', '-c', ' cd /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed && python3 -u ./pretrain_llama.py --deepspeed --tensor-model-parallel-size 1 --pipeline-model-parallel-size 1 --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --layernorm-epsilon 1e-6 --num-layers 24 --hidden-size 2048 --ffn-hidden-size 4096 --num-attention-heads 32 --seq-length 2048 --micro-batch-size 1 --global-batch-size 256 --train-iters 10000 --log-interval 10 --eval-iters 10 --eval-interval 500 --data-path /data/hineng/tokenizer//_raw_content_document --vocab-file /data/hineng/tokenizer//gpt2-vocab.json --merge-file /data/hineng/tokenizer//gpt2-merges.txt --optimizer adamw --adam-beta1 0.9 --adam-beta2 0.95 --adam-eps 1e-6 --lr 3e-4 --lr-decay-style cosine --lr-warmup-iters 2000 --clip-grad 1.0 --weight-decay 0.1 --attention-dropout 0.1 --hidden-dropout 0.1 --tensorboard-dir /data/output/llama13b_multiling_800M/13-05-2024-09:17:37/tensorboard --log-validation-ppl-to-tensorboard --log-batch-size-to-tensorboard --log-timers-to-tensorboard --load /data/output/llama13b_multiling_800M/13-05-2024-09:17:37/checkpoints_zero_stage_2 --deepspeed_config=/data/output/llama13b_multiling_800M/13-05-2024-09:17:37/ds_config.json --zero-stage=0 --exit-interval 0 --no-masked-softmax-fusion --no-bias-gelu-fusion --no-bias-dropout-fusion --use-torch-compile false --use-fused-sdpa true --use-fused-sdpa-with-recompute false --bf16 --use_hpu --distributed-backend=hccl --hpu-deterministic --sequence-parallel --save /data/output/llama13b_multiling_800M/13-05-2024-09:17:37/checkpoints_zero_stage_2 --hf-save /data/output/llama13b_multiling_800M/13-05-2024-09:17:37/hf_ckpt --save-interval 500 --verify-checkpoint --verify-checkpoint-model-type LLAMA'] exits with return code = 1
+pdsh@vizzhy-150-3: 100.83.37.175: ssh exited with exit code 1
diff --git a/llama13b_multiling_800M/13-05-2024-09:17:37/mds_to_hf_llama_custom.json b/llama13b_multiling_800M/13-05-2024-09:17:37/mds_to_hf_llama_custom.json
new file mode 100644
index 0000000000000000000000000000000000000000..984df1b8e167dc47c40209bd7e305c2e697f43a2
--- /dev/null
+++ b/llama13b_multiling_800M/13-05-2024-09:17:37/mds_to_hf_llama_custom.json
@@ -0,0 +1,40 @@
+{
+ "MODEL": {
+ "num_hidden_layers": 24,
+ "hidden_size": 2048,
+ "num_attention_heads": 32,
+ "intermediate_size": 4096,
+
"vocab_size":VOCAB_SIZE + }, + "LAYER_MAPPINGS" : { + "word_embeddings": 1, + "transformer": [3, 26], + "final_layernorm": 28, + "final_word_embeddings": 29 + }, + "FULL_NAME_MAPPINGS": { + }, + "PARTIAL_NAME_MAPPINGS": { + "final_word_embeddings": { + "vocab_parallel_projection": "lm_head" + }, + "final_layernorm": { + "final_rmsnorm": "model.norm" + }, + "word_embeddings": { + "word_embeddings": "model.embed_tokens" + }, + "transformer": { + "dense_h_to_4h": "mlp.gate_proj", + "dense_4h_to_h": "mlp.down_proj", + "dense_h_to_4h_swiglu": "mlp.up_proj", + "post_attention_layernorm": "post_attention_layernorm", + "input_layernorm": "input_layernorm", + "dense": "self_attn.o_proj", + "query_key_value": {"query": "self_attn.q_proj", "key": "self_attn.k_proj", "value": "self_attn.v_proj"} + } + }, + "SPECIAL": { + "query_key_value": "attention_qkv" + } +} diff --git a/llama13b_multiling_800M/13-05-2024-09:19:04/ds_config.json b/llama13b_multiling_800M/13-05-2024-09:19:04/ds_config.json new file mode 100644 index 0000000000000000000000000000000000000000..d0ee47e9cda5f537a1be4430e2806d723acd2356 --- /dev/null +++ b/llama13b_multiling_800M/13-05-2024-09:19:04/ds_config.json @@ -0,0 +1,19 @@ +{ + "train_batch_size" : 256, + "train_micro_batch_size_per_gpu": 1, + "steps_per_print": 10, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": 0 + }, + "bf16": { + "enabled": true, + "accumulate_grads_via_hooks": true + }, + "fp16": {"enabled": false}, + "wall_clock_breakdown": false, + "pipeline": { + "pipe_partitioned": false, + "grad_partitioned": false + } +} diff --git a/llama13b_multiling_800M/13-05-2024-09:19:04/log.txt b/llama13b_multiling_800M/13-05-2024-09:19:04/log.txt new file mode 100644 index 0000000000000000000000000000000000000000..7a86c56a767f690e9419f317b8ab3f625b038593 --- /dev/null +++ b/llama13b_multiling_800M/13-05-2024-09:19:04/log.txt @@ -0,0 +1,338 @@ +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. 
+ warnings.warn( +[2024-05-13 09:19:05,899] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +[2024-05-13 09:19:07,222] [INFO] [runner.py:574:main] deepspeed_env file = /root/.deepspeed_env +[2024-05-13 09:19:07,222] [INFO] [multinode_runner.py:72:get_cmd] Running on the following workers: 100.83.37.175,100.83.37.188 +[2024-05-13 09:19:07,222] [INFO] [runner.py:585:main] cmd = pdsh -S -f 1024 -w 100.83.37.175,100.83.37.188 export PYTHONPATH=/Model-References/PyTorch/common:/usr/lib/habanalabs/:$PYTHONPATH; export PYTHON=/usr/bin/python3; export PT_HPU_LAZY_ACC_PAR_MODE=0; export PT_HPU_ENABLE_REFINE_DYNAMIC_SHAPES=0; export PT_HPU_ENABLE_WEIGHT_CPU_PERMUTE=0; export MODEL_REFERENCES_ROOT=/Model-References; export ENABLE_CONSOLE=false; export LOG_LEVEL_ALL=4; cd /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed; /usr/bin/python3 -u -m deepspeed.launcher.launch --world_info=eyIxMDAuODMuMzcuMTc1IjogWzAsIDEsIDIsIDMsIDQsIDUsIDYsIDddLCAiMTAwLjgzLjM3LjE4OCI6IFswLCAxLCAyLCAzLCA0LCA1LCA2LCA3XX0= --node_rank=%n --master_addr=100.83.37.175 --master_port=29500 --no_python --no_local_rank /usr/bin/bash -c ' cd /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed && python3 -u ./pretrain_llama.py --deepspeed --tensor-model-parallel-size 1 --pipeline-model-parallel-size 1 --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --layernorm-epsilon 1e-6 --num-layers 24 --hidden-size 2048 --ffn-hidden-size 4096 --num-attention-heads 32 --seq-length 2048 --micro-batch-size 1 --global-batch-size 256 --train-iters 10000 --log-interval 10 --eval-iters 10 --eval-interval 500 --data-path /data/hineng/tokenizer//_raw_content_document --vocab-file /data/hineng/tokenizer//gpt2-vocab.json --merge-file /data/hineng/tokenizer//gpt2-merges.txt --optimizer adamw --adam-beta1 0.9 --adam-beta2 0.95 --adam-eps 1e-6 --lr 3e-4 --lr-decay-style cosine --lr-warmup-iters 2000 --clip-grad 1.0 --weight-decay 0.1 --attention-dropout 0.1 --hidden-dropout 0.1 --tensorboard-dir /data/output/llama13b_multiling_800M/13-05-2024-09:19:04/tensorboard --log-validation-ppl-to-tensorboard --log-batch-size-to-tensorboard --log-timers-to-tensorboard --load /data/output/llama13b_multiling_800M/13-05-2024-09:19:04/checkpoints_zero_stage_2 --deepspeed_config=/data/output/llama13b_multiling_800M/13-05-2024-09:19:04/ds_config.json --zero-stage=0 --exit-interval 0 --no-masked-softmax-fusion --no-bias-gelu-fusion --no-bias-dropout-fusion --use-torch-compile false --use-fused-sdpa true --use-fused-sdpa-with-recompute false --bf16 --use_hpu --distributed-backend=hccl --hpu-deterministic --sequence-parallel --save /data/output/llama13b_multiling_800M/13-05-2024-09:19:04/checkpoints_zero_stage_2 --hf-save /data/output/llama13b_multiling_800M/13-05-2024-09:19:04/hf_ckpt --save-interval 500 --verify-checkpoint --verify-checkpoint-model-type LLAMA' +100.83.37.175: Warning: Permanently added '[100.83.37.175]:3122' (ED25519) to the list of known hosts. +100.83.37.188: Warning: Permanently added '[100.83.37.188]:3122' (ED25519) to the list of known hosts. +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: ...done. +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: ...done. +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. 
Please use torch.use_deterministic_algorithms instead.
+100.83.37.188: warnings.warn(
+100.83.37.188: [2024-05-13 09:19:08,928] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect)
+100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+100.83.37.175: warnings.warn(
+100.83.37.175: [2024-05-13 09:19:08,948] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect)
+100.83.37.188: [2024-05-13 09:19:10,060] [INFO] [launch.py:146:main] WORLD INFO DICT: {'100.83.37.175': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.37.188': [0, 1, 2, 3, 4, 5, 6, 7]}
+100.83.37.188: [2024-05-13 09:19:10,061] [INFO] [launch.py:152:main] nnodes=2, num_local_procs=8, node_rank=1
+100.83.37.188: [2024-05-13 09:19:10,061] [INFO] [launch.py:163:main] global_rank_mapping=defaultdict(<class 'list'>, {'100.83.37.175': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.37.188': [8, 9, 10, 11, 12, 13, 14, 15]})
+100.83.37.188: [2024-05-13 09:19:10,061] [INFO] [launch.py:164:main] dist_world_size=16
+100.83.37.188: [2024-05-13 09:19:10,061] [INFO] [launch.py:166:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
+100.83.37.188: * Starting OpenBSD Secure Shell server sshd
+100.83.37.188: * Starting OpenBSD Secure Shell server sshd
+100.83.37.188: * Starting OpenBSD Secure Shell server sshd
+100.83.37.188: * Starting OpenBSD Secure Shell server sshd
+100.83.37.188: * Starting OpenBSD Secure Shell server sshd
+100.83.37.188: * Starting OpenBSD Secure Shell server sshd
+100.83.37.188: * Starting OpenBSD Secure Shell server sshd
+100.83.37.188: * Starting OpenBSD Secure Shell server sshd
+100.83.37.188: ...done.
+100.83.37.188: ...done.
+100.83.37.188: ...done.
+100.83.37.188: ...done.
+100.83.37.188: ...done.
+100.83.37.188: ...done.
+100.83.37.188: ...done.
+100.83.37.188: ...done.
+100.83.37.175: [2024-05-13 09:19:10,515] [INFO] [launch.py:146:main] WORLD INFO DICT: {'100.83.37.175': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.37.188': [0, 1, 2, 3, 4, 5, 6, 7]}
+100.83.37.175: [2024-05-13 09:19:10,515] [INFO] [launch.py:152:main] nnodes=2, num_local_procs=8, node_rank=0
+100.83.37.175: [2024-05-13 09:19:10,515] [INFO] [launch.py:163:main] global_rank_mapping=defaultdict(<class 'list'>, {'100.83.37.175': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.37.188': [8, 9, 10, 11, 12, 13, 14, 15]})
+100.83.37.175: [2024-05-13 09:19:10,515] [INFO] [launch.py:164:main] dist_world_size=16
+100.83.37.175: [2024-05-13 09:19:10,515] [INFO] [launch.py:166:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
+100.83.37.175: * Starting OpenBSD Secure Shell server sshd
+100.83.37.175: * Starting OpenBSD Secure Shell server sshd
+100.83.37.175: * Starting OpenBSD Secure Shell server sshd
+100.83.37.175: * Starting OpenBSD Secure Shell server sshd
+100.83.37.175: ...done.
+100.83.37.175: ...done.
+100.83.37.175: ...done.
+100.83.37.175: * Starting OpenBSD Secure Shell server sshd
+100.83.37.175: ...done.
+100.83.37.175: * Starting OpenBSD Secure Shell server sshd
+100.83.37.175: ...done.
+100.83.37.175: ...done.
+100.83.37.175: * Starting OpenBSD Secure Shell server sshd
+100.83.37.175: * Starting OpenBSD Secure Shell server sshd
+100.83.37.175: ...done.
+100.83.37.175: ...done.
+100.83.37.188: [2024-05-13 09:19:11,871] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.188: warnings.warn( +100.83.37.188: [2024-05-13 09:19:11,885] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.188: warnings.warn( +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.188: warnings.warn( +100.83.37.188: [2024-05-13 09:19:11,891] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.188: [2024-05-13 09:19:11,941] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.188: warnings.warn( +100.83.37.188: [2024-05-13 09:19:11,983] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.188: warnings.warn( +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.188: warnings.warn( +100.83.37.188: [2024-05-13 09:19:11,989] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.188: warnings.warn( +100.83.37.188: [2024-05-13 09:19:11,991] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. 
+100.83.37.188: warnings.warn( +100.83.37.188: [2024-05-13 09:19:11,998] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.175: [2024-05-13 09:19:12,271] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.175: warnings.warn( +100.83.37.175: [2024-05-13 09:19:12,293] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.175: warnings.warn( +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.175: warnings.warn( +100.83.37.175: [2024-05-13 09:19:12,303] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.175: [2024-05-13 09:19:12,347] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.175: warnings.warn( +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.175: warnings.warn( +100.83.37.175: [2024-05-13 09:19:12,348] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.175: [2024-05-13 09:19:12,356] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.175: warnings.warn( +100.83.37.175: [2024-05-13 09:19:12,379] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.175: warnings.warn( +100.83.37.175: [2024-05-13 09:19:12,820] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. 
+100.83.37.175: warnings.warn( +100.83.37.188: Traceback (most recent call last): +100.83.37.188: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 20, in +100.83.37.188: from megatron import get_args +100.83.37.188: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/__init__.py", line 17, in +100.83.37.188: from .initialize import initialize_megatron +100.83.37.188: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/initialize.py", line 20, in +100.83.37.188: from megatron.arguments import (parse_args, validate_args) +100.83.37.188: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/arguments.py", line 20, in +100.83.37.188: from megatron.model.utils import init_method_normal +100.83.37.188: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__init__.py", line 16, in +100.83.37.188: from .llama_model import LLaMAModel, LLaMAModelPipe +100.83.37.188: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/llama_model.py", line 29, in +100.83.37.188: from .utils import init_method_normal, scaled_init_method_normal, WrapName +100.83.37.188: ImportError: cannot import name 'WrapName' from 'megatron.model.utils' (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/utils.py) +100.83.37.188: Traceback (most recent call last): +100.83.37.188: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 20, in +100.83.37.188: from megatron import get_args +100.83.37.188: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/__init__.py", line 17, in +100.83.37.188: from .initialize import initialize_megatron +100.83.37.188: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/initialize.py", line 20, in +100.83.37.188: from megatron.arguments import (parse_args, validate_args) +100.83.37.188: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/arguments.py", line 20, in +100.83.37.188: from megatron.model.utils import init_method_normal +100.83.37.188: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__init__.py", line 16, in +100.83.37.188: from .llama_model import LLaMAModel, LLaMAModelPipe +100.83.37.188: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/llama_model.py", line 29, in +100.83.37.188: from .utils import init_method_normal, scaled_init_method_normal, WrapName +100.83.37.188: ImportError: cannot import name 'WrapName' from 'megatron.model.utils' (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/utils.py) +100.83.37.188: Traceback (most recent call last): +100.83.37.188: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 20, in +100.83.37.188: from megatron import get_args +100.83.37.188: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/__init__.py", line 17, in +100.83.37.188: from .initialize import initialize_megatron +100.83.37.188: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/initialize.py", line 20, in +100.83.37.188: from megatron.arguments import (parse_args, validate_args) +100.83.37.188: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/arguments.py", line 
20, in <module> +100.83.37.188: from megatron.model.utils import init_method_normal +100.83.37.188: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__init__.py", line 16, in <module> +100.83.37.188: from .llama_model import LLaMAModel, LLaMAModelPipe +100.83.37.188: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/llama_model.py", line 29, in <module> +100.83.37.188: from .utils import init_method_normal, scaled_init_method_normal, WrapName +100.83.37.188: ImportError: cannot import name 'WrapName' from 'megatron.model.utils' (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/utils.py) [... the identical traceback is repeated by each remaining rank on 100.83.37.188 ...] +100.83.37.175: Traceback (most recent call last): +100.83.37.175: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 20, in <module> +100.83.37.175: from megatron import get_args +100.83.37.175: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/__init__.py", line 17, in <module> +100.83.37.175: from .initialize import initialize_megatron +100.83.37.175: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/initialize.py", line 20, in <module> +100.83.37.175: from megatron.arguments import (parse_args, validate_args) +100.83.37.175: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/arguments.py", line 20, in <module> +100.83.37.175: from megatron.model.utils import init_method_normal +100.83.37.175: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__init__.py", line 16, in <module> +100.83.37.175: from .llama_model import LLaMAModel, LLaMAModelPipe +100.83.37.175: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/llama_model.py", line 29, in <module> +100.83.37.175: from .utils import init_method_normal, scaled_init_method_normal, WrapName +100.83.37.175: ImportError: cannot import name 'WrapName' from 'megatron.model.utils' (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/utils.py) +100.83.37.188: [2024-05-13 09:19:14,067] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 117981 +100.83.37.188: [2024-05-13 09:19:14,069] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 117982 +100.83.37.188: [2024-05-13 09:19:14,069] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 117983 +100.83.37.188: [2024-05-13 09:19:14,069] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 117984 +100.83.37.188: [2024-05-13 09:19:14,070] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 117985 +100.83.37.188: [2024-05-13 09:19:14,070] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 117986 +100.83.37.188: [2024-05-13 09:19:14,070] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 117987 +100.83.37.188: [2024-05-13 09:19:14,070] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 117988 +100.83.37.188: [2024-05-13 09:19:14,071] [ERROR] [launch.py:322:sigkill_handler] ['/usr/bin/bash', '-c', ' cd /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed && python3 -u ./pretrain_llama.py --deepspeed --tensor-model-parallel-size 1 --pipeline-model-parallel-size 1 --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --layernorm-epsilon 1e-6 --num-layers 24 --hidden-size 2048 --ffn-hidden-size 4096 --num-attention-heads 32 --seq-length 2048 --micro-batch-size 1 --global-batch-size 256 --train-iters 10000 --log-interval 10 --eval-iters 10 --eval-interval 500 --data-path /data/hineng/tokenizer//_raw_content_document --vocab-file /data/hineng/tokenizer//gpt2-vocab.json --merge-file /data/hineng/tokenizer//gpt2-merges.txt --optimizer adamw --adam-beta1 0.9 --adam-beta2 0.95 --adam-eps 1e-6 --lr 3e-4 --lr-decay-style cosine --lr-warmup-iters 2000 --clip-grad 1.0 --weight-decay 0.1 --attention-dropout 0.1 --hidden-dropout 0.1 --tensorboard-dir /data/output/llama13b_multiling_800M/13-05-2024-09:19:04/tensorboard --log-validation-ppl-to-tensorboard --log-batch-size-to-tensorboard --log-timers-to-tensorboard --load /data/output/llama13b_multiling_800M/13-05-2024-09:19:04/checkpoints_zero_stage_2 --deepspeed_config=/data/output/llama13b_multiling_800M/13-05-2024-09:19:04/ds_config.json --zero-stage=0 --exit-interval 0 --no-masked-softmax-fusion --no-bias-gelu-fusion --no-bias-dropout-fusion --use-torch-compile false --use-fused-sdpa true --use-fused-sdpa-with-recompute false --bf16 --use_hpu --distributed-backend=hccl --hpu-deterministic --sequence-parallel --save /data/output/llama13b_multiling_800M/13-05-2024-09:19:04/checkpoints_zero_stage_2 --hf-save /data/output/llama13b_multiling_800M/13-05-2024-09:19:04/hf_ckpt --save-interval 500 --verify-checkpoint --verify-checkpoint-model-type LLAMA'] exits with return code = 1
[... the identical ImportError traceback is repeated by the remaining ranks on 100.83.37.175 ...] +pdsh@vizzhy-150-3: 100.83.37.188: ssh exited with exit code 1 +100.83.37.175: [2024-05-13 09:19:14,523] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 22272 +100.83.37.175: [2024-05-13 09:19:14,551] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 22273 +100.83.37.175: [2024-05-13 09:19:14,551] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 22274 +100.83.37.175: [2024-05-13 09:19:14,605] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 22275 +100.83.37.175: [2024-05-13 09:19:14,632] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 22276 +100.83.37.175: [2024-05-13 09:19:14,685] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 22280 +100.83.37.175: [2024-05-13 09:19:14,686] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 22286 +100.83.37.175: [2024-05-13 09:19:14,738] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 22288 +100.83.37.175: [2024-05-13 09:19:14,739] [ERROR] [launch.py:322:sigkill_handler] ['/usr/bin/bash', '-c', ' cd 
/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed && python3 -u ./pretrain_llama.py --deepspeed --tensor-model-parallel-size 1 --pipeline-model-parallel-size 1 --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --layernorm-epsilon 1e-6 --num-layers 24 --hidden-size 2048 --ffn-hidden-size 4096 --num-attention-heads 32 --seq-length 2048 --micro-batch-size 1 --global-batch-size 256 --train-iters 10000 --log-interval 10 --eval-iters 10 --eval-interval 500 --data-path /data/hineng/tokenizer//_raw_content_document --vocab-file /data/hineng/tokenizer//gpt2-vocab.json --merge-file /data/hineng/tokenizer//gpt2-merges.txt --optimizer adamw --adam-beta1 0.9 --adam-beta2 0.95 --adam-eps 1e-6 --lr 3e-4 --lr-decay-style cosine --lr-warmup-iters 2000 --clip-grad 1.0 --weight-decay 0.1 --attention-dropout 0.1 --hidden-dropout 0.1 --tensorboard-dir /data/output/llama13b_multiling_800M/13-05-2024-09:19:04/tensorboard --log-validation-ppl-to-tensorboard --log-batch-size-to-tensorboard --log-timers-to-tensorboard --load /data/output/llama13b_multiling_800M/13-05-2024-09:19:04/checkpoints_zero_stage_2 --deepspeed_config=/data/output/llama13b_multiling_800M/13-05-2024-09:19:04/ds_config.json --zero-stage=0 --exit-interval 0 --no-masked-softmax-fusion --no-bias-gelu-fusion --no-bias-dropout-fusion --use-torch-compile false --use-fused-sdpa true --use-fused-sdpa-with-recompute false --bf16 --use_hpu --distributed-backend=hccl --hpu-deterministic --sequence-parallel --save /data/output/llama13b_multiling_800M/13-05-2024-09:19:04/checkpoints_zero_stage_2 --hf-save /data/output/llama13b_multiling_800M/13-05-2024-09:19:04/hf_ckpt --save-interval 500 --verify-checkpoint --verify-checkpoint-model-type LLAMA'] exits with return code = 1 +pdsh@vizzhy-150-3: 100.83.37.175: ssh exited with exit code 1 diff --git a/llama13b_multiling_800M/13-05-2024-09:19:04/mds_to_hf_llama_custom.json b/llama13b_multiling_800M/13-05-2024-09:19:04/mds_to_hf_llama_custom.json new file mode 100644 index 0000000000000000000000000000000000000000..984df1b8e167dc47c40209bd7e305c2e697f43a2 --- /dev/null +++ b/llama13b_multiling_800M/13-05-2024-09:19:04/mds_to_hf_llama_custom.json @@ -0,0 +1,40 @@ +{ + "MODEL": { + "num_hidden_layers": 24, + "hidden_size": 2048, + "num_attention_heads": 32, + "intermediate_size": 4096, + "vocab_size":VOCAB_SIZE + }, + "LAYER_MAPPINGS" : { + "word_embeddings": 1, + "transformer": [3, 26], + "final_layernorm": 28, + "final_word_embeddings": 29 + }, + "FULL_NAME_MAPPINGS": { + }, + "PARTIAL_NAME_MAPPINGS": { + "final_word_embeddings": { + "vocab_parallel_projection": "lm_head" + }, + "final_layernorm": { + "final_rmsnorm": "model.norm" + }, + "word_embeddings": { + "word_embeddings": "model.embed_tokens" + }, + "transformer": { + "dense_h_to_4h": "mlp.gate_proj", + "dense_4h_to_h": "mlp.down_proj", + "dense_h_to_4h_swiglu": "mlp.up_proj", + "post_attention_layernorm": "post_attention_layernorm", + "input_layernorm": "input_layernorm", + "dense": "self_attn.o_proj", + "query_key_value": {"query": "self_attn.q_proj", "key": "self_attn.k_proj", "value": "self_attn.v_proj"} + } + }, + "SPECIAL": { + "query_key_value": "attention_qkv" + } +} diff --git a/llama13b_multiling_800M/13-05-2024-09:21:14/ds_config.json b/llama13b_multiling_800M/13-05-2024-09:21:14/ds_config.json new file mode 100644 index 0000000000000000000000000000000000000000..d0ee47e9cda5f537a1be4430e2806d723acd2356 --- /dev/null +++ b/llama13b_multiling_800M/13-05-2024-09:21:14/ds_config.json @@ 
-0,0 +1,19 @@ +{ + "train_batch_size" : 256, + "train_micro_batch_size_per_gpu": 1, + "steps_per_print": 10, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": 0 + }, + "bf16": { + "enabled": true, + "accumulate_grads_via_hooks": true + }, + "fp16": {"enabled": false}, + "wall_clock_breakdown": false, + "pipeline": { + "pipe_partitioned": false, + "grad_partitioned": false + } +} diff --git a/llama13b_multiling_800M/13-05-2024-09:21:14/log.txt b/llama13b_multiling_800M/13-05-2024-09:21:14/log.txt new file mode 100644 index 0000000000000000000000000000000000000000..232757c50fb6ac5f486e938506ebe24b2c9c9d2a --- /dev/null +++ b/llama13b_multiling_800M/13-05-2024-09:21:14/log.txt @@ -0,0 +1,352 @@ +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. + warnings.warn( +[2024-05-13 09:21:16,057] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +[2024-05-13 09:21:17,379] [INFO] [runner.py:574:main] deepspeed_env file = /root/.deepspeed_env +[2024-05-13 09:21:17,380] [INFO] [multinode_runner.py:72:get_cmd] Running on the following workers: 100.83.37.175,100.83.37.188 +[2024-05-13 09:21:17,380] [INFO] [runner.py:585:main] cmd = pdsh -S -f 1024 -w 100.83.37.175,100.83.37.188 export PYTHONPATH=/Model-References/PyTorch/common:/usr/lib/habanalabs/:$PYTHONPATH; export PYTHON=/usr/bin/python3; export PT_HPU_LAZY_ACC_PAR_MODE=0; export PT_HPU_ENABLE_REFINE_DYNAMIC_SHAPES=0; export PT_HPU_ENABLE_WEIGHT_CPU_PERMUTE=0; export MODEL_REFERENCES_ROOT=/Model-References; export ENABLE_CONSOLE=false; export LOG_LEVEL_ALL=4; cd /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed; /usr/bin/python3 -u -m deepspeed.launcher.launch --world_info=eyIxMDAuODMuMzcuMTc1IjogWzAsIDEsIDIsIDMsIDQsIDUsIDYsIDddLCAiMTAwLjgzLjM3LjE4OCI6IFswLCAxLCAyLCAzLCA0LCA1LCA2LCA3XX0= --node_rank=%n --master_addr=100.83.37.175 --master_port=29500 --no_python --no_local_rank /usr/bin/bash -c ' cd /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed && python3 -u ./pretrain_llama.py --deepspeed --tensor-model-parallel-size 1 --pipeline-model-parallel-size 1 --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --layernorm-epsilon 1e-6 --num-layers 24 --hidden-size 2048 --ffn-hidden-size 4096 --num-attention-heads 32 --seq-length 2048 --micro-batch-size 1 --global-batch-size 256 --train-iters 10000 --log-interval 10 --eval-iters 10 --eval-interval 500 --data-path /data/hineng/tokenizer//_raw_content_document --vocab-file /data/hineng/tokenizer//gpt2-vocab.json --merge-file /data/hineng/tokenizer//gpt2-merges.txt --optimizer adamw --adam-beta1 0.9 --adam-beta2 0.95 --adam-eps 1e-6 --lr 3e-4 --lr-decay-style cosine --lr-warmup-iters 2000 --clip-grad 1.0 --weight-decay 0.1 --attention-dropout 0.1 --hidden-dropout 0.1 --tensorboard-dir /data/output/llama13b_multiling_800M/13-05-2024-09:21:14/tensorboard --log-validation-ppl-to-tensorboard --log-batch-size-to-tensorboard --log-timers-to-tensorboard --load /data/output/llama13b_multiling_800M/13-05-2024-09:21:14/checkpoints_zero_stage_2 --deepspeed_config=/data/output/llama13b_multiling_800M/13-05-2024-09:21:14/ds_config.json --zero-stage=0 --exit-interval 0 --no-masked-softmax-fusion --no-bias-gelu-fusion --no-bias-dropout-fusion --use-torch-compile false --use-fused-sdpa true 
--use-fused-sdpa-with-recompute false --bf16 --use_hpu --distributed-backend=hccl --hpu-deterministic --sequence-parallel --save /data/output/llama13b_multiling_800M/13-05-2024-09:21:14/checkpoints_zero_stage_2 --hf-save /data/output/llama13b_multiling_800M/13-05-2024-09:21:14/hf_ckpt --save-interval 500 --verify-checkpoint --verify-checkpoint-model-type LLAMA' +100.83.37.175: Warning: Permanently added '[100.83.37.175]:3122' (ED25519) to the list of known hosts. +100.83.37.188: Warning: Permanently added '[100.83.37.188]:3122' (ED25519) to the list of known hosts. +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: ...done. +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: ...done. +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.175: warnings.warn( +100.83.37.175: [2024-05-13 09:21:19,106] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.188: warnings.warn( +100.83.37.188: [2024-05-13 09:21:19,149] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.188: [2024-05-13 09:21:20,287] [INFO] [launch.py:146:main] WORLD INFO DICT: {'100.83.37.175': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.37.188': [0, 1, 2, 3, 4, 5, 6, 7]} +100.83.37.188: [2024-05-13 09:21:20,287] [INFO] [launch.py:152:main] nnodes=2, num_local_procs=8, node_rank=1 +100.83.37.188: [2024-05-13 09:21:20,287] [INFO] [launch.py:163:main] global_rank_mapping=defaultdict(<class 'list'>, {'100.83.37.175': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.37.188': [8, 9, 10, 11, 12, 13, 14, 15]}) +100.83.37.188: [2024-05-13 09:21:20,287] [INFO] [launch.py:164:main] dist_world_size=16 +100.83.37.188: [2024-05-13 09:21:20,287] [INFO] [launch.py:166:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: ...done. +100.83.37.188: ...done. +100.83.37.188: ...done. +100.83.37.188: ...done. +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: ...done. +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: ...done. +100.83.37.188: ...done. +100.83.37.188: ...done. 
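For reference, the launcher's --world_info argument visible in the pdsh command above is just base64-encoded JSON mapping each host to its local device IDs; decoding it shows the same 2-node x 8-device topology that the WORLD INFO DICT lines report. A minimal sketch using only the standard library:

# Decode the --world_info payload copied from the launch command above.
import base64
import json

world_info = ("eyIxMDAuODMuMzcuMTc1IjogWzAsIDEsIDIsIDMsIDQsIDUsIDYsIDddLCAiMTAw"
              "LjgzLjM3LjE4OCI6IFswLCAxLCAyLCAzLCA0LCA1LCA2LCA3XX0=")
print(json.loads(base64.b64decode(world_info)))
# {'100.83.37.175': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.37.188': [0, 1, 2, 3, 4, 5, 6, 7]}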
+100.83.37.175: [2024-05-13 09:21:20,675] [INFO] [launch.py:146:main] WORLD INFO DICT: {'100.83.37.175': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.37.188': [0, 1, 2, 3, 4, 5, 6, 7]} +100.83.37.175: [2024-05-13 09:21:20,675] [INFO] [launch.py:152:main] nnodes=2, num_local_procs=8, node_rank=0 +100.83.37.175: [2024-05-13 09:21:20,675] [INFO] [launch.py:163:main] global_rank_mapping=defaultdict(<class 'list'>, {'100.83.37.175': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.37.188': [8, 9, 10, 11, 12, 13, 14, 15]}) +100.83.37.175: [2024-05-13 09:21:20,675] [INFO] [launch.py:164:main] dist_world_size=16 +100.83.37.175: [2024-05-13 09:21:20,675] [INFO] [launch.py:166:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: ...done. +100.83.37.175: ...done. +100.83.37.175: ...done. +100.83.37.175: ...done. +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: ...done. +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: ...done. +100.83.37.175: ...done. +100.83.37.175: ...done. [... per-rank "torch.hpu.setDeterministic is deprecated" UserWarnings and "Setting ds_accelerator to hpu (auto detect)" messages from all 16 ranks omitted ...] +100.83.37.188: Traceback (most recent call last): +100.83.37.188: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 20, in <module> +100.83.37.188: from megatron import get_args +100.83.37.188: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/__init__.py", line 17, in <module> +100.83.37.188: from .initialize import initialize_megatron +100.83.37.188: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/initialize.py", line 20, in <module> +100.83.37.188: from megatron.arguments import (parse_args, validate_args) +100.83.37.188: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/arguments.py", line 20, in <module> +100.83.37.188: from megatron.model.utils import init_method_normal +100.83.37.188: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__init__.py", line 16, in <module> +100.83.37.188: from .llama_model import LLaMAModel, LLaMAModelPipe +100.83.37.188: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/llama_model.py", line 32, in <module> +100.83.37.188: from megatron.model import RMSNorm, LayerNorm, CrossEntropy +100.83.37.188: ImportError: cannot import name 'RMSNorm' from partially initialized module 'megatron.model' (most likely due to a circular import) (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__init__.py) [... the identical circular-import traceback is repeated by the remaining ranks on 100.83.37.188 ...]
"/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/initialize.py", line 20, in +100.83.37.188: from megatron.arguments import (parse_args, validate_args) +100.83.37.188: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/arguments.py", line 20, in +100.83.37.188: from megatron.model.utils import init_method_normal +100.83.37.188: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__init__.py", line 16, in +100.83.37.188: from .llama_model import LLaMAModel, LLaMAModelPipe +100.83.37.188: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/llama_model.py", line 32, in +100.83.37.188: from megatron.model import RMSNorm, LayerNorm, CrossEntropy +100.83.37.188: ImportError: cannot import name 'RMSNorm' from partially initialized module 'megatron.model' (most likely due to a circular import) (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__init__.py) +100.83.37.188: Traceback (most recent call last): +100.83.37.188: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 20, in +100.83.37.188: from megatron import get_args +100.83.37.188: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/__init__.py", line 17, in +100.83.37.188: from .initialize import initialize_megatron +100.83.37.188: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/initialize.py", line 20, in +100.83.37.188: from megatron.arguments import (parse_args, validate_args) +100.83.37.188: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/arguments.py", line 20, in +100.83.37.188: from megatron.model.utils import init_method_normal +100.83.37.188: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__init__.py", line 16, in +100.83.37.188: from .llama_model import LLaMAModel, LLaMAModelPipe +100.83.37.188: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/llama_model.py", line 32, in +100.83.37.188: from megatron.model import RMSNorm, LayerNorm, CrossEntropy +100.83.37.188: ImportError: cannot import name 'RMSNorm' from partially initialized module 'megatron.model' (most likely due to a circular import) (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__init__.py) +100.83.37.175: Traceback (most recent call last): +100.83.37.175: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 20, in +100.83.37.175: from megatron import get_args +100.83.37.175: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/__init__.py", line 17, in +100.83.37.175: from .initialize import initialize_megatron +100.83.37.175: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/initialize.py", line 20, in +100.83.37.175: from megatron.arguments import (parse_args, validate_args) +100.83.37.175: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/arguments.py", line 20, in +100.83.37.175: from megatron.model.utils import init_method_normal +100.83.37.175: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__init__.py", line 16, in +100.83.37.175: from .llama_model import LLaMAModel, LLaMAModelPipe +100.83.37.175: File 
"/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/llama_model.py", line 32, in +100.83.37.175: from megatron.model import RMSNorm, LayerNorm, CrossEntropy +100.83.37.175: ImportError: cannot import name 'RMSNorm' from partially initialized module 'megatron.model' (most likely due to a circular import) (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__init__.py) +100.83.37.175: Traceback (most recent call last): +100.83.37.175: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 20, in +100.83.37.175: from megatron import get_args +100.83.37.175: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/__init__.py", line 17, in +100.83.37.175: from .initialize import initialize_megatron +100.83.37.175: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/initialize.py", line 20, in +100.83.37.175: from megatron.arguments import (parse_args, validate_args) +100.83.37.175: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/arguments.py", line 20, in +100.83.37.175: from megatron.model.utils import init_method_normal +100.83.37.175: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__init__.py", line 16, in +100.83.37.175: from .llama_model import LLaMAModel, LLaMAModelPipe +100.83.37.175: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/llama_model.py", line 32, in +100.83.37.175: from megatron.model import RMSNorm, LayerNorm, CrossEntropy +100.83.37.175: ImportError: cannot import name 'RMSNorm' from partially initialized module 'megatron.model' (most likely due to a circular import) (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__init__.py) +100.83.37.175: Traceback (most recent call last): +100.83.37.175: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 20, in +100.83.37.175: from megatron import get_args +100.83.37.175: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/__init__.py", line 17, in +100.83.37.175: from .initialize import initialize_megatron +100.83.37.175: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/initialize.py", line 20, in +100.83.37.175: from megatron.arguments import (parse_args, validate_args) +100.83.37.175: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/arguments.py", line 20, in +100.83.37.175: from megatron.model.utils import init_method_normal +100.83.37.175: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__init__.py", line 16, in +100.83.37.175: from .llama_model import LLaMAModel, LLaMAModelPipe +100.83.37.175: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/llama_model.py", line 32, in +100.83.37.175: from megatron.model import RMSNorm, LayerNorm, CrossEntropy +100.83.37.175: ImportError: cannot import name 'RMSNorm' from partially initialized module 'megatron.model' (most likely due to a circular import) (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__init__.py) +100.83.37.175: Traceback (most recent call last): +100.83.37.175: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 20, in +100.83.37.175: from megatron import get_args 
+100.83.37.175: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/__init__.py", line 17, in +100.83.37.175: from .initialize import initialize_megatron +100.83.37.175: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/initialize.py", line 20, in +100.83.37.175: from megatron.arguments import (parse_args, validate_args) +100.83.37.175: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/arguments.py", line 20, in +100.83.37.175: from megatron.model.utils import init_method_normal +100.83.37.175: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__init__.py", line 16, in +100.83.37.175: from .llama_model import LLaMAModel, LLaMAModelPipe +100.83.37.175: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/llama_model.py", line 32, in +100.83.37.175: from megatron.model import RMSNorm, LayerNorm, CrossEntropy +100.83.37.175: ImportError: cannot import name 'RMSNorm' from partially initialized module 'megatron.model' (most likely due to a circular import) (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__init__.py) +100.83.37.175: Traceback (most recent call last): +100.83.37.175: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 20, in +100.83.37.175: from megatron import get_args +100.83.37.175: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/__init__.py", line 17, in +100.83.37.175: from .initialize import initialize_megatron +100.83.37.175: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/initialize.py", line 20, in +100.83.37.175: from megatron.arguments import (parse_args, validate_args) +100.83.37.175: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/arguments.py", line 20, in +100.83.37.175: from megatron.model.utils import init_method_normal +100.83.37.175: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__init__.py", line 16, in +100.83.37.175: from .llama_model import LLaMAModel, LLaMAModelPipe +100.83.37.175: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/llama_model.py", line 32, in +100.83.37.175: from megatron.model import RMSNorm, LayerNorm, CrossEntropy +100.83.37.175: ImportError: cannot import name 'RMSNorm' from partially initialized module 'megatron.model' (most likely due to a circular import) (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__init__.py) +100.83.37.188: [2024-05-13 09:21:24,294] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 119322 +100.83.37.188: [2024-05-13 09:21:24,296] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 119323 +100.83.37.188: [2024-05-13 09:21:24,296] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 119324 +100.83.37.188: [2024-05-13 09:21:24,296] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 119325 +100.83.37.188: [2024-05-13 09:21:24,324] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 119326 +100.83.37.188: [2024-05-13 09:21:24,324] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 119327 +100.83.37.188: [2024-05-13 09:21:24,376] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 119328 +100.83.37.188: [2024-05-13 09:21:24,376] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 119329 +100.83.37.188: 
[2024-05-13 09:21:24,377] [ERROR] [launch.py:322:sigkill_handler] ['/usr/bin/bash', '-c', ' cd /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed && python3 -u ./pretrain_llama.py --deepspeed --tensor-model-parallel-size 1 --pipeline-model-parallel-size 1 --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --layernorm-epsilon 1e-6 --num-layers 24 --hidden-size 2048 --ffn-hidden-size 4096 --num-attention-heads 32 --seq-length 2048 --micro-batch-size 1 --global-batch-size 256 --train-iters 10000 --log-interval 10 --eval-iters 10 --eval-interval 500 --data-path /data/hineng/tokenizer//_raw_content_document --vocab-file /data/hineng/tokenizer//gpt2-vocab.json --merge-file /data/hineng/tokenizer//gpt2-merges.txt --optimizer adamw --adam-beta1 0.9 --adam-beta2 0.95 --adam-eps 1e-6 --lr 3e-4 --lr-decay-style cosine --lr-warmup-iters 2000 --clip-grad 1.0 --weight-decay 0.1 --attention-dropout 0.1 --hidden-dropout 0.1 --tensorboard-dir /data/output/llama13b_multiling_800M/13-05-2024-09:21:14/tensorboard --log-validation-ppl-to-tensorboard --log-batch-size-to-tensorboard --log-timers-to-tensorboard --load /data/output/llama13b_multiling_800M/13-05-2024-09:21:14/checkpoints_zero_stage_2 --deepspeed_config=/data/output/llama13b_multiling_800M/13-05-2024-09:21:14/ds_config.json --zero-stage=0 --exit-interval 0 --no-masked-softmax-fusion --no-bias-gelu-fusion --no-bias-dropout-fusion --use-torch-compile false --use-fused-sdpa true --use-fused-sdpa-with-recompute false --bf16 --use_hpu --distributed-backend=hccl --hpu-deterministic --sequence-parallel --save /data/output/llama13b_multiling_800M/13-05-2024-09:21:14/checkpoints_zero_stage_2 --hf-save /data/output/llama13b_multiling_800M/13-05-2024-09:21:14/hf_ckpt --save-interval 500 --verify-checkpoint --verify-checkpoint-model-type LLAMA'] exits with return code = 1
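This 09:21:14 run fails differently from the 09:19:04 one: the failing import in llama_model.py moved from line 29 to line 32 between the two runs, so the sources were evidently edited in between, and the new error is a circular import. llama_model.py now does "from megatron.model import RMSNorm, LayerNorm, CrossEntropy" while megatron/model/__init__.py is still executing (its line 16 is what pulled llama_model in), so the package exists in sys.modules but RMSNorm is not bound on it yet. A self-contained sketch of that failure mode, using a hypothetical package name rather than the actual Megatron sources:

# Minimal reproduction of "cannot import name ... from partially initialized
# module" (hypothetical package "pkg"; not the real Megatron layout).
import os
import sys
import tempfile

tmp = tempfile.mkdtemp()
pkg = os.path.join(tmp, "pkg")
os.makedirs(pkg)
with open(os.path.join(pkg, "__init__.py"), "w") as f:
    # llama_model is imported before RMSNorm is bound on the package
    f.write("from .llama_model import LLaMAModel\nfrom .rmsnorm import RMSNorm\n")
with open(os.path.join(pkg, "rmsnorm.py"), "w") as f:
    f.write("class RMSNorm: pass\n")
with open(os.path.join(pkg, "llama_model.py"), "w") as f:
    # importing back through the package while __init__.py is mid-execution
    f.write("from pkg import RMSNorm\nclass LLaMAModel: pass\n")

sys.path.insert(0, tmp)
try:
    import pkg
except ImportError as e:
    print(e)  # cannot import name 'RMSNorm' from partially initialized module 'pkg'

The usual fix is to import the class from the submodule that defines it (here, the equivalent of "from .rmsnorm import RMSNorm" inside llama_model.py) or to bind RMSNorm in __init__.py before the llama_model import, rather than importing back through the package.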
[... further identical tracebacks from the ranks on 100.83.37.175 omitted ...] +100.83.37.175: [2024-05-13 09:21:24,683] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 23800 +100.83.37.175: [2024-05-13 09:21:24,685] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 23801 +pdsh@vizzhy-150-3: 100.83.37.188: ssh exited with exit code 1 +100.83.37.175: [2024-05-13 09:21:24,738] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 23802 +100.83.37.175: [2024-05-13 09:21:24,739] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 23803 +100.83.37.175: [2024-05-13 09:21:24,739] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 23807 +100.83.37.175: [2024-05-13 09:21:24,791] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 23810 +100.83.37.175: [2024-05-13 09:21:24,819] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 23813 +100.83.37.175: [2024-05-13 09:21:24,820] [INFO] 
+100.83.37.175: [2024-05-13 09:21:24,683] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 23800
+100.83.37.175: [2024-05-13 09:21:24,685] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 23801
+pdsh@vizzhy-150-3: 100.83.37.188: ssh exited with exit code 1
+100.83.37.175: [killing of subprocesses 23802, 23803, 23807, 23810, 23813, 23816 follows through 09:21:24,820]
+100.83.37.175: [2024-05-13 09:21:24,820] [ERROR] [launch.py:322:sigkill_handler] [same command as above] exits with return code = 1
+pdsh@vizzhy-150-3: 100.83.37.175: ssh exited with exit code 1
diff --git a/llama13b_multiling_800M/13-05-2024-09:21:14/mds_to_hf_llama_custom.json b/llama13b_multiling_800M/13-05-2024-09:21:14/mds_to_hf_llama_custom.json
new file mode 100644
index 0000000000000000000000000000000000000000..984df1b8e167dc47c40209bd7e305c2e697f43a2
--- /dev/null
+++ b/llama13b_multiling_800M/13-05-2024-09:21:14/mds_to_hf_llama_custom.json
@@ -0,0 +1,40 @@
+{
+    "MODEL": {
+        "num_hidden_layers": 24,
+        "hidden_size": 2048,
+        "num_attention_heads": 32,
+        "intermediate_size": 4096,
+        "vocab_size":VOCAB_SIZE
+    },
+    "LAYER_MAPPINGS" : {
+        "word_embeddings": 1,
+        "transformer": [3, 26],
+        "final_layernorm": 28,
+        "final_word_embeddings": 29
+    },
+    "FULL_NAME_MAPPINGS": {
+    },
+    "PARTIAL_NAME_MAPPINGS": {
+        "final_word_embeddings": {
+            "vocab_parallel_projection": "lm_head"
+        },
+        "final_layernorm": {
+            "final_rmsnorm": "model.norm"
+        },
+        "word_embeddings": {
+            "word_embeddings": "model.embed_tokens"
+        },
+        "transformer": {
+            "dense_h_to_4h": "mlp.gate_proj",
+            "dense_4h_to_h": "mlp.down_proj",
+            "dense_h_to_4h_swiglu": "mlp.up_proj",
+            "post_attention_layernorm": "post_attention_layernorm",
+            "input_layernorm": "input_layernorm",
+            "dense": "self_attn.o_proj",
+            "query_key_value": {"query": "self_attn.q_proj", "key": "self_attn.k_proj", "value": "self_attn.v_proj"}
+        }
+    },
+    "SPECIAL": {
+        "query_key_value": "attention_qkv"
+    }
+}
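mds_to_hf_llama_custom.json drives the Megatron-DeepSpeed-to-Hugging-Face checkpoint conversion requested by --hf-save: LAYER_MAPPINGS locates each logical block in the pipelined checkpoint layout, PARTIAL_NAME_MAPPINGS renames parameters to HF LLaMA names, and the attention_qkv entry under SPECIAL marks the fused query_key_value weight that must be split into q/k/v projections (the VOCAB_SIZE token is a placeholder substituted at conversion time, which is why the file is not valid JSON as committed). Below is a rough sketch of the renaming step only, on a toy state_dict; the real converter's handling of layer indices and the qkv split is not shown.

    # Hedged sketch of applying PARTIAL_NAME_MAPPINGS-style renames to
    # checkpoint keys. Toy data; not the actual converter behind --hf-save.
    def rename_keys(state_dict, partial_mappings):
        renamed = {}
        for key, value in state_dict.items():
            new_key = key
            for section in partial_mappings.values():
                # Longer names must be tried before their prefixes
                # (e.g. dense_4h_to_h before dense); dicts keep insertion order.
                for mds_name, hf_name in section.items():
                    # The fused query_key_value entry maps to a dict of three
                    # HF names and needs a real tensor split; skip it here.
                    if isinstance(hf_name, str):
                        new_key = new_key.replace(mds_name, hf_name)
            renamed[new_key] = value
        return renamed

    partial = {
        "word_embeddings": {"word_embeddings": "model.embed_tokens"},
        "transformer": {"dense_4h_to_h": "mlp.down_proj", "dense": "self_attn.o_proj"},
    }
    sd = {"word_embeddings.weight": 0, "layers.0.attention.dense.weight": 0}
    print(rename_keys(sd, partial))
    # {'model.embed_tokens.weight': 0, 'layers.0.attention.self_attn.o_proj.weight': 0}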
diff --git a/llama13b_multiling_800M/13-05-2024-09:23:20/ds_config.json b/llama13b_multiling_800M/13-05-2024-09:23:20/ds_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..d0ee47e9cda5f537a1be4430e2806d723acd2356
--- /dev/null
+++ b/llama13b_multiling_800M/13-05-2024-09:23:20/ds_config.json
@@ -0,0 +1,19 @@
+{
+    "train_batch_size" : 256,
+    "train_micro_batch_size_per_gpu": 1,
+    "steps_per_print": 10,
+    "gradient_clipping": 1.0,
+    "zero_optimization": {
+        "stage": 0
+    },
+    "bf16": {
+        "enabled": true,
+        "accumulate_grads_via_hooks": true
+    },
+    "fp16": {"enabled": false},
+    "wall_clock_breakdown": false,
+    "pipeline": {
+        "pipe_partitioned": false,
+        "grad_partitioned": false
+    }
+}
diff --git a/llama13b_multiling_800M/13-05-2024-09:23:20/log.txt b/llama13b_multiling_800M/13-05-2024-09:23:20/log.txt
new file mode 100644
index 0000000000000000000000000000000000000000..eabff197c52b8628e6108a03f9bd3bffb0450bbd
--- /dev/null
+++ b/llama13b_multiling_800M/13-05-2024-09:23:20/log.txt
@@ -0,0 +1,352 @@
+/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+  warnings.warn(
+[2024-05-13 09:23:22,419] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect)
+[2024-05-13 09:23:23,730] [INFO] [runner.py:574:main] deepspeed_env file = /root/.deepspeed_env
+[2024-05-13 09:23:23,730] [INFO] [multinode_runner.py:72:get_cmd] Running on the following workers: 100.83.37.175,100.83.37.188
+[2024-05-13 09:23:23,730] [INFO] [runner.py:585:main] cmd = pdsh -S -f 1024 -w 100.83.37.175,100.83.37.188 export PYTHONPATH=/Model-References/PyTorch/common:/usr/lib/habanalabs/:$PYTHONPATH; export PYTHON=/usr/bin/python3; export PT_HPU_LAZY_ACC_PAR_MODE=0; export PT_HPU_ENABLE_REFINE_DYNAMIC_SHAPES=0; export PT_HPU_ENABLE_WEIGHT_CPU_PERMUTE=0; export MODEL_REFERENCES_ROOT=/Model-References; export ENABLE_CONSOLE=false; export LOG_LEVEL_ALL=4; cd /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed; /usr/bin/python3 -u -m deepspeed.launcher.launch --world_info=eyIxMDAuODMuMzcuMTc1IjogWzAsIDEsIDIsIDMsIDQsIDUsIDYsIDddLCAiMTAwLjgzLjM3LjE4OCI6IFswLCAxLCAyLCAzLCA0LCA1LCA2LCA3XX0= --node_rank=%n --master_addr=100.83.37.175 --master_port=29500 --no_python --no_local_rank /usr/bin/bash -c ' cd /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed && python3 -u ./pretrain_llama.py [same training arguments as the 09:21:14 run, with all output paths under /data/output/llama13b_multiling_800M/13-05-2024-09:23:20]'
+100.83.37.188: Warning: Permanently added '[100.83.37.188]:3122' (ED25519) to the list of known hosts.
+100.83.37.175: Warning: Permanently added '[100.83.37.175]:3122' (ED25519) to the list of known hosts.
+100.83.37.175: * Starting OpenBSD Secure Shell server sshd
+100.83.37.175: ...done.
+100.83.37.188: * Starting OpenBSD Secure Shell server sshd
+100.83.37.188: ...done.
+100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+100.83.37.188:   warnings.warn(
+100.83.37.188: [2024-05-13 09:23:25,440] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect)
+100.83.37.175: [same warning and "Setting ds_accelerator to hpu (auto detect)" at 09:23:25,443]
+100.83.37.188: [2024-05-13 09:23:26,645] [INFO] [launch.py:146:main] WORLD INFO DICT: {'100.83.37.175': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.37.188': [0, 1, 2, 3, 4, 5, 6, 7]}
+100.83.37.188: [2024-05-13 09:23:26,645] [INFO] [launch.py:152:main] nnodes=2, num_local_procs=8, node_rank=1
+100.83.37.188: [2024-05-13 09:23:26,645] [INFO] [launch.py:163:main] global_rank_mapping=defaultdict(<class 'list'>, {'100.83.37.175': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.37.188': [8, 9, 10, 11, 12, 13, 14, 15]})
+100.83.37.188: [2024-05-13 09:23:26,645] [INFO] [launch.py:164:main] dist_world_size=16
+100.83.37.188: [2024-05-13 09:23:26,645] [INFO] [launch.py:166:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
+100.83.37.188: * Starting OpenBSD Secure Shell server sshd [printed 8 times, once per local rank]
+100.83.37.188: ...done. [8 times]
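With --tensor-model-parallel-size 1 and --pipeline-model-parallel-size 1 in this launch, all 16 ranks (dist_world_size=16 above) are data-parallel, and DeepSpeed checks that the three batch settings in ds_config.json are consistent: train_batch_size = micro batch per device x gradient accumulation steps x data-parallel size. For this config that implies 16 accumulation steps per optimizer step:

    # Batch-size bookkeeping for this 2-node x 8-HPU launch (values taken from
    # ds_config.json and the launcher log above).
    train_batch_size = 256     # "train_batch_size"
    micro_batch_per_gpu = 1    # "train_micro_batch_size_per_gpu"
    data_parallel_size = 16    # dist_world_size, since TP = PP = 1
    grad_accum_steps = train_batch_size // (micro_batch_per_gpu * data_parallel_size)
    assert micro_batch_per_gpu * grad_accum_steps * data_parallel_size == train_batch_size
    print(grad_accum_steps)  # 16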
+100.83.37.175: [2024-05-13 09:23:26,998] [INFO] [launch.py:146:main] WORLD INFO DICT: {'100.83.37.175': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.37.188': [0, 1, 2, 3, 4, 5, 6, 7]}
+100.83.37.175: [2024-05-13 09:23:26,998] [INFO] [launch.py:152:main] nnodes=2, num_local_procs=8, node_rank=0
+100.83.37.175: [2024-05-13 09:23:26,998] [INFO] [launch.py:163:main] global_rank_mapping=defaultdict(<class 'list'>, {'100.83.37.175': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.37.188': [8, 9, 10, 11, 12, 13, 14, 15]})
+100.83.37.175: [2024-05-13 09:23:26,998] [INFO] [launch.py:164:main] dist_world_size=16
+100.83.37.175: [2024-05-13 09:23:26,998] [INFO] [launch.py:166:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
+100.83.37.175: * Starting OpenBSD Secure Shell server sshd [printed 8 times, once per local rank]
+100.83.37.175: ...done. [8 times]
+[between 09:23:28,463 and 09:23:29,024 each of the 16 ranks on 100.83.37.188 and 100.83.37.175 prints the torch.hpu.setDeterministic deprecation UserWarning followed by "Setting ds_accelerator to hpu (auto detect)"]
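The UserWarning repeated by every rank is harmless but noisy; it names the standard PyTorch replacement for the deprecated torch.hpu.setDeterministic call, which is presumably triggered somewhere under the --hpu-deterministic flag. Silencing it would mean the framework or training script switching to:

    # The replacement the warning asks for (standard PyTorch API); whether
    # --hpu-deterministic can be rerouted to it is framework-dependent.
    import torch
    torch.use_deterministic_algorithms(True)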
+100.83.37.188: Traceback (most recent call last):
+100.83.37.188:   File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 20, in <module>
+100.83.37.188:     from megatron import get_args
+100.83.37.188:   File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/__init__.py", line 17, in <module>
+100.83.37.188:     from .initialize import initialize_megatron
+100.83.37.188:   File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/initialize.py", line 20, in <module>
+100.83.37.188:     from megatron.arguments import (parse_args, validate_args)
+100.83.37.188:   File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/arguments.py", line 20, in <module>
+100.83.37.188:     from megatron.model.utils import init_method_normal
+100.83.37.188:   File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__init__.py", line 16, in <module>
+100.83.37.188:     from .llama_model import LLaMAModel, LLaMAModelPipe
+100.83.37.188:   File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/llama_model.py", line 32, in <module>
+100.83.37.188:     from megatron.model import RMSNorm, LayerNorm, CrossEntropy
+100.83.37.188: ImportError: cannot import name 'RMSNorm' from partially initialized module 'megatron.model' (most likely due to a circular import) (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__init__.py)
+[the identical circular-import traceback is printed by every rank on 100.83.37.188 and 100.83.37.175; several copies are interleaved in the raw log]
+100.83.37.188: [2024-05-13 09:23:30,652] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 120663
+100.83.37.188: [killing of subprocesses 120664-120670 follows through 09:23:30,656]
+100.83.37.188: [2024-05-13 09:23:30,656] [ERROR] [launch.py:322:sigkill_handler] [same command as above] exits with return code = 1
+100.83.37.175: [a few late ranks print the same circular-import traceback as they are killed]
+100.83.37.175: [2024-05-13 09:23:31,007] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 25330
+100.83.37.175: [2024-05-13 09:23:31,009] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 25331
+pdsh@vizzhy-150-3: 100.83.37.188: ssh exited with exit code 1
+100.83.37.175: [killing of subprocesses 25332, 25333, 25337, 25340, 25344, 25348 follows through 09:23:31,145]
+100.83.37.175: [2024-05-13 09:23:31,198] [ERROR] [launch.py:322:sigkill_handler] [same command as above] exits with return code = 1
+pdsh@vizzhy-150-3: 100.83.37.175: ssh exited with exit code 1
diff --git a/llama13b_multiling_800M/13-05-2024-09:23:20/mds_to_hf_llama_custom.json b/llama13b_multiling_800M/13-05-2024-09:23:20/mds_to_hf_llama_custom.json
new file mode 100644
index 0000000000000000000000000000000000000000..984df1b8e167dc47c40209bd7e305c2e697f43a2
--- /dev/null
+++ b/llama13b_multiling_800M/13-05-2024-09:23:20/mds_to_hf_llama_custom.json
@@ -0,0 +1,40 @@
[file content identical to the 13-05-2024-09:21:14 copy above]
diff --git a/llama13b_multiling_800M/13-05-2024-09:29:05/ds_config.json b/llama13b_multiling_800M/13-05-2024-09:29:05/ds_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..d0ee47e9cda5f537a1be4430e2806d723acd2356
--- /dev/null
+++ b/llama13b_multiling_800M/13-05-2024-09:29:05/ds_config.json
@@ -0,0 +1,19 @@
[file content identical to the 13-05-2024-09:23:20 copy above]
diff --git a/llama13b_multiling_800M/13-05-2024-09:29:05/log.txt b/llama13b_multiling_800M/13-05-2024-09:29:05/log.txt
new file mode 100644
index 0000000000000000000000000000000000000000..83d4a766a1efd127c69089edfa1050fb49581dc1
--- /dev/null
+++ b/llama13b_multiling_800M/13-05-2024-09:29:05/log.txt
@@ -0,0 +1,192 @@
+/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+  warnings.warn(
+[2024-05-13 09:29:06,726] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect)
+[2024-05-13 09:29:08,058] [INFO] [runner.py:574:main] deepspeed_env file = /root/.deepspeed_env
+[2024-05-13 09:29:08,058] [INFO] [multinode_runner.py:72:get_cmd] Running on the following workers: 100.83.37.175,100.83.37.188
+[2024-05-13 09:29:08,058] [INFO] [runner.py:585:main] cmd = pdsh -S -f 1024 -w 100.83.37.175,100.83.37.188 [same environment exports and launch command as the 09:23:20 run, with all output paths under /data/output/llama13b_multiling_800M/13-05-2024-09:29:05]
+100.83.37.175: Warning: Permanently added '[100.83.37.175]:3122' (ED25519) to the list of known hosts.
+100.83.37.188: Warning: Permanently added '[100.83.37.188]:3122' (ED25519) to the list of known hosts.
+100.83.37.175: * Starting OpenBSD Secure Shell server sshd
+100.83.37.175: ...done.
+100.83.37.188: * Starting OpenBSD Secure Shell server sshd
+100.83.37.188: ...done.
+100.83.37.175: [torch.hpu.setDeterministic UserWarning and "Setting ds_accelerator to hpu (auto detect)" at 09:29:09,769]
+100.83.37.188: [same at 09:29:09,793]
+100.83.37.188: [2024-05-13 09:29:10,927] [INFO] [launch.py:146:main] WORLD INFO DICT: {'100.83.37.175': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.37.188': [0, 1, 2, 3, 4, 5, 6, 7]}
+100.83.37.188: [2024-05-13 09:29:10,927] [INFO] [launch.py:152:main] nnodes=2, num_local_procs=8, node_rank=1
+100.83.37.188: [2024-05-13 09:29:10,927] [INFO] [launch.py:163:main] global_rank_mapping=defaultdict(<class 'list'>, {'100.83.37.175': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.37.188': [8, 9, 10, 11, 12, 13, 14, 15]})
+100.83.37.188: [2024-05-13 09:29:10,927] [INFO] [launch.py:164:main] dist_world_size=16
+100.83.37.188: [2024-05-13 09:29:10,927] [INFO] [launch.py:166:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
+100.83.37.188: * Starting OpenBSD Secure Shell server sshd [printed 8 times, once per local rank]
+100.83.37.188: ...done. [8 times]
+100.83.37.175: [2024-05-13 09:29:11,349] [INFO] [launch.py:146:main] WORLD INFO DICT: {'100.83.37.175': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.37.188': [0, 1, 2, 3, 4, 5, 6, 7]} +100.83.37.175: [2024-05-13 09:29:11,349] [INFO] [launch.py:152:main] nnodes=2, num_local_procs=8, node_rank=0 +100.83.37.175: [2024-05-13 09:29:11,349] [INFO] [launch.py:163:main] global_rank_mapping=defaultdict(, {'100.83.37.175': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.37.188': [8, 9, 10, 11, 12, 13, 14, 15]}) +100.83.37.175: [2024-05-13 09:29:11,349] [INFO] [launch.py:164:main] dist_world_size=16 +100.83.37.175: [2024-05-13 09:29:11,349] [INFO] [launch.py:166:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: ...done. +100.83.37.175: ...done. +100.83.37.175: ...done. +100.83.37.175: ...done. +100.83.37.175: ...done. +100.83.37.175: ...done. +100.83.37.175: ...done. +100.83.37.175: ...done. +100.83.37.188: [2024-05-13 09:29:12,742] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.188: warnings.warn( +100.83.37.188: [2024-05-13 09:29:12,778] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.188: warnings.warn( +100.83.37.188: [2024-05-13 09:29:12,800] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.188: warnings.warn( +100.83.37.188: [2024-05-13 09:29:12,837] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.188: warnings.warn( +100.83.37.188: [2024-05-13 09:29:12,847] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. 
+100.83.37.188: warnings.warn( +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.188: warnings.warn( +100.83.37.188: [2024-05-13 09:29:12,851] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.175: [2024-05-13 09:29:13,074] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.175: warnings.warn( +100.83.37.175: [2024-05-13 09:29:13,080] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.175: warnings.warn( +100.83.37.188: [2024-05-13 09:29:13,083] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.188: warnings.warn( +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.175: warnings.warn( +100.83.37.175: [2024-05-13 09:29:13,090] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.175: warnings.warn( +100.83.37.175: [2024-05-13 09:29:13,091] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.175: warnings.warn( +100.83.37.175: [2024-05-13 09:29:13,095] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.175: [2024-05-13 09:29:13,188] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.175: warnings.warn( +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. 
Please use torch.use_deterministic_algorithms instead.
+100.83.37.175: warnings.warn(
+100.83.37.175: [2024-05-13 09:29:13,189] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect)
+100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+100.83.37.175: warnings.warn(
+100.83.37.175: [2024-05-13 09:29:13,189] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect)
+100.83.37.188: [2024-05-13 09:29:13,261] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect)
+100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+100.83.37.188: warnings.warn(
+100.83.37.188: Traceback (most recent call last):
+100.83.37.188:   File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 27, in <module>
+100.83.37.188:     from megatron.global_vars import get_current_device
+100.83.37.188: ImportError: cannot import name 'get_current_device' from 'megatron.global_vars' (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/global_vars.py)
+100.83.37.188: Traceback (most recent call last):
+100.83.37.188:   File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 27, in <module>
+100.83.37.188:     from megatron.global_vars import get_current_device
+100.83.37.188: ImportError: cannot import name 'get_current_device' from 'megatron.global_vars' (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/global_vars.py)
+100.83.37.188: Traceback (most recent call last):
+100.83.37.188:   File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 27, in <module>
+100.83.37.188:     from megatron.global_vars import get_current_device
+100.83.37.188: ImportError: cannot import name 'get_current_device' from 'megatron.global_vars' (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/global_vars.py)
+100.83.37.188: Traceback (most recent call last):
+100.83.37.188:   File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 27, in <module>
+100.83.37.188:     from megatron.global_vars import get_current_device
+100.83.37.188: ImportError: cannot import name 'get_current_device' from 'megatron.global_vars' (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/global_vars.py)
+100.83.37.188: Traceback (most recent call last):
+100.83.37.188:   File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 27, in <module>
+100.83.37.188:     from megatron.global_vars import get_current_device
+100.83.37.188: ImportError: cannot import name 'get_current_device' from 'megatron.global_vars' (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/global_vars.py)
+100.83.37.188: Traceback (most recent call last):
+100.83.37.188:   File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 27, in <module>
+100.83.37.188:     from megatron.global_vars import get_current_device
+100.83.37.188: ImportError: cannot import name 'get_current_device' from 'megatron.global_vars' (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/global_vars.py)
+100.83.37.188: Traceback (most recent call last):
+100.83.37.188:   File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 27, in <module>
+100.83.37.188:     from megatron.global_vars import get_current_device
+100.83.37.188: ImportError: cannot import name 'get_current_device' from 'megatron.global_vars' (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/global_vars.py)
+100.83.37.175: Traceback (most recent call last):
+100.83.37.175:   File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 27, in <module>
+100.83.37.175:     from megatron.global_vars import get_current_device
+100.83.37.175: ImportError: cannot import name 'get_current_device' from 'megatron.global_vars' (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/global_vars.py)
+100.83.37.175: Traceback (most recent call last):
+100.83.37.175:   File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 27, in <module>
+100.83.37.175:     from megatron.global_vars import get_current_device
+100.83.37.175: ImportError: cannot import name 'get_current_device' from 'megatron.global_vars' (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/global_vars.py)
+100.83.37.175: Traceback (most recent call last):
+100.83.37.175:   File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 27, in <module>
+100.83.37.175:     from megatron.global_vars import get_current_device
+100.83.37.175: ImportError: cannot import name 'get_current_device' from 'megatron.global_vars' (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/global_vars.py)
+100.83.37.175: Traceback (most recent call last):
+100.83.37.175:   File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 27, in <module>
+100.83.37.175:     from megatron.global_vars import get_current_device
+100.83.37.175: ImportError: cannot import name 'get_current_device' from 'megatron.global_vars' (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/global_vars.py)
+100.83.37.175: Traceback (most recent call last):
+100.83.37.175:   File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 27, in <module>
+100.83.37.175:     from megatron.global_vars import get_current_device
+100.83.37.175: ImportError: cannot import name 'get_current_device' from 'megatron.global_vars' (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/global_vars.py)
+100.83.37.175: Traceback (most recent call last):
+100.83.37.175:   File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 27, in <module>
+100.83.37.175:     from megatron.global_vars import get_current_device
+100.83.37.175: ImportError: cannot import name 'get_current_device' from 'megatron.global_vars' (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/global_vars.py)
+100.83.37.175: Traceback (most recent call last):
+100.83.37.175:   File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 27, in <module>
+100.83.37.175:     from megatron.global_vars import get_current_device
+100.83.37.175: ImportError: cannot import name 'get_current_device' from 'megatron.global_vars' (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/global_vars.py)
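Every rank dies at the same import on pretrain_llama.py line 27: the megatron.global_vars module in this checkout does not define get_current_device, so the training script and the Megatron-DeepSpeed package come from mismatched revisions. The proper fix is aligning the checkout with the script; purely as an illustration, a compatibility shim at the import site could look like the following sketch (hypothetical fallback, HPU-first because the run passes --use_hpu):

# Hypothetical compatibility shim, assuming the missing symbol is the only
# incompatibility. The real fix is checking out the Megatron-DeepSpeed
# revision that matches pretrain_llama.py.
try:
    from megatron.global_vars import get_current_device
except ImportError:
    import torch

    def get_current_device():
        # Prefer the Habana device when its framework is importable
        # (this run uses --use_hpu), else fall back to CUDA or CPU.
        try:
            import habana_frameworks.torch.hpu as hthpu
            if hthpu.is_available():
                return torch.device("hpu")
        except ImportError:
            pass
        return torch.device("cuda" if torch.cuda.is_available() else "cpu")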
+100.83.37.175: Traceback (most recent call last):
+100.83.37.175:   File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 27, in <module>
+100.83.37.175:     from megatron.global_vars import get_current_device
+100.83.37.175: ImportError: cannot import name 'get_current_device' from 'megatron.global_vars' (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/global_vars.py)
+100.83.37.188: Traceback (most recent call last):
+100.83.37.188:   File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 27, in <module>
+100.83.37.188:     from megatron.global_vars import get_current_device
+100.83.37.188: ImportError: cannot import name 'get_current_device' from 'megatron.global_vars' (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/global_vars.py)
+100.83.37.188: [2024-05-13 09:29:15,935] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 122004
+100.83.37.188: [2024-05-13 09:29:15,937] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 122005
+100.83.37.188: [2024-05-13 09:29:15,937] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 122006
+100.83.37.188: [2024-05-13 09:29:15,937] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 122007
+100.83.37.188: [2024-05-13 09:29:15,938] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 122008
+100.83.37.188: [2024-05-13 09:29:15,938] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 122009
+100.83.37.188: [2024-05-13 09:29:15,938] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 122010
+100.83.37.188: [2024-05-13 09:29:15,938] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 122011
+100.83.37.188: [2024-05-13 09:29:15,965] [ERROR] [launch.py:322:sigkill_handler] ['/usr/bin/bash', '-c', ' cd /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed && python3 -u ./pretrain_llama.py --deepspeed --tensor-model-parallel-size 1 --pipeline-model-parallel-size 1 --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --layernorm-epsilon 1e-6 --num-layers 24 --hidden-size 2048 --ffn-hidden-size 4096 --num-attention-heads 32 --seq-length 2048 --micro-batch-size 1 --global-batch-size 256 --train-iters 10000 --log-interval 10 --eval-iters 10 --eval-interval 500 --data-path /data/hineng/tokenizer//_raw_content_document --vocab-file /data/hineng/tokenizer//gpt2-vocab.json --merge-file /data/hineng/tokenizer//gpt2-merges.txt --optimizer adamw --adam-beta1 0.9 --adam-beta2 0.95 --adam-eps 1e-6 --lr 3e-4 --lr-decay-style cosine --lr-warmup-iters 2000 --clip-grad 1.0 --weight-decay 0.1 --attention-dropout 0.1 --hidden-dropout 0.1 --tensorboard-dir /data/output/llama13b_multiling_800M/13-05-2024-09:29:05/tensorboard --log-validation-ppl-to-tensorboard --log-batch-size-to-tensorboard --log-timers-to-tensorboard --load /data/output/llama13b_multiling_800M/13-05-2024-09:29:05/checkpoints_zero_stage_2 --deepspeed_config=/data/output/llama13b_multiling_800M/13-05-2024-09:29:05/ds_config.json --zero-stage=0 --exit-interval 0 --no-masked-softmax-fusion --no-bias-gelu-fusion --no-bias-dropout-fusion --use-torch-compile false --use-fused-sdpa true --use-fused-sdpa-with-recompute false --bf16 --use_hpu --distributed-backend=hccl --hpu-deterministic --sequence-parallel --save /data/output/llama13b_multiling_800M/13-05-2024-09:29:05/checkpoints_zero_stage_2 --hf-save /data/output/llama13b_multiling_800M/13-05-2024-09:29:05/hf_ckpt --save-interval 500
--verify-checkpoint --verify-checkpoint-model-type LLAMA'] exits with return code = 1 +pdsh@vizzhy-150-3: 100.83.37.188: ssh exited with exit code 1 +100.83.37.175: [2024-05-13 09:29:16,357] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 26867 +100.83.37.175: [2024-05-13 09:29:16,359] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 26868 +100.83.37.175: [2024-05-13 09:29:16,359] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 26869 +100.83.37.175: [2024-05-13 09:29:16,360] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 26870 +100.83.37.175: [2024-05-13 09:29:16,360] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 26873 +100.83.37.175: [2024-05-13 09:29:16,361] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 26875 +100.83.37.175: [2024-05-13 09:29:16,361] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 26877 +100.83.37.175: [2024-05-13 09:29:16,362] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 26878 +100.83.37.175: [2024-05-13 09:29:16,362] [ERROR] [launch.py:322:sigkill_handler] ['/usr/bin/bash', '-c', ' cd /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed && python3 -u ./pretrain_llama.py --deepspeed --tensor-model-parallel-size 1 --pipeline-model-parallel-size 1 --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --layernorm-epsilon 1e-6 --num-layers 24 --hidden-size 2048 --ffn-hidden-size 4096 --num-attention-heads 32 --seq-length 2048 --micro-batch-size 1 --global-batch-size 256 --train-iters 10000 --log-interval 10 --eval-iters 10 --eval-interval 500 --data-path /data/hineng/tokenizer//_raw_content_document --vocab-file /data/hineng/tokenizer//gpt2-vocab.json --merge-file /data/hineng/tokenizer//gpt2-merges.txt --optimizer adamw --adam-beta1 0.9 --adam-beta2 0.95 --adam-eps 1e-6 --lr 3e-4 --lr-decay-style cosine --lr-warmup-iters 2000 --clip-grad 1.0 --weight-decay 0.1 --attention-dropout 0.1 --hidden-dropout 0.1 --tensorboard-dir /data/output/llama13b_multiling_800M/13-05-2024-09:29:05/tensorboard --log-validation-ppl-to-tensorboard --log-batch-size-to-tensorboard --log-timers-to-tensorboard --load /data/output/llama13b_multiling_800M/13-05-2024-09:29:05/checkpoints_zero_stage_2 --deepspeed_config=/data/output/llama13b_multiling_800M/13-05-2024-09:29:05/ds_config.json --zero-stage=0 --exit-interval 0 --no-masked-softmax-fusion --no-bias-gelu-fusion --no-bias-dropout-fusion --use-torch-compile false --use-fused-sdpa true --use-fused-sdpa-with-recompute false --bf16 --use_hpu --distributed-backend=hccl --hpu-deterministic --sequence-parallel --save /data/output/llama13b_multiling_800M/13-05-2024-09:29:05/checkpoints_zero_stage_2 --hf-save /data/output/llama13b_multiling_800M/13-05-2024-09:29:05/hf_ckpt --save-interval 500 --verify-checkpoint --verify-checkpoint-model-type LLAMA'] exits with return code = 1 +pdsh@vizzhy-150-3: 100.83.37.175: ssh exited with exit code 1 diff --git a/llama13b_multiling_800M/13-05-2024-09:29:05/mds_to_hf_llama_custom.json b/llama13b_multiling_800M/13-05-2024-09:29:05/mds_to_hf_llama_custom.json new file mode 100644 index 0000000000000000000000000000000000000000..984df1b8e167dc47c40209bd7e305c2e697f43a2 --- /dev/null +++ b/llama13b_multiling_800M/13-05-2024-09:29:05/mds_to_hf_llama_custom.json @@ -0,0 +1,40 @@ +{ + "MODEL": { + "num_hidden_layers": 24, + "hidden_size": 2048, + "num_attention_heads": 32, + "intermediate_size": 4096, + "vocab_size":VOCAB_SIZE + }, + "LAYER_MAPPINGS" : { + "word_embeddings": 1, + 
"transformer": [3, 26], + "final_layernorm": 28, + "final_word_embeddings": 29 + }, + "FULL_NAME_MAPPINGS": { + }, + "PARTIAL_NAME_MAPPINGS": { + "final_word_embeddings": { + "vocab_parallel_projection": "lm_head" + }, + "final_layernorm": { + "final_rmsnorm": "model.norm" + }, + "word_embeddings": { + "word_embeddings": "model.embed_tokens" + }, + "transformer": { + "dense_h_to_4h": "mlp.gate_proj", + "dense_4h_to_h": "mlp.down_proj", + "dense_h_to_4h_swiglu": "mlp.up_proj", + "post_attention_layernorm": "post_attention_layernorm", + "input_layernorm": "input_layernorm", + "dense": "self_attn.o_proj", + "query_key_value": {"query": "self_attn.q_proj", "key": "self_attn.k_proj", "value": "self_attn.v_proj"} + } + }, + "SPECIAL": { + "query_key_value": "attention_qkv" + } +} diff --git a/llama13b_multiling_800M/13-05-2024-09:32:36/ds_config.json b/llama13b_multiling_800M/13-05-2024-09:32:36/ds_config.json new file mode 100644 index 0000000000000000000000000000000000000000..d0ee47e9cda5f537a1be4430e2806d723acd2356 --- /dev/null +++ b/llama13b_multiling_800M/13-05-2024-09:32:36/ds_config.json @@ -0,0 +1,19 @@ +{ + "train_batch_size" : 256, + "train_micro_batch_size_per_gpu": 1, + "steps_per_print": 10, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": 0 + }, + "bf16": { + "enabled": true, + "accumulate_grads_via_hooks": true + }, + "fp16": {"enabled": false}, + "wall_clock_breakdown": false, + "pipeline": { + "pipe_partitioned": false, + "grad_partitioned": false + } +} diff --git a/llama13b_multiling_800M/13-05-2024-09:32:36/log.txt b/llama13b_multiling_800M/13-05-2024-09:32:36/log.txt new file mode 100644 index 0000000000000000000000000000000000000000..9cfded3d3d195b8ca38bac887d4dbc5c21cfd282 --- /dev/null +++ b/llama13b_multiling_800M/13-05-2024-09:32:36/log.txt @@ -0,0 +1,192 @@ +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. 
+ warnings.warn( +[2024-05-13 09:32:38,292] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +[2024-05-13 09:32:39,628] [INFO] [runner.py:574:main] deepspeed_env file = /root/.deepspeed_env +[2024-05-13 09:32:39,628] [INFO] [multinode_runner.py:72:get_cmd] Running on the following workers: 100.83.37.175,100.83.37.188 +[2024-05-13 09:32:39,628] [INFO] [runner.py:585:main] cmd = pdsh -S -f 1024 -w 100.83.37.175,100.83.37.188 export PYTHONPATH=/Model-References/PyTorch/common:/usr/lib/habanalabs/:$PYTHONPATH; export PYTHON=/usr/bin/python3; export PT_HPU_LAZY_ACC_PAR_MODE=0; export PT_HPU_ENABLE_REFINE_DYNAMIC_SHAPES=0; export PT_HPU_ENABLE_WEIGHT_CPU_PERMUTE=0; export MODEL_REFERENCES_ROOT=/Model-References; export ENABLE_CONSOLE=false; export LOG_LEVEL_ALL=4; cd /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed; /usr/bin/python3 -u -m deepspeed.launcher.launch --world_info=eyIxMDAuODMuMzcuMTc1IjogWzAsIDEsIDIsIDMsIDQsIDUsIDYsIDddLCAiMTAwLjgzLjM3LjE4OCI6IFswLCAxLCAyLCAzLCA0LCA1LCA2LCA3XX0= --node_rank=%n --master_addr=100.83.37.175 --master_port=29500 --no_python --no_local_rank /usr/bin/bash -c ' cd /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed && python3 -u ./pretrain_llama.py --deepspeed --tensor-model-parallel-size 1 --pipeline-model-parallel-size 1 --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --layernorm-epsilon 1e-6 --num-layers 24 --hidden-size 2048 --ffn-hidden-size 4096 --num-attention-heads 32 --seq-length 2048 --micro-batch-size 1 --global-batch-size 256 --train-iters 10000 --log-interval 10 --eval-iters 10 --eval-interval 500 --data-path /data/hineng/tokenizer//_raw_content_document --vocab-file /data/hineng/tokenizer//gpt2-vocab.json --merge-file /data/hineng/tokenizer//gpt2-merges.txt --optimizer adamw --adam-beta1 0.9 --adam-beta2 0.95 --adam-eps 1e-6 --lr 3e-4 --lr-decay-style cosine --lr-warmup-iters 2000 --clip-grad 1.0 --weight-decay 0.1 --attention-dropout 0.1 --hidden-dropout 0.1 --tensorboard-dir /data/output/llama13b_multiling_800M/13-05-2024-09:32:36/tensorboard --log-validation-ppl-to-tensorboard --log-batch-size-to-tensorboard --log-timers-to-tensorboard --load /data/output/llama13b_multiling_800M/13-05-2024-09:32:36/checkpoints_zero_stage_2 --deepspeed_config=/data/output/llama13b_multiling_800M/13-05-2024-09:32:36/ds_config.json --zero-stage=0 --exit-interval 0 --no-masked-softmax-fusion --no-bias-gelu-fusion --no-bias-dropout-fusion --use-torch-compile false --use-fused-sdpa true --use-fused-sdpa-with-recompute false --bf16 --use_hpu --distributed-backend=hccl --hpu-deterministic --sequence-parallel --save /data/output/llama13b_multiling_800M/13-05-2024-09:32:36/checkpoints_zero_stage_2 --hf-save /data/output/llama13b_multiling_800M/13-05-2024-09:32:36/hf_ckpt --save-interval 500 --verify-checkpoint --verify-checkpoint-model-type LLAMA' +100.83.37.188: Warning: Permanently added '[100.83.37.188]:3122' (ED25519) to the list of known hosts. +100.83.37.175: Warning: Permanently added '[100.83.37.175]:3122' (ED25519) to the list of known hosts. +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: ...done. +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: ...done. +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. 
Please use torch.use_deterministic_algorithms instead.
+100.83.37.175: warnings.warn(
+100.83.37.175: [2024-05-13 09:32:41,274] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect)
+100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+100.83.37.188: warnings.warn(
+100.83.37.188: [2024-05-13 09:32:41,392] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect)
+100.83.37.188: [2024-05-13 09:32:42,611] [INFO] [launch.py:146:main] WORLD INFO DICT: {'100.83.37.175': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.37.188': [0, 1, 2, 3, 4, 5, 6, 7]}
+100.83.37.188: [2024-05-13 09:32:42,611] [INFO] [launch.py:152:main] nnodes=2, num_local_procs=8, node_rank=1
+100.83.37.188: [2024-05-13 09:32:42,611] [INFO] [launch.py:163:main] global_rank_mapping=defaultdict(<class 'list'>, {'100.83.37.175': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.37.188': [8, 9, 10, 11, 12, 13, 14, 15]})
+100.83.37.188: [2024-05-13 09:32:42,611] [INFO] [launch.py:164:main] dist_world_size=16
+100.83.37.188: [2024-05-13 09:32:42,611] [INFO] [launch.py:166:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
+100.83.37.175: [2024-05-13 09:32:42,649] [INFO] [launch.py:146:main] WORLD INFO DICT: {'100.83.37.175': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.37.188': [0, 1, 2, 3, 4, 5, 6, 7]}
+100.83.37.175: [2024-05-13 09:32:42,649] [INFO] [launch.py:152:main] nnodes=2, num_local_procs=8, node_rank=0
+100.83.37.175: [2024-05-13 09:32:42,649] [INFO] [launch.py:163:main] global_rank_mapping=defaultdict(<class 'list'>, {'100.83.37.175': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.37.188': [8, 9, 10, 11, 12, 13, 14, 15]})
+100.83.37.175: [2024-05-13 09:32:42,649] [INFO] [launch.py:164:main] dist_world_size=16
+100.83.37.175: [2024-05-13 09:32:42,649] [INFO] [launch.py:166:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
+100.83.37.175: * Starting OpenBSD Secure Shell server sshd
+100.83.37.175: * Starting OpenBSD Secure Shell server sshd
+100.83.37.175: * Starting OpenBSD Secure Shell server sshd
+100.83.37.175: * Starting OpenBSD Secure Shell server sshd
+100.83.37.175: ...done.
+100.83.37.175: ...done.
+100.83.37.175: ...done.
+100.83.37.175: ...done.
+100.83.37.175: * Starting OpenBSD Secure Shell server sshd
+100.83.37.175: * Starting OpenBSD Secure Shell server sshd
+100.83.37.175: ...done.
+100.83.37.175: * Starting OpenBSD Secure Shell server sshd
+100.83.37.175: ...done.
+100.83.37.175: * Starting OpenBSD Secure Shell server sshd
+100.83.37.175: ...done.
+100.83.37.175: ...done.
+100.83.37.188: * Starting OpenBSD Secure Shell server sshd
+100.83.37.188: * Starting OpenBSD Secure Shell server sshd
+100.83.37.188: * Starting OpenBSD Secure Shell server sshd
+100.83.37.188: * Starting OpenBSD Secure Shell server sshd
+100.83.37.188: * Starting OpenBSD Secure Shell server sshd
+100.83.37.188: * Starting OpenBSD Secure Shell server sshd
+100.83.37.188: * Starting OpenBSD Secure Shell server sshd
+100.83.37.188: * Starting OpenBSD Secure Shell server sshd
+100.83.37.188: ...done.
+100.83.37.188: ...done.
+100.83.37.188: ...done.
+100.83.37.188: ...done.
+100.83.37.188: ...done.
+100.83.37.188: ...done.
+100.83.37.188: ...done.
+100.83.37.188: ...done.
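For reference, the --world_info argument in the pdsh command above is base64-encoded JSON; decoding it (string copied verbatim from the log) reproduces exactly the WORLD INFO DICT that launch.py prints on both nodes:

import base64
import json

# --world_info blob copied verbatim from the deepspeed launcher command above.
world_info = "eyIxMDAuODMuMzcuMTc1IjogWzAsIDEsIDIsIDMsIDQsIDUsIDYsIDddLCAiMTAwLjgzLjM3LjE4OCI6IFswLCAxLCAyLCAzLCA0LCA1LCA2LCA3XX0="
print(json.loads(base64.b64decode(world_info)))
# -> {'100.83.37.175': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.37.188': [0, 1, 2, 3, 4, 5, 6, 7]}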
+100.83.37.175: [2024-05-13 09:32:44,388] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.175: warnings.warn( +100.83.37.175: [2024-05-13 09:32:44,393] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.175: warnings.warn( +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.175: warnings.warn( +100.83.37.175: [2024-05-13 09:32:44,401] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.175: [2024-05-13 09:32:44,405] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.175: warnings.warn( +100.83.37.188: [2024-05-13 09:32:44,411] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.188: warnings.warn( +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.188: warnings.warn( +100.83.37.188: [2024-05-13 09:32:44,416] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.188: warnings.warn( +100.83.37.188: [2024-05-13 09:32:44,423] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.175: [2024-05-13 09:32:44,476] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.175: warnings.warn( +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. 
+100.83.37.175: warnings.warn( +100.83.37.175: [2024-05-13 09:32:44,478] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.188: [2024-05-13 09:32:44,496] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.188: warnings.warn( +100.83.37.175: [2024-05-13 09:32:44,546] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.175: warnings.warn( +100.83.37.175: [2024-05-13 09:32:44,557] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.175: warnings.warn( +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.188: warnings.warn( +100.83.37.188: [2024-05-13 09:32:44,575] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.188: warnings.warn( +100.83.37.188: [2024-05-13 09:32:44,602] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.188: warnings.warn( +100.83.37.188: [2024-05-13 09:32:44,603] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. 
+100.83.37.188: warnings.warn(
+100.83.37.188: [2024-05-13 09:32:44,606] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect)
+100.83.37.188: Traceback (most recent call last):
+100.83.37.188:   File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 27, in <module>
+100.83.37.188:     from megatron.global_vars import get_current_device
+100.83.37.188: ImportError: cannot import name 'get_current_device' from 'megatron.global_vars' (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/global_vars.py)
+100.83.37.188: Traceback (most recent call last):
+100.83.37.188:   File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 27, in <module>
+100.83.37.188:     from megatron.global_vars import get_current_device
+100.83.37.188: ImportError: cannot import name 'get_current_device' from 'megatron.global_vars' (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/global_vars.py)
+100.83.37.188: Traceback (most recent call last):
+100.83.37.188:   File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 27, in <module>
+100.83.37.188:     from megatron.global_vars import get_current_device
+100.83.37.188: ImportError: cannot import name 'get_current_device' from 'megatron.global_vars' (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/global_vars.py)
+100.83.37.188: Traceback (most recent call last):
+100.83.37.188:   File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 27, in <module>
+100.83.37.188:     from megatron.global_vars import get_current_device
+100.83.37.188: ImportError: cannot import name 'get_current_device' from 'megatron.global_vars' (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/global_vars.py)
+100.83.37.175: Traceback (most recent call last):
+100.83.37.175:   File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 27, in <module>
+100.83.37.175:     from megatron.global_vars import get_current_device
+100.83.37.175: ImportError: cannot import name 'get_current_device' from 'megatron.global_vars' (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/global_vars.py)
+100.83.37.175: Traceback (most recent call last):
+100.83.37.175:   File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 27, in <module>
+100.83.37.175:     from megatron.global_vars import get_current_device
+100.83.37.175: ImportError: cannot import name 'get_current_device' from 'megatron.global_vars' (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/global_vars.py)
+100.83.37.188: Traceback (most recent call last):
+100.83.37.188:   File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 27, in <module>
+100.83.37.188:     from megatron.global_vars import get_current_device
+100.83.37.188: ImportError: cannot import name 'get_current_device' from 'megatron.global_vars' (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/global_vars.py)
+100.83.37.188: Traceback (most recent call last):
+100.83.37.188:   File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 27, in <module>
+100.83.37.188:     from megatron.global_vars import get_current_device
+100.83.37.188: ImportError: cannot import name 'get_current_device' from 'megatron.global_vars' (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/global_vars.py)
+100.83.37.188: Traceback (most recent call last):
+100.83.37.188:   File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 27, in <module>
+100.83.37.188:     from megatron.global_vars import get_current_device
+100.83.37.188: ImportError: cannot import name 'get_current_device' from 'megatron.global_vars' (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/global_vars.py)
+100.83.37.188: Traceback (most recent call last):
+100.83.37.188:   File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 27, in <module>
+100.83.37.188:     from megatron.global_vars import get_current_device
+100.83.37.188: ImportError: cannot import name 'get_current_device' from 'megatron.global_vars' (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/global_vars.py)
+100.83.37.175: Traceback (most recent call last):
+100.83.37.175:   File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 27, in <module>
+100.83.37.175:     from megatron.global_vars import get_current_device
+100.83.37.175: ImportError: cannot import name 'get_current_device' from 'megatron.global_vars' (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/global_vars.py)
+100.83.37.175: Traceback (most recent call last):
+100.83.37.175:   File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 27, in <module>
+100.83.37.175:     from megatron.global_vars import get_current_device
+100.83.37.175: ImportError: cannot import name 'get_current_device' from 'megatron.global_vars' (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/global_vars.py)
+100.83.37.175: Traceback (most recent call last):
+100.83.37.175:   File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 27, in <module>
+100.83.37.175:     from megatron.global_vars import get_current_device
+100.83.37.175: ImportError: cannot import name 'get_current_device' from 'megatron.global_vars' (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/global_vars.py)
+100.83.37.175: Traceback (most recent call last):
+100.83.37.175:   File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 27, in <module>
+100.83.37.175:     from megatron.global_vars import get_current_device
+100.83.37.175: ImportError: cannot import name 'get_current_device' from 'megatron.global_vars' (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/global_vars.py)
+100.83.37.175: Traceback (most recent call last):
+100.83.37.175:   File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 27, in <module>
+100.83.37.175:     from megatron.global_vars import get_current_device
+100.83.37.175: ImportError: cannot import name 'get_current_device' from 'megatron.global_vars' (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/global_vars.py)
+100.83.37.175: Traceback (most recent call last):
+100.83.37.175:   File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 27, in <module>
+100.83.37.175:     from megatron.global_vars import get_current_device
+100.83.37.175: ImportError: cannot import name 'get_current_device' from 'megatron.global_vars' (/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/global_vars.py)
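This 09:32:36 retry fails identically to the 09:29:05 run, on all 16 ranks, which confirms the problem is the code checkout rather than anything node- or launch-specific. A hypothetical per-node pre-flight check, run from the Megatron-DeepSpeed directory, would surface the mismatch before pdsh fans out 16 processes:

# Hypothetical pre-flight check: verify the symbol pretrain_llama.py imports
# actually exists in this checkout of megatron.global_vars.
import importlib

mod = importlib.import_module("megatron.global_vars")
print(hasattr(mod, "get_current_device"))  # prints False on this checkout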
+100.83.37.188: [2024-05-13 09:32:47,619] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 123369 +100.83.37.188: [2024-05-13 09:32:47,620] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 123370 +100.83.37.188: [2024-05-13 09:32:47,621] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 123371 +100.83.37.188: [2024-05-13 09:32:47,621] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 123372 +100.83.37.188: [2024-05-13 09:32:47,621] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 123373 +100.83.37.188: [2024-05-13 09:32:47,621] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 123374 +100.83.37.188: [2024-05-13 09:32:47,621] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 123375 +100.83.37.188: [2024-05-13 09:32:47,622] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 123376 +100.83.37.188: [2024-05-13 09:32:47,622] [ERROR] [launch.py:322:sigkill_handler] ['/usr/bin/bash', '-c', ' cd /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed && python3 -u ./pretrain_llama.py --deepspeed --tensor-model-parallel-size 1 --pipeline-model-parallel-size 1 --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --layernorm-epsilon 1e-6 --num-layers 24 --hidden-size 2048 --ffn-hidden-size 4096 --num-attention-heads 32 --seq-length 2048 --micro-batch-size 1 --global-batch-size 256 --train-iters 10000 --log-interval 10 --eval-iters 10 --eval-interval 500 --data-path /data/hineng/tokenizer//_raw_content_document --vocab-file /data/hineng/tokenizer//gpt2-vocab.json --merge-file /data/hineng/tokenizer//gpt2-merges.txt --optimizer adamw --adam-beta1 0.9 --adam-beta2 0.95 --adam-eps 1e-6 --lr 3e-4 --lr-decay-style cosine --lr-warmup-iters 2000 --clip-grad 1.0 --weight-decay 0.1 --attention-dropout 0.1 --hidden-dropout 0.1 --tensorboard-dir /data/output/llama13b_multiling_800M/13-05-2024-09:32:36/tensorboard --log-validation-ppl-to-tensorboard --log-batch-size-to-tensorboard --log-timers-to-tensorboard --load /data/output/llama13b_multiling_800M/13-05-2024-09:32:36/checkpoints_zero_stage_2 --deepspeed_config=/data/output/llama13b_multiling_800M/13-05-2024-09:32:36/ds_config.json --zero-stage=0 --exit-interval 0 --no-masked-softmax-fusion --no-bias-gelu-fusion --no-bias-dropout-fusion --use-torch-compile false --use-fused-sdpa true --use-fused-sdpa-with-recompute false --bf16 --use_hpu --distributed-backend=hccl --hpu-deterministic --sequence-parallel --save /data/output/llama13b_multiling_800M/13-05-2024-09:32:36/checkpoints_zero_stage_2 --hf-save /data/output/llama13b_multiling_800M/13-05-2024-09:32:36/hf_ckpt --save-interval 500 --verify-checkpoint --verify-checkpoint-model-type LLAMA'] exits with return code = 1 +100.83.37.175: [2024-05-13 09:32:47,658] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 28420 +100.83.37.175: [2024-05-13 09:32:47,659] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 28421 +100.83.37.175: [2024-05-13 09:32:47,660] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 28422 +100.83.37.175: [2024-05-13 09:32:47,660] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 28423 +100.83.37.175: [2024-05-13 09:32:47,661] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 28427 +100.83.37.175: [2024-05-13 09:32:47,661] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 28432 +100.83.37.175: [2024-05-13 09:32:47,661] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 28435 +100.83.37.175: [2024-05-13 09:32:47,662] 
[INFO] [launch.py:316:sigkill_handler] Killing subprocess 28441 +100.83.37.175: [2024-05-13 09:32:47,662] [ERROR] [launch.py:322:sigkill_handler] ['/usr/bin/bash', '-c', ' cd /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed && python3 -u ./pretrain_llama.py --deepspeed --tensor-model-parallel-size 1 --pipeline-model-parallel-size 1 --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --layernorm-epsilon 1e-6 --num-layers 24 --hidden-size 2048 --ffn-hidden-size 4096 --num-attention-heads 32 --seq-length 2048 --micro-batch-size 1 --global-batch-size 256 --train-iters 10000 --log-interval 10 --eval-iters 10 --eval-interval 500 --data-path /data/hineng/tokenizer//_raw_content_document --vocab-file /data/hineng/tokenizer//gpt2-vocab.json --merge-file /data/hineng/tokenizer//gpt2-merges.txt --optimizer adamw --adam-beta1 0.9 --adam-beta2 0.95 --adam-eps 1e-6 --lr 3e-4 --lr-decay-style cosine --lr-warmup-iters 2000 --clip-grad 1.0 --weight-decay 0.1 --attention-dropout 0.1 --hidden-dropout 0.1 --tensorboard-dir /data/output/llama13b_multiling_800M/13-05-2024-09:32:36/tensorboard --log-validation-ppl-to-tensorboard --log-batch-size-to-tensorboard --log-timers-to-tensorboard --load /data/output/llama13b_multiling_800M/13-05-2024-09:32:36/checkpoints_zero_stage_2 --deepspeed_config=/data/output/llama13b_multiling_800M/13-05-2024-09:32:36/ds_config.json --zero-stage=0 --exit-interval 0 --no-masked-softmax-fusion --no-bias-gelu-fusion --no-bias-dropout-fusion --use-torch-compile false --use-fused-sdpa true --use-fused-sdpa-with-recompute false --bf16 --use_hpu --distributed-backend=hccl --hpu-deterministic --sequence-parallel --save /data/output/llama13b_multiling_800M/13-05-2024-09:32:36/checkpoints_zero_stage_2 --hf-save /data/output/llama13b_multiling_800M/13-05-2024-09:32:36/hf_ckpt --save-interval 500 --verify-checkpoint --verify-checkpoint-model-type LLAMA'] exits with return code = 1 +pdsh@vizzhy-150-3: 100.83.37.175: ssh exited with exit code 1 +pdsh@vizzhy-150-3: 100.83.37.188: ssh exited with exit code 1 diff --git a/llama13b_multiling_800M/13-05-2024-09:32:36/mds_to_hf_llama_custom.json b/llama13b_multiling_800M/13-05-2024-09:32:36/mds_to_hf_llama_custom.json new file mode 100644 index 0000000000000000000000000000000000000000..984df1b8e167dc47c40209bd7e305c2e697f43a2 --- /dev/null +++ b/llama13b_multiling_800M/13-05-2024-09:32:36/mds_to_hf_llama_custom.json @@ -0,0 +1,40 @@ +{ + "MODEL": { + "num_hidden_layers": 24, + "hidden_size": 2048, + "num_attention_heads": 32, + "intermediate_size": 4096, + "vocab_size":VOCAB_SIZE + }, + "LAYER_MAPPINGS" : { + "word_embeddings": 1, + "transformer": [3, 26], + "final_layernorm": 28, + "final_word_embeddings": 29 + }, + "FULL_NAME_MAPPINGS": { + }, + "PARTIAL_NAME_MAPPINGS": { + "final_word_embeddings": { + "vocab_parallel_projection": "lm_head" + }, + "final_layernorm": { + "final_rmsnorm": "model.norm" + }, + "word_embeddings": { + "word_embeddings": "model.embed_tokens" + }, + "transformer": { + "dense_h_to_4h": "mlp.gate_proj", + "dense_4h_to_h": "mlp.down_proj", + "dense_h_to_4h_swiglu": "mlp.up_proj", + "post_attention_layernorm": "post_attention_layernorm", + "input_layernorm": "input_layernorm", + "dense": "self_attn.o_proj", + "query_key_value": {"query": "self_attn.q_proj", "key": "self_attn.k_proj", "value": "self_attn.v_proj"} + } + }, + "SPECIAL": { + "query_key_value": "attention_qkv" + } +} diff --git a/llama13b_multiling_800M/13-05-2024-09:34:09/ds_config.json 
b/llama13b_multiling_800M/13-05-2024-09:34:09/ds_config.json new file mode 100644 index 0000000000000000000000000000000000000000..d0ee47e9cda5f537a1be4430e2806d723acd2356 --- /dev/null +++ b/llama13b_multiling_800M/13-05-2024-09:34:09/ds_config.json @@ -0,0 +1,19 @@ +{ + "train_batch_size" : 256, + "train_micro_batch_size_per_gpu": 1, + "steps_per_print": 10, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": 0 + }, + "bf16": { + "enabled": true, + "accumulate_grads_via_hooks": true + }, + "fp16": {"enabled": false}, + "wall_clock_breakdown": false, + "pipeline": { + "pipe_partitioned": false, + "grad_partitioned": false + } +} diff --git a/llama13b_multiling_800M/13-05-2024-09:34:09/log.txt b/llama13b_multiling_800M/13-05-2024-09:34:09/log.txt new file mode 100644 index 0000000000000000000000000000000000000000..5676647d71ceb38da7ef2fae332efa8824b20e8a --- /dev/null +++ b/llama13b_multiling_800M/13-05-2024-09:34:09/log.txt @@ -0,0 +1,656 @@ +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. + warnings.warn( +[2024-05-13 09:34:11,123] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +[2024-05-13 09:34:12,433] [INFO] [runner.py:574:main] deepspeed_env file = /root/.deepspeed_env +[2024-05-13 09:34:12,434] [INFO] [multinode_runner.py:72:get_cmd] Running on the following workers: 100.83.37.175,100.83.37.188 +[2024-05-13 09:34:12,434] [INFO] [runner.py:585:main] cmd = pdsh -S -f 1024 -w 100.83.37.175,100.83.37.188 export PYTHONPATH=/Model-References/PyTorch/common:/usr/lib/habanalabs/:$PYTHONPATH; export PYTHON=/usr/bin/python3; export PT_HPU_LAZY_ACC_PAR_MODE=0; export PT_HPU_ENABLE_REFINE_DYNAMIC_SHAPES=0; export PT_HPU_ENABLE_WEIGHT_CPU_PERMUTE=0; export MODEL_REFERENCES_ROOT=/Model-References; export ENABLE_CONSOLE=false; export LOG_LEVEL_ALL=4; cd /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed; /usr/bin/python3 -u -m deepspeed.launcher.launch --world_info=eyIxMDAuODMuMzcuMTc1IjogWzAsIDEsIDIsIDMsIDQsIDUsIDYsIDddLCAiMTAwLjgzLjM3LjE4OCI6IFswLCAxLCAyLCAzLCA0LCA1LCA2LCA3XX0= --node_rank=%n --master_addr=100.83.37.175 --master_port=29500 --no_python --no_local_rank /usr/bin/bash -c ' cd /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed && python3 -u ./pretrain_llama.py --deepspeed --tensor-model-parallel-size 1 --pipeline-model-parallel-size 1 --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --layernorm-epsilon 1e-6 --num-layers 24 --hidden-size 2048 --ffn-hidden-size 4096 --num-attention-heads 32 --seq-length 2048 --micro-batch-size 1 --global-batch-size 256 --train-iters 10000 --log-interval 10 --eval-iters 10 --eval-interval 500 --data-path /data/hineng/tokenizer//_raw_content_document --vocab-file /data/hineng/tokenizer//gpt2-vocab.json --merge-file /data/hineng/tokenizer//gpt2-merges.txt --optimizer adamw --adam-beta1 0.9 --adam-beta2 0.95 --adam-eps 1e-6 --lr 3e-4 --lr-decay-style cosine --lr-warmup-iters 2000 --clip-grad 1.0 --weight-decay 0.1 --attention-dropout 0.1 --hidden-dropout 0.1 --tensorboard-dir /data/output/llama13b_multiling_800M/13-05-2024-09:34:09/tensorboard --log-validation-ppl-to-tensorboard --log-batch-size-to-tensorboard --log-timers-to-tensorboard --load /data/output/llama13b_multiling_800M/13-05-2024-09:34:09/checkpoints_zero_stage_2 
--deepspeed_config=/data/output/llama13b_multiling_800M/13-05-2024-09:34:09/ds_config.json --zero-stage=0 --exit-interval 0 --no-masked-softmax-fusion --no-bias-gelu-fusion --no-bias-dropout-fusion --use-torch-compile false --use-fused-sdpa true --use-fused-sdpa-with-recompute false --bf16 --use_hpu --distributed-backend=hccl --hpu-deterministic --sequence-parallel --save /data/output/llama13b_multiling_800M/13-05-2024-09:34:09/checkpoints_zero_stage_2 --hf-save /data/output/llama13b_multiling_800M/13-05-2024-09:34:09/hf_ckpt --save-interval 500 --verify-checkpoint --verify-checkpoint-model-type LLAMA'
+100.83.37.175: Warning: Permanently added '[100.83.37.175]:3122' (ED25519) to the list of known hosts.
+100.83.37.188: Warning: Permanently added '[100.83.37.188]:3122' (ED25519) to the list of known hosts.
+100.83.37.175: * Starting OpenBSD Secure Shell server sshd
+100.83.37.175: ...done.
+100.83.37.188: * Starting OpenBSD Secure Shell server sshd
+100.83.37.188: ...done.
+100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+100.83.37.175: warnings.warn(
+100.83.37.175: [2024-05-13 09:34:14,112] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect)
+100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+100.83.37.188: warnings.warn(
+100.83.37.188: [2024-05-13 09:34:14,229] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect)
+100.83.37.188: [2024-05-13 09:34:15,453] [INFO] [launch.py:146:main] WORLD INFO DICT: {'100.83.37.175': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.37.188': [0, 1, 2, 3, 4, 5, 6, 7]}
+100.83.37.188: [2024-05-13 09:34:15,454] [INFO] [launch.py:152:main] nnodes=2, num_local_procs=8, node_rank=1
+100.83.37.188: [2024-05-13 09:34:15,454] [INFO] [launch.py:163:main] global_rank_mapping=defaultdict(<class 'list'>, {'100.83.37.175': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.37.188': [8, 9, 10, 11, 12, 13, 14, 15]})
+100.83.37.188: [2024-05-13 09:34:15,454] [INFO] [launch.py:164:main] dist_world_size=16
+100.83.37.188: [2024-05-13 09:34:15,454] [INFO] [launch.py:166:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
+100.83.37.175: [2024-05-13 09:34:15,503] [INFO] [launch.py:146:main] WORLD INFO DICT: {'100.83.37.175': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.37.188': [0, 1, 2, 3, 4, 5, 6, 7]}
+100.83.37.175: [2024-05-13 09:34:15,503] [INFO] [launch.py:152:main] nnodes=2, num_local_procs=8, node_rank=0
+100.83.37.175: [2024-05-13 09:34:15,503] [INFO] [launch.py:163:main] global_rank_mapping=defaultdict(<class 'list'>, {'100.83.37.175': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.37.188': [8, 9, 10, 11, 12, 13, 14, 15]})
+100.83.37.175: [2024-05-13 09:34:15,503] [INFO] [launch.py:164:main] dist_world_size=16
+100.83.37.175: [2024-05-13 09:34:15,503] [INFO] [launch.py:166:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
+100.83.37.175: * Starting OpenBSD Secure Shell server sshd
+100.83.37.175: * Starting OpenBSD Secure Shell server sshd
+100.83.37.175: * Starting OpenBSD Secure Shell server sshd
+100.83.37.175: * Starting OpenBSD Secure Shell server sshd
+100.83.37.175: * Starting OpenBSD Secure Shell server sshd
+100.83.37.175: * Starting OpenBSD Secure Shell server sshd
+100.83.37.175: ...done. +100.83.37.175: ...done. +100.83.37.175: ...done. +100.83.37.175: ...done. +100.83.37.175: ...done. +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: ...done. +100.83.37.175: ...done. +100.83.37.175: ...done. +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: ...done. +100.83.37.188: ...done. +100.83.37.188: ...done. +100.83.37.188: ...done. +100.83.37.188: ...done. +100.83.37.188: ...done. +100.83.37.188: ...done. +100.83.37.188: ...done. +100.83.37.175: [2024-05-13 09:34:17,241] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.175: warnings.warn( +100.83.37.175: [2024-05-13 09:34:17,263] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.175: warnings.warn( +100.83.37.175: [2024-05-13 09:34:17,267] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.188: [2024-05-13 09:34:17,266] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.188: [2024-05-13 09:34:17,266] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.175: warnings.warn( +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.188: warnings.warn( +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.188: warnings.warn( +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. 
+100.83.37.188: --------------------------------------------------
+100.83.37.188: DeepSpeed C++/CUDA extension op report
+100.83.37.188: --------------------------------------------------
+100.83.37.188: NOTE: Ops not installed will be just-in-time (JIT) compiled at
+100.83.37.188: runtime if needed. Op compatibility means that your system
+100.83.37.188: meet the required dependencies to JIT install the op.
+100.83.37.188: --------------------------------------------------
+100.83.37.188: JIT compiled ops requires ninja
+100.83.37.188: ninja .................. [OKAY]
+100.83.37.188: --------------------------------------------------
+100.83.37.188: op name ................ installed .. compatible
+100.83.37.188: --------------------------------------------------
+100.83.37.188: cpu_adam ............... [NO] ....... [OKAY]
+100.83.37.188: fused_adam ............. [NO] ....... [OKAY]
+100.83.37.188: deepspeed_not_implemented [NO] ....... [OKAY]
+100.83.37.188: transformer_inference .. [NO] ....... [OKAY]
+100.83.37.188: --------------------------------------------------
+100.83.37.188: DeepSpeed general environment info:
+100.83.37.188: torch install path ............... ['/usr/local/lib/python3.10/dist-packages/torch']
+100.83.37.188: torch version .................... 2.1.1a0+gitb51c9f6
+100.83.37.188: deepspeed install path ........... ['/usr/local/lib/python3.10/dist-packages/deepspeed']
+100.83.37.188: deepspeed info ................... 0.12.4+hpu.synapse.v1.14.0, fad45b2, 1.14.0
+100.83.37.188: deepspeed wheel compiled w. ...... torch 2.1
+100.83.37.188: shared memory (/dev/shm) size .... 503.75 GB
+100.83.37.188: fatal: detected dubious ownership in repository at '/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed'
+100.83.37.188: To add an exception for this directory, call:
+100.83.37.188: 
+100.83.37.188: 	git config --global --add safe.directory /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed
+100.83.37.188: **** Git info for Megatron: git_hash=unknown git_branch=unknown ****
+100.83.37.188: Traceback (most recent call last):
+100.83.37.188:   File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 110, in <module>
+100.83.37.188:     pretrain(train_valid_test_datasets_provider, model_provider, forward_step, extra_args_provider=llama_argument_handler,
+100.83.37.188: TypeError: pretrain() missing 1 required positional argument: 'forward_step_func'
+100.83.37.175: --------------------------------------------------
+100.83.37.175: DeepSpeed C++/CUDA extension op report
+100.83.37.175: --------------------------------------------------
+100.83.37.175: NOTE: Ops not installed will be just-in-time (JIT) compiled at
+100.83.37.175: runtime if needed. Op compatibility means that your system
+100.83.37.175: meet the required dependencies to JIT install the op.
+100.83.37.175: --------------------------------------------------
+100.83.37.175: JIT compiled ops requires ninja
+100.83.37.175: ninja .................. [OKAY]
+100.83.37.175: --------------------------------------------------
+100.83.37.175: op name ................ installed .. compatible
+100.83.37.175: --------------------------------------------------
+100.83.37.175: cpu_adam ............... [NO] ....... [OKAY]
+100.83.37.175: fused_adam ............. [NO] ....... [OKAY]
+100.83.37.175: deepspeed_not_implemented [NO] ....... [OKAY]
+100.83.37.175: transformer_inference .. [NO] ....... [OKAY]
+100.83.37.175: --------------------------------------------------
+100.83.37.175: DeepSpeed general environment info:
+100.83.37.175: torch install path ............... ['/usr/local/lib/python3.10/dist-packages/torch']
+100.83.37.175: torch version .................... 2.1.1a0+gitb51c9f6
+100.83.37.175: deepspeed install path ........... ['/usr/local/lib/python3.10/dist-packages/deepspeed']
+100.83.37.175: deepspeed info ................... 0.12.4+hpu.synapse.v1.14.0, fad45b2, 1.14.0
+100.83.37.175: deepspeed wheel compiled w. ...... torch 2.1
+100.83.37.175: shared memory (/dev/shm) size .... 503.75 GB
+100.83.37.175: fatal: detected dubious ownership in repository at '/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed'
+100.83.37.175: To add an exception for this directory, call:
+100.83.37.175: 
+100.83.37.175: 	git config --global --add safe.directory /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed
+100.83.37.175: **** Git info for Megatron: git_hash=unknown git_branch=unknown ****
+100.83.37.175: Traceback (most recent call last):
+100.83.37.175:   File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 110, in <module>
+100.83.37.175:     pretrain(train_valid_test_datasets_provider, model_provider, forward_step, extra_args_provider=llama_argument_handler,
+100.83.37.175: TypeError: pretrain() missing 1 required positional argument: 'forward_step_func'
+100.83.37.188: [2024-05-13 09:34:20,462] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 124734
+100.83.37.188: [2024-05-13 09:34:20,463] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 124735
+100.83.37.188: [2024-05-13 09:34:20,464] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 124736
+100.83.37.188: [2024-05-13 09:34:20,464] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 124737
+100.83.37.188: [2024-05-13 09:34:20,464] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 124738
+100.83.37.188: [2024-05-13 09:34:20,464] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 124739
+100.83.37.188: [2024-05-13 09:34:20,465] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 124740
+100.83.37.188: [2024-05-13 09:34:20,465] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 124741
+100.83.37.188: [2024-05-13 09:34:20,465] [ERROR] [launch.py:322:sigkill_handler] ['/usr/bin/bash', '-c', ' cd /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed && python3 -u ./pretrain_llama.py --deepspeed --tensor-model-parallel-size 1 --pipeline-model-parallel-size 1 --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --layernorm-epsilon 1e-6 --num-layers 24 --hidden-size 2048 --ffn-hidden-size 4096 --num-attention-heads 32 --seq-length 2048 --micro-batch-size 1 --global-batch-size 256 --train-iters 10000 --log-interval 10 --eval-iters 10 --eval-interval 500 --data-path /data/hineng/tokenizer//_raw_content_document --vocab-file /data/hineng/tokenizer//gpt2-vocab.json --merge-file /data/hineng/tokenizer//gpt2-merges.txt --optimizer adamw --adam-beta1 0.9 --adam-beta2 0.95 --adam-eps 1e-6 --lr 3e-4 --lr-decay-style cosine --lr-warmup-iters 2000 --clip-grad 1.0 --weight-decay 0.1 --attention-dropout 0.1 --hidden-dropout 0.1 --tensorboard-dir /data/output/llama13b_multiling_800M/13-05-2024-09:34:09/tensorboard --log-validation-ppl-to-tensorboard --log-batch-size-to-tensorboard --log-timers-to-tensorboard --load /data/output/llama13b_multiling_800M/13-05-2024-09:34:09/checkpoints_zero_stage_2 --deepspeed_config=/data/output/llama13b_multiling_800M/13-05-2024-09:34:09/ds_config.json --zero-stage=0 --exit-interval 0 --no-masked-softmax-fusion --no-bias-gelu-fusion --no-bias-dropout-fusion --use-torch-compile false --use-fused-sdpa true --use-fused-sdpa-with-recompute false --bf16 --use_hpu --distributed-backend=hccl --hpu-deterministic --sequence-parallel --save /data/output/llama13b_multiling_800M/13-05-2024-09:34:09/checkpoints_zero_stage_2 --hf-save /data/output/llama13b_multiling_800M/13-05-2024-09:34:09/hf_ckpt --save-interval 500 --verify-checkpoint --verify-checkpoint-model-type LLAMA'] exits with return code = 1
+100.83.37.175: [2024-05-13 09:34:20,512] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 29973
+100.83.37.175: [2024-05-13 09:34:20,514] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 29974
+100.83.37.175: [2024-05-13 09:34:20,514] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 29975
+100.83.37.175: [2024-05-13 09:34:20,515] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 29976
+100.83.37.175: [2024-05-13 09:34:20,515] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 29979
+100.83.37.175: [2024-05-13 09:34:20,516] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 29982
+100.83.37.175: [2024-05-13 09:34:20,516] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 29987
+100.83.37.175: [2024-05-13 09:34:20,516] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 29988
+100.83.37.175: [2024-05-13 09:34:20,517] [ERROR] [launch.py:322:sigkill_handler] ['/usr/bin/bash', '-c', ' cd /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed && python3 -u ./pretrain_llama.py --deepspeed --tensor-model-parallel-size 1 --pipeline-model-parallel-size 1 --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --layernorm-epsilon 1e-6 --num-layers 24 --hidden-size 2048 --ffn-hidden-size 4096 --num-attention-heads 32 --seq-length 2048 --micro-batch-size 1 --global-batch-size 256 --train-iters 10000 --log-interval 10 --eval-iters 10 --eval-interval 500 --data-path /data/hineng/tokenizer//_raw_content_document --vocab-file /data/hineng/tokenizer//gpt2-vocab.json --merge-file /data/hineng/tokenizer//gpt2-merges.txt --optimizer adamw --adam-beta1 0.9 --adam-beta2 0.95 --adam-eps 1e-6 --lr 3e-4 --lr-decay-style cosine --lr-warmup-iters 2000 --clip-grad 1.0 --weight-decay 0.1 --attention-dropout 0.1 --hidden-dropout 0.1 --tensorboard-dir /data/output/llama13b_multiling_800M/13-05-2024-09:34:09/tensorboard --log-validation-ppl-to-tensorboard --log-batch-size-to-tensorboard --log-timers-to-tensorboard --load /data/output/llama13b_multiling_800M/13-05-2024-09:34:09/checkpoints_zero_stage_2 --deepspeed_config=/data/output/llama13b_multiling_800M/13-05-2024-09:34:09/ds_config.json --zero-stage=0 --exit-interval 0 --no-masked-softmax-fusion --no-bias-gelu-fusion --no-bias-dropout-fusion --use-torch-compile false --use-fused-sdpa true --use-fused-sdpa-with-recompute false --bf16 --use_hpu --distributed-backend=hccl --hpu-deterministic --sequence-parallel --save /data/output/llama13b_multiling_800M/13-05-2024-09:34:09/checkpoints_zero_stage_2 --hf-save /data/output/llama13b_multiling_800M/13-05-2024-09:34:09/hf_ckpt --save-interval 500 --verify-checkpoint --verify-checkpoint-model-type LLAMA'] exits with return code = 1
+pdsh@vizzhy-150-3: 100.83.37.175: ssh exited with exit code 1
+pdsh@vizzhy-150-3: 100.83.37.188: ssh exited with exit code 1
diff --git a/llama13b_multiling_800M/13-05-2024-09:34:09/mds_to_hf_llama_custom.json b/llama13b_multiling_800M/13-05-2024-09:34:09/mds_to_hf_llama_custom.json
new file mode 100644
index 0000000000000000000000000000000000000000..984df1b8e167dc47c40209bd7e305c2e697f43a2
--- /dev/null
+++ b/llama13b_multiling_800M/13-05-2024-09:34:09/mds_to_hf_llama_custom.json
@@ -0,0 +1,40 @@
+{
+  "MODEL": {
+    "num_hidden_layers": 24,
+    "hidden_size": 2048,
+    "num_attention_heads": 32,
+    "intermediate_size": 4096,
+    "vocab_size":VOCAB_SIZE
+  },
+  "LAYER_MAPPINGS" : {
+    "word_embeddings": 1,
+    "transformer": [3, 26],
+    "final_layernorm": 28,
+    "final_word_embeddings": 29
+  },
+  "FULL_NAME_MAPPINGS": {
+  },
+  "PARTIAL_NAME_MAPPINGS": {
+    "final_word_embeddings": {
+      "vocab_parallel_projection": "lm_head"
+    },
+    "final_layernorm": {
+      "final_rmsnorm": "model.norm"
+    },
+    "word_embeddings": {
+      "word_embeddings": "model.embed_tokens"
+    },
+    "transformer": {
+      "dense_h_to_4h": "mlp.gate_proj",
+      "dense_4h_to_h": "mlp.down_proj",
+      "dense_h_to_4h_swiglu": "mlp.up_proj",
+      "post_attention_layernorm": "post_attention_layernorm",
+      "input_layernorm": "input_layernorm",
+      "dense": "self_attn.o_proj",
+      "query_key_value": {"query": "self_attn.q_proj", "key": "self_attn.k_proj", "value": "self_attn.v_proj"}
+    }
+  },
+  "SPECIAL": {
+    "query_key_value": "attention_qkv"
+  }
+}
diff --git a/llama13b_multiling_800M/13-05-2024-09:58:53/ds_config.json b/llama13b_multiling_800M/13-05-2024-09:58:53/ds_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..d0ee47e9cda5f537a1be4430e2806d723acd2356
--- /dev/null
+++ b/llama13b_multiling_800M/13-05-2024-09:58:53/ds_config.json
@@ -0,0 +1,19 @@
+{
+  "train_batch_size" : 256,
+  "train_micro_batch_size_per_gpu": 1,
+  "steps_per_print": 10,
+  "gradient_clipping": 1.0,
+  "zero_optimization": {
+    "stage": 0
+  },
+  "bf16": {
+    "enabled": true,
+    "accumulate_grads_via_hooks": true
+  },
+  "fp16": {"enabled": false},
+  "wall_clock_breakdown": false,
+  "pipeline": {
+    "pipe_partitioned": false,
+    "grad_partitioned": false
+  }
+}
diff --git a/llama13b_multiling_800M/13-05-2024-09:58:53/log.txt b/llama13b_multiling_800M/13-05-2024-09:58:53/log.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f822b426dbdea1e057c047d02e530444655f0614
--- /dev/null
+++ b/llama13b_multiling_800M/13-05-2024-09:58:53/log.txt
@@ -0,0 +1,128 @@
+/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+  warnings.warn(
+[2024-05-13 09:58:54,809] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect)
+[2024-05-13 09:58:56,129] [INFO] [runner.py:574:main] deepspeed_env file = /root/.deepspeed_env
+[2024-05-13 09:58:56,129] [INFO] [multinode_runner.py:72:get_cmd] Running on the following workers: 100.83.37.175,100.83.37.188
+[2024-05-13 09:58:56,129] [INFO] [runner.py:585:main] cmd = pdsh -S -f 1024 -w 100.83.37.175,100.83.37.188 export PYTHONPATH=/Model-References/PyTorch/common:/usr/lib/habanalabs/:$PYTHONPATH; export PYTHON=/usr/bin/python3; export PT_HPU_LAZY_ACC_PAR_MODE=0; export PT_HPU_ENABLE_REFINE_DYNAMIC_SHAPES=0; export PT_HPU_ENABLE_WEIGHT_CPU_PERMUTE=0; export MODEL_REFERENCES_ROOT=/Model-References; export ENABLE_CONSOLE=false; export LOG_LEVEL_ALL=4; cd /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed; /usr/bin/python3 -u -m deepspeed.launcher.launch --world_info=eyIxMDAuODMuMzcuMTc1IjogWzAsIDEsIDIsIDMsIDQsIDUsIDYsIDddLCAiMTAwLjgzLjM3LjE4OCI6IFswLCAxLCAyLCAzLCA0LCA1LCA2LCA3XX0= --node_rank=%n --master_addr=100.83.37.175 --master_port=29500 --no_python --no_local_rank /usr/bin/bash -c ' cd /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed && python3 -u ./pretrain_llama.py --deepspeed --tensor-model-parallel-size 1 --pipeline-model-parallel-size 1 --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --layernorm-epsilon 1e-6 --num-layers 24 --hidden-size 2048 --ffn-hidden-size 4096 --num-attention-heads 32 --seq-length 2048 --micro-batch-size 1 --global-batch-size 256 --train-iters 10000 --log-interval 10 --eval-iters 10 --eval-interval 500 --data-path /data/hineng/tokenizer//_raw_content_document --vocab-file /data/hineng/tokenizer//gpt2-vocab.json --merge-file /data/hineng/tokenizer//gpt2-merges.txt --optimizer adamw --adam-beta1 0.9 --adam-beta2 0.95 --adam-eps 1e-6 --lr 3e-4 --lr-decay-style cosine --lr-warmup-iters 2000 --clip-grad 1.0 --weight-decay 0.1 --attention-dropout 0.1 --hidden-dropout 0.1 --tensorboard-dir /data/output/llama13b_multiling_800M/13-05-2024-09:58:53/tensorboard --log-validation-ppl-to-tensorboard --log-batch-size-to-tensorboard --log-timers-to-tensorboard --load /data/output/llama13b_multiling_800M/13-05-2024-09:58:53/checkpoints_zero_stage_2 --deepspeed_config=/data/output/llama13b_multiling_800M/13-05-2024-09:58:53/ds_config.json --zero-stage=0 --exit-interval 0 --no-masked-softmax-fusion --no-bias-gelu-fusion --no-bias-dropout-fusion --use-torch-compile false --use-fused-sdpa true --use-fused-sdpa-with-recompute false --bf16 --use_hpu --distributed-backend=hccl --hpu-deterministic --sequence-parallel --save /data/output/llama13b_multiling_800M/13-05-2024-09:58:53/checkpoints_zero_stage_2 --hf-save /data/output/llama13b_multiling_800M/13-05-2024-09:58:53/hf_ckpt --save-interval 500 --verify-checkpoint --verify-checkpoint-model-type LLAMA'
+100.83.37.175: Warning: Permanently added '[100.83.37.175]:3122' (ED25519) to the list of known hosts.
+100.83.37.188: Warning: Permanently added '[100.83.37.188]:3122' (ED25519) to the list of known hosts.
+100.83.37.175: * Starting OpenBSD Secure Shell server sshd
+100.83.37.175: ...done.
+100.83.37.188: * Starting OpenBSD Secure Shell server sshd
+100.83.37.188: ...done.
+100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+100.83.37.175:   warnings.warn(
+100.83.37.175: [2024-05-13 09:58:57,783] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect)
+100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+100.83.37.188:   warnings.warn(
+100.83.37.188: [2024-05-13 09:58:57,862] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect)
+100.83.37.188: [2024-05-13 09:58:58,998] [INFO] [launch.py:146:main] WORLD INFO DICT: {'100.83.37.175': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.37.188': [0, 1, 2, 3, 4, 5, 6, 7]}
+100.83.37.188: [2024-05-13 09:58:58,998] [INFO] [launch.py:152:main] nnodes=2, num_local_procs=8, node_rank=1
+100.83.37.188: [2024-05-13 09:58:58,998] [INFO] [launch.py:163:main] global_rank_mapping=defaultdict(<class 'list'>, {'100.83.37.175': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.37.188': [8, 9, 10, 11, 12, 13, 14, 15]})
+100.83.37.188: [2024-05-13 09:58:58,998] [INFO] [launch.py:164:main] dist_world_size=16
+100.83.37.188: [2024-05-13 09:58:58,998] [INFO] [launch.py:166:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
+100.83.37.188: * Starting OpenBSD Secure Shell server sshd
+100.83.37.188: ...done.
+100.83.37.188: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 119 +100.83.37.188: ) +100.83.37.188: IndentationError: unexpected indent +100.83.37.175: [2024-05-13 09:58:59,320] [INFO] [launch.py:146:main] WORLD INFO DICT: {'100.83.37.175': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.37.188': [0, 1, 2, 3, 4, 5, 6, 7]} +100.83.37.175: [2024-05-13 09:58:59,320] [INFO] [launch.py:152:main] nnodes=2, num_local_procs=8, node_rank=0 +100.83.37.175: [2024-05-13 09:58:59,320] [INFO] [launch.py:163:main] global_rank_mapping=defaultdict(, {'100.83.37.175': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.37.188': [8, 9, 10, 11, 12, 13, 14, 15]}) +100.83.37.175: [2024-05-13 09:58:59,320] [INFO] [launch.py:164:main] dist_world_size=16 +100.83.37.175: [2024-05-13 09:58:59,320] [INFO] [launch.py:166:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: ...done. +100.83.37.175: ...done. +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: ...done. +100.83.37.175: ...done. +100.83.37.175: ...done. +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: ...done. +100.83.37.175: ...done. +100.83.37.175: ...done.
+100.83.37.175: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 119 +100.83.37.175: ) +100.83.37.175: IndentationError: unexpected indent +100.83.37.188: [2024-05-13 09:59:00,002] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 126123 +100.83.37.188: [2024-05-13 09:59:00,003] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 126124 +100.83.37.188: [2024-05-13 09:59:00,003] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 126125 +100.83.37.188: [2024-05-13 09:59:00,004] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 126126 +100.83.37.188: [2024-05-13 09:59:00,004] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 126127 +100.83.37.188: [2024-05-13 09:59:00,004] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 126128 +100.83.37.188: [2024-05-13 09:59:00,004] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 126129 +100.83.37.188: [2024-05-13 09:59:00,004] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 126130 +100.83.37.188: [2024-05-13 09:59:00,005] [ERROR] [launch.py:322:sigkill_handler] ['/usr/bin/bash', '-c', ' cd /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed && python3 -u ./pretrain_llama.py --deepspeed --tensor-model-parallel-size 1 --pipeline-model-parallel-size 1 --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --layernorm-epsilon 1e-6 --num-layers 24 --hidden-size 2048 --ffn-hidden-size 4096 --num-attention-heads 32 --seq-length 2048 --micro-batch-size 1 --global-batch-size 256 --train-iters 10000 --log-interval 10 --eval-iters 10 --eval-interval 500 --data-path /data/hineng/tokenizer//_raw_content_document --vocab-file /data/hineng/tokenizer//gpt2-vocab.json --merge-file /data/hineng/tokenizer//gpt2-merges.txt --optimizer adamw --adam-beta1 0.9 --adam-beta2 0.95 --adam-eps 1e-6 --lr 3e-4 --lr-decay-style cosine --lr-warmup-iters 2000 --clip-grad 1.0 --weight-decay 0.1 --attention-dropout 0.1 --hidden-dropout 0.1 --tensorboard-dir /data/output/llama13b_multiling_800M/13-05-2024-09:58:53/tensorboard --log-validation-ppl-to-tensorboard --log-batch-size-to-tensorboard
--log-timers-to-tensorboard --load /data/output/llama13b_multiling_800M/13-05-2024-09:58:53/checkpoints_zero_stage_2 --deepspeed_config=/data/output/llama13b_multiling_800M/13-05-2024-09:58:53/ds_config.json --zero-stage=0 --exit-interval 0 --no-masked-softmax-fusion --no-bias-gelu-fusion --no-bias-dropout-fusion --use-torch-compile false --use-fused-sdpa true --use-fused-sdpa-with-recompute false --bf16 --use_hpu --distributed-backend=hccl --hpu-deterministic --sequence-parallel --save /data/output/llama13b_multiling_800M/13-05-2024-09:58:53/checkpoints_zero_stage_2 --hf-save /data/output/llama13b_multiling_800M/13-05-2024-09:58:53/hf_ckpt --save-interval 500 --verify-checkpoint --verify-checkpoint-model-type LLAMA'] exits with return code = 1 +pdsh@vizzhy-150-3: 100.83.37.188: ssh exited with exit code 1 +100.83.37.175: [2024-05-13 09:59:00,324] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 31578 +100.83.37.175: [2024-05-13 09:59:00,326] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 31579 +100.83.37.175: [2024-05-13 09:59:00,326] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 31580 +100.83.37.175: [2024-05-13 09:59:00,327] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 31581 +100.83.37.175: [2024-05-13 09:59:00,327] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 31585 +100.83.37.175: [2024-05-13 09:59:00,328] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 31590 +100.83.37.175: [2024-05-13 09:59:00,328] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 31592 +100.83.37.175: [2024-05-13 09:59:00,328] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 31599 +100.83.37.175: [2024-05-13 09:59:00,329] [ERROR] [launch.py:322:sigkill_handler] ['/usr/bin/bash', '-c', ' cd /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed && python3 -u ./pretrain_llama.py --deepspeed --tensor-model-parallel-size 1 --pipeline-model-parallel-size 1 --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --layernorm-epsilon 1e-6 --num-layers 24 --hidden-size 2048 --ffn-hidden-size 4096 --num-attention-heads 32 --seq-length 2048 --micro-batch-size 1 --global-batch-size 256 --train-iters 10000 --log-interval 10 --eval-iters 10 --eval-interval 500 --data-path /data/hineng/tokenizer//_raw_content_document --vocab-file /data/hineng/tokenizer//gpt2-vocab.json --merge-file /data/hineng/tokenizer//gpt2-merges.txt --optimizer adamw --adam-beta1 0.9 --adam-beta2 0.95 --adam-eps 1e-6 --lr 3e-4 --lr-decay-style cosine --lr-warmup-iters 2000 --clip-grad 1.0 --weight-decay 0.1 --attention-dropout 0.1 --hidden-dropout 0.1 --tensorboard-dir /data/output/llama13b_multiling_800M/13-05-2024-09:58:53/tensorboard --log-validation-ppl-to-tensorboard --log-batch-size-to-tensorboard --log-timers-to-tensorboard --load /data/output/llama13b_multiling_800M/13-05-2024-09:58:53/checkpoints_zero_stage_2 --deepspeed_config=/data/output/llama13b_multiling_800M/13-05-2024-09:58:53/ds_config.json --zero-stage=0 --exit-interval 0 --no-masked-softmax-fusion --no-bias-gelu-fusion --no-bias-dropout-fusion --use-torch-compile false --use-fused-sdpa true --use-fused-sdpa-with-recompute false --bf16 --use_hpu --distributed-backend=hccl --hpu-deterministic --sequence-parallel --save /data/output/llama13b_multiling_800M/13-05-2024-09:58:53/checkpoints_zero_stage_2 --hf-save /data/output/llama13b_multiling_800M/13-05-2024-09:58:53/hf_ckpt --save-interval 500 --verify-checkpoint --verify-checkpoint-model-type LLAMA'] 
exits with return code = 1 +pdsh@vizzhy-150-3: 100.83.37.175: ssh exited with exit code 1 diff --git a/llama13b_multiling_800M/13-05-2024-09:58:53/mds_to_hf_llama_custom.json b/llama13b_multiling_800M/13-05-2024-09:58:53/mds_to_hf_llama_custom.json new file mode 100644 index 0000000000000000000000000000000000000000..984df1b8e167dc47c40209bd7e305c2e697f43a2 --- /dev/null +++ b/llama13b_multiling_800M/13-05-2024-09:58:53/mds_to_hf_llama_custom.json @@ -0,0 +1,40 @@ +{ + "MODEL": { + "num_hidden_layers": 24, + "hidden_size": 2048, + "num_attention_heads": 32, + "intermediate_size": 4096, + "vocab_size":VOCAB_SIZE + }, + "LAYER_MAPPINGS" : { + "word_embeddings": 1, + "transformer": [3, 26], + "final_layernorm": 28, + "final_word_embeddings": 29 + }, + "FULL_NAME_MAPPINGS": { + }, + "PARTIAL_NAME_MAPPINGS": { + "final_word_embeddings": { + "vocab_parallel_projection": "lm_head" + }, + "final_layernorm": { + "final_rmsnorm": "model.norm" + }, + "word_embeddings": { + "word_embeddings": "model.embed_tokens" + }, + "transformer": { + "dense_h_to_4h": "mlp.gate_proj", + "dense_4h_to_h": "mlp.down_proj", + "dense_h_to_4h_swiglu": "mlp.up_proj", + "post_attention_layernorm": "post_attention_layernorm", + "input_layernorm": "input_layernorm", + "dense": "self_attn.o_proj", + "query_key_value": {"query": "self_attn.q_proj", "key": "self_attn.k_proj", "value": "self_attn.v_proj"} + } + }, + "SPECIAL": { + "query_key_value": "attention_qkv" + } +} diff --git a/llama13b_multiling_800M/13-05-2024-09:59:29/ds_config.json b/llama13b_multiling_800M/13-05-2024-09:59:29/ds_config.json new file mode 100644 index 0000000000000000000000000000000000000000..d0ee47e9cda5f537a1be4430e2806d723acd2356 --- /dev/null +++ b/llama13b_multiling_800M/13-05-2024-09:59:29/ds_config.json @@ -0,0 +1,19 @@ +{ + "train_batch_size" : 256, + "train_micro_batch_size_per_gpu": 1, + "steps_per_print": 10, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": 0 + }, + "bf16": { + "enabled": true, + "accumulate_grads_via_hooks": true + }, + "fp16": {"enabled": false}, + "wall_clock_breakdown": false, + "pipeline": { + "pipe_partitioned": false, + "grad_partitioned": false + } +} diff --git a/llama13b_multiling_800M/13-05-2024-09:59:29/log.txt b/llama13b_multiling_800M/13-05-2024-09:59:29/log.txt new file mode 100644 index 0000000000000000000000000000000000000000..36283c08b2fa852c97a720407bd55f87f6fe2e99 --- /dev/null +++ b/llama13b_multiling_800M/13-05-2024-09:59:29/log.txt @@ -0,0 +1,4800 @@ +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. 
+ warnings.warn( +[2024-05-13 09:59:31,359] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +[2024-05-13 09:59:32,672] [INFO] [runner.py:574:main] deepspeed_env file = /root/.deepspeed_env +[2024-05-13 09:59:32,673] [INFO] [multinode_runner.py:72:get_cmd] Running on the following workers: 100.83.37.175,100.83.37.188 +[2024-05-13 09:59:32,673] [INFO] [runner.py:585:main] cmd = pdsh -S -f 1024 -w 100.83.37.175,100.83.37.188 export PYTHONPATH=/Model-References/PyTorch/common:/usr/lib/habanalabs/:$PYTHONPATH; export PYTHON=/usr/bin/python3; export PT_HPU_LAZY_ACC_PAR_MODE=0; export PT_HPU_ENABLE_REFINE_DYNAMIC_SHAPES=0; export PT_HPU_ENABLE_WEIGHT_CPU_PERMUTE=0; export MODEL_REFERENCES_ROOT=/Model-References; export ENABLE_CONSOLE=false; export LOG_LEVEL_ALL=4; cd /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed; /usr/bin/python3 -u -m deepspeed.launcher.launch --world_info=eyIxMDAuODMuMzcuMTc1IjogWzAsIDEsIDIsIDMsIDQsIDUsIDYsIDddLCAiMTAwLjgzLjM3LjE4OCI6IFswLCAxLCAyLCAzLCA0LCA1LCA2LCA3XX0= --node_rank=%n --master_addr=100.83.37.175 --master_port=29500 --no_python --no_local_rank /usr/bin/bash -c ' cd /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed && python3 -u ./pretrain_llama.py --deepspeed --tensor-model-parallel-size 1 --pipeline-model-parallel-size 1 --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --layernorm-epsilon 1e-6 --num-layers 24 --hidden-size 2048 --ffn-hidden-size 4096 --num-attention-heads 32 --seq-length 2048 --micro-batch-size 1 --global-batch-size 256 --train-iters 10000 --log-interval 10 --eval-iters 10 --eval-interval 500 --data-path /data/hineng/tokenizer//_raw_content_document --vocab-file /data/hineng/tokenizer//gpt2-vocab.json --merge-file /data/hineng/tokenizer//gpt2-merges.txt --optimizer adamw --adam-beta1 0.9 --adam-beta2 0.95 --adam-eps 1e-6 --lr 3e-4 --lr-decay-style cosine --lr-warmup-iters 2000 --clip-grad 1.0 --weight-decay 0.1 --attention-dropout 0.1 --hidden-dropout 0.1 --tensorboard-dir /data/output/llama13b_multiling_800M/13-05-2024-09:59:29/tensorboard --log-validation-ppl-to-tensorboard --log-batch-size-to-tensorboard --log-timers-to-tensorboard --load /data/output/llama13b_multiling_800M/13-05-2024-09:59:29/checkpoints_zero_stage_2 --deepspeed_config=/data/output/llama13b_multiling_800M/13-05-2024-09:59:29/ds_config.json --zero-stage=0 --exit-interval 0 --no-masked-softmax-fusion --no-bias-gelu-fusion --no-bias-dropout-fusion --use-torch-compile false --use-fused-sdpa true --use-fused-sdpa-with-recompute false --bf16 --use_hpu --distributed-backend=hccl --hpu-deterministic --sequence-parallel --save /data/output/llama13b_multiling_800M/13-05-2024-09:59:29/checkpoints_zero_stage_2 --hf-save /data/output/llama13b_multiling_800M/13-05-2024-09:59:29/hf_ckpt --save-interval 500 --verify-checkpoint --verify-checkpoint-model-type LLAMA' +100.83.37.188: Warning: Permanently added '[100.83.37.188]:3122' (ED25519) to the list of known hosts. +100.83.37.175: Warning: Permanently added '[100.83.37.175]:3122' (ED25519) to the list of known hosts. +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: ...done. +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: ...done. +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. 
Please use torch.use_deterministic_algorithms instead. +100.83.37.175: warnings.warn( +100.83.37.175: [2024-05-13 09:59:34,315] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.188: warnings.warn( +100.83.37.188: [2024-05-13 09:59:34,387] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.188: [2024-05-13 09:59:35,519] [INFO] [launch.py:146:main] WORLD INFO DICT: {'100.83.37.175': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.37.188': [0, 1, 2, 3, 4, 5, 6, 7]} +100.83.37.188: [2024-05-13 09:59:35,519] [INFO] [launch.py:152:main] nnodes=2, num_local_procs=8, node_rank=1 +100.83.37.188: [2024-05-13 09:59:35,519] [INFO] [launch.py:163:main] global_rank_mapping=defaultdict(, {'100.83.37.175': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.37.188': [8, 9, 10, 11, 12, 13, 14, 15]}) +100.83.37.188: [2024-05-13 09:59:35,519] [INFO] [launch.py:164:main] dist_world_size=16 +100.83.37.188: [2024-05-13 09:59:35,519] [INFO] [launch.py:166:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: ...done. +100.83.37.188: ...done. +100.83.37.188: ...done. +100.83.37.188: ...done. +100.83.37.188: ...done. +100.83.37.188: ...done. +100.83.37.188: ...done. +100.83.37.188: ...done. +100.83.37.175: [2024-05-13 09:59:35,697] [INFO] [launch.py:146:main] WORLD INFO DICT: {'100.83.37.175': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.37.188': [0, 1, 2, 3, 4, 5, 6, 7]} +100.83.37.175: [2024-05-13 09:59:35,697] [INFO] [launch.py:152:main] nnodes=2, num_local_procs=8, node_rank=0 +100.83.37.175: [2024-05-13 09:59:35,697] [INFO] [launch.py:163:main] global_rank_mapping=defaultdict(, {'100.83.37.175': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.37.188': [8, 9, 10, 11, 12, 13, 14, 15]}) +100.83.37.175: [2024-05-13 09:59:35,697] [INFO] [launch.py:164:main] dist_world_size=16 +100.83.37.175: [2024-05-13 09:59:35,697] [INFO] [launch.py:166:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: ...done. +100.83.37.175: ...done. +100.83.37.175: ...done. +100.83.37.175: ...done. +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: ...done. +100.83.37.175: ...done. +100.83.37.175: ...done. +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: ...done. 
+100.83.37.188: [2024-05-13 09:59:37,303] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.188: warnings.warn( +100.83.37.188: [2024-05-13 09:59:37,313] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.188: warnings.warn( +100.83.37.188: [2024-05-13 09:59:37,380] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.188: warnings.warn( +100.83.37.175: [2024-05-13 09:59:37,407] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.175: warnings.warn( +100.83.37.175: [2024-05-13 09:59:37,408] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.175: warnings.warn( +100.83.37.175: [2024-05-13 09:59:37,413] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.175: [2024-05-13 09:59:37,413] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.175: [2024-05-13 09:59:37,414] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.175: warnings.warn( +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.175: warnings.warn( +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. 
+100.83.37.175: warnings.warn( +100.83.37.175: [2024-05-13 09:59:37,423] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.175: warnings.warn( +100.83.37.188: [2024-05-13 09:59:37,438] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.188: warnings.warn( +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.188: warnings.warn( +100.83.37.188: [2024-05-13 09:59:37,445] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.188: warnings.warn( +100.83.37.188: [2024-05-13 09:59:37,449] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.188: warnings.warn( +100.83.37.188: [2024-05-13 09:59:37,450] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.175: [2024-05-13 09:59:37,454] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.175: warnings.warn( +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.188: warnings.warn( +100.83.37.188: [2024-05-13 09:59:37,463] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.175: [2024-05-13 09:59:37,686] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. 
+100.83.37.175: warnings.warn( +100.83.37.188: -------------------------------------------------- +100.83.37.188: DeepSpeed C++/CUDA extension op report +100.83.37.188: -------------------------------------------------- +100.83.37.188: NOTE: Ops not installed will be just-in-time (JIT) compiled at +100.83.37.188: runtime if needed. Op compatibility means that your system +100.83.37.188: meet the required dependencies to JIT install the op. +100.83.37.188: -------------------------------------------------- +100.83.37.188: JIT compiled ops requires ninja +100.83.37.188: ninja .................. [OKAY] +100.83.37.188: -------------------------------------------------- +100.83.37.188: op name ................ installed .. compatible +100.83.37.188: -------------------------------------------------- +100.83.37.188: cpu_adam ............... [NO] ....... [OKAY] +100.83.37.188: fused_adam ............. [NO] ....... [OKAY] +100.83.37.188: deepspeed_not_implemented [NO] ....... [OKAY] +100.83.37.188: transformer_inference .. [NO] ....... [OKAY] +100.83.37.188: -------------------------------------------------- +100.83.37.188: DeepSpeed general environment info: +100.83.37.188: torch install path ............... ['/usr/local/lib/python3.10/dist-packages/torch'] +100.83.37.188: torch version .................... 2.1.1a0+gitb51c9f6 +100.83.37.188: deepspeed install path ........... ['/usr/local/lib/python3.10/dist-packages/deepspeed'] +100.83.37.188: deepspeed info ................... 0.12.4+hpu.synapse.v1.14.0, fad45b2, 1.14.0 +100.83.37.188: deepspeed wheel compiled w. ...... torch 2.1 +100.83.37.188: shared memory (/dev/shm) size ....
503.75 GB +100.83.37.188: **** Git info for Megatron: git_hash=unknown git_branch=unknown **** +100.83.37.188: fatal: detected dubious ownership in repository at '/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed' +100.83.37.188: To add an exception for this directory, call: +100.83.37.188: +100.83.37.188: git config --global --add safe.directory /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed +100.83.37.188: -------------------------------------------------- +100.83.37.188: DeepSpeed C++/CUDA extension op report +100.83.37.188: -------------------------------------------------- +100.83.37.188: NOTE: Ops not installed will be just-in-time (JIT) compiled at +100.83.37.188: runtime if needed. Op compatibility means that your system +100.83.37.188: meet the required dependencies to JIT install the op. +100.83.37.188: -------------------------------------------------- +100.83.37.188: JIT compiled ops requires ninja +100.83.37.188: ninja .................. [OKAY] +100.83.37.188: -------------------------------------------------- +100.83.37.188: op name ................ installed .. compatible +100.83.37.188: -------------------------------------------------- +100.83.37.188: cpu_adam ............... [NO] ....... [OKAY] +100.83.37.188: fused_adam ............. [NO] ....... [OKAY] +100.83.37.188: deepspeed_not_implemented [NO] ....... [OKAY] +100.83.37.188: transformer_inference .. [NO] ....... [OKAY] +100.83.37.188: -------------------------------------------------- +100.83.37.188: DeepSpeed general environment info: +100.83.37.188: torch install path ............... ['/usr/local/lib/python3.10/dist-packages/torch'] +100.83.37.188: torch version .................... 2.1.1a0+gitb51c9f6 +100.83.37.188: deepspeed install path ........... ['/usr/local/lib/python3.10/dist-packages/deepspeed'] +100.83.37.188: deepspeed info ................... 0.12.4+hpu.synapse.v1.14.0, fad45b2, 1.14.0 +100.83.37.188: deepspeed wheel compiled w. ...... torch 2.1 +100.83.37.188: shared memory (/dev/shm) size ....
503.75 GB +100.83.37.188: usage: pretrain_llama.py [-h] [--num-layers NUM_LAYERS] +100.83.37.188: [--encoder-num-layers ENCODER_NUM_LAYERS] +100.83.37.188: [--decoder-num-layers DECODER_NUM_LAYERS] +100.83.37.188: [--num-experts NUM_EXPERTS [NUM_EXPERTS ...]] +100.83.37.188: [--mlp-type MLP_TYPE] [--topk TOPK] +100.83.37.188: [--expert-interval EXPERT_INTERVAL] +100.83.37.188: [--hidden-size HIDDEN_SIZE] +100.83.37.188: [--ffn-hidden-size FFN_HIDDEN_SIZE] +100.83.37.188: [--num-attention-heads NUM_ATTENTION_HEADS] +100.83.37.188: [--num-key-value-heads NUM_KEY_VALUE_HEADS] +100.83.37.188: [--kv-channels KV_CHANNELS] +100.83.37.188: [--max-position-embeddings MAX_POSITION_EMBEDDINGS] +100.83.37.188: [--use-rotary-position-embeddings] +100.83.37.188: [--rotary-percent ROTARY_PERCENT] +100.83.37.188: [--no-position-embedding] +100.83.37.188: [--make-vocab-size-divisible-by MAKE_VOCAB_SIZE_DIVISIBLE_BY] +100.83.37.188: [--normalization {layernorm,rmsnorm}] +100.83.37.188: [--use-fused-rmsnorm USE_FUSED_RMSNORM] +100.83.37.188: [--layernorm-epsilon LAYERNORM_EPSILON] +100.83.37.188: [--apply-layernorm-1p] [--disable-mem-efficient-ln] +100.83.37.188: [--apply-residual-connection-post-layernorm] +100.83.37.188: [--openai-gelu] [--squared-relu] [--swiglu] +100.83.37.188: [--onnx-safe ONNX_SAFE] [--bert-no-binary-head] +100.83.37.188: [--num-experts-switch NUM_EXPERTS_SWITCH] +100.83.37.188: [--untie-embeddings-and-output-weights] +100.83.37.188: [--embedding-weights-in-fp32] +100.83.37.188: [--fix-position-emb-redundant-alloc] +100.83.37.188: [--embed-layernorm] +100.83.37.188: [--kill-switch-path KILL_SWITCH_PATH] +100.83.37.188: [--attention-dropout ATTENTION_DROPOUT] +100.83.37.188: [--hidden-dropout HIDDEN_DROPOUT] +100.83.37.188: [--weight-decay WEIGHT_DECAY] +100.83.37.188: [--start-weight-decay START_WEIGHT_DECAY] +100.83.37.188: [--end-weight-decay END_WEIGHT_DECAY] +100.83.37.188: [--weight-decay-incr-style {constant,linear,cosine}] +100.83.37.188: [--clip-grad CLIP_GRAD] [--adam-beta1 ADAM_BETA1] +100.83.37.188: [--adam-beta2 ADAM_BETA2] [--adam-eps ADAM_EPS] +100.83.37.188: [--sgd-momentum SGD_MOMENTUM] +100.83.37.188: [--do-norm-bias-weight-decay] +100.83.37.188: [--micro-batch-size MICRO_BATCH_SIZE] +100.83.37.188: [--eval-micro-batch-size EVAL_MICRO_BATCH_SIZE] +100.83.37.188: [--batch-size BATCH_SIZE] +100.83.37.188: [--global-batch-size GLOBAL_BATCH_SIZE] +100.83.37.188: [--rampup-batch-size [RAMPUP_BATCH_SIZE ...]] +100.83.37.188: [--recompute-activations] +100.83.37.188: [--recompute-granularity {full,selective}] +100.83.37.188: [--distribute-saved-activations] +100.83.37.188: [--recompute-method {uniform,block}] +100.83.37.188: [--recompute-num-layers RECOMPUTE_NUM_LAYERS] +100.83.37.188: [--checkpoint-activations] +100.83.37.188: [--distribute-checkpointed-activations] +100.83.37.188: [--checkpoint-num-layers CHECKPOINT_NUM_LAYERS] +100.83.37.188: [--train-iters TRAIN_ITERS] +100.83.37.188: [--train-samples TRAIN_SAMPLES] +100.83.37.188: [--train-tokens TRAIN_TOKENS] [--random-ltd] +100.83.37.188: [--log-interval LOG_INTERVAL] +100.83.37.188: [--exit-interval EXIT_INTERVAL] +100.83.37.188: [--exit-duration-in-mins EXIT_DURATION_IN_MINS] +100.83.37.188: [--exit-signal-handler] +100.83.37.188: [--tensorboard-dir TENSORBOARD_DIR] +100.83.37.188: [--no-masked-softmax-fusion] [--no-bias-gelu-fusion] +100.83.37.188: [--no-bias-dropout-fusion] +100.83.37.188: [--disable-moe-token-dropping] +100.83.37.188: [--moe-train-capacity-factor MOE_TRAIN_CAPACITY_FACTOR] +100.83.37.188: 
[--moe-eval-capacity-factor MOE_EVAL_CAPACITY_FACTOR] +100.83.37.188: [--moe-min-capacity MOE_MIN_CAPACITY] +100.83.37.188: [--moe-loss-coeff MOE_LOSS_COEFF] +100.83.37.188: [--create-moe-param-group] [--use-flash-attn] +100.83.37.188: [--use-flash-attn-v2] [--use-flash-attn-triton] +100.83.37.188: [--use-fused-sdpa USE_FUSED_SDPA] +100.83.37.188: [--use-fused-sdpa-with-recompute USE_FUSED_SDPA_WITH_RECOMPUTE] +100.83.37.188: [--disable-bias-linear] +100.83.37.188: [--optimizer {adam,sgd,adamw,fusedadamw}] +100.83.37.188: [--dataloader-type {single,cyclic}] [--ds-inference] +100.83.37.188: [--cpu-optimizer] [--cpu_torch_adam] +100.83.37.188: [--no-pipeline-parallel] [--use-tutel] [--inference] +100.83.37.188: [--no-async-tensor-model-parallel-allreduce] +100.83.37.188: [--no-persist-layer-norm] [--sequence-parallel] +100.83.37.188: [--ds-sequence-parallel-size DS_SEQUENCE_PARALLEL_SIZE] +100.83.37.188: [--force-ds-sequence-parallel] +100.83.37.188: [--no-gradient-accumulation-fusion] +100.83.37.188: [--use-dataset-only USE_DATASET_ONLY] [--seed SEED] +100.83.37.188: [--data-parallel-random-init] +100.83.37.188: [--init-method-std INIT_METHOD_STD] +100.83.37.188: [--init-method-xavier-uniform] [--no-scaled-init] +100.83.37.188: [--lr LR] +100.83.37.188: [--lr-decay-style {constant,linear,cosine,inverse-square-root}] +100.83.37.188: [--lr-decay-iters LR_DECAY_ITERS] +100.83.37.188: [--lr-decay-samples LR_DECAY_SAMPLES] +100.83.37.188: [--lr-decay-tokens LR_DECAY_TOKENS] +100.83.37.188: [--lr-warmup-fraction LR_WARMUP_FRACTION] +100.83.37.188: [--lr-warmup-iters LR_WARMUP_ITERS] +100.83.37.188: [--lr-warmup-samples LR_WARMUP_SAMPLES] +100.83.37.188: [--lr-warmup-tokens LR_WARMUP_TOKENS] +100.83.37.188: [--warmup WARMUP] [--min-lr MIN_LR] +100.83.37.188: [--override-opt_param-scheduler] +100.83.37.188: [--use-checkpoint-opt_param-scheduler] [--save SAVE] +100.83.37.188: [--save-interval SAVE_INTERVAL] [--no-save-optim] +100.83.37.188: [--no-save-rng] [--load LOAD] [--no-load-optim] +100.83.37.188: [--no-load-rng] [--no-load-lr-state] [--finetune] +100.83.37.188: [--no-initialization] [--use-checkpoint-args] +100.83.37.188: [--exit-on-missing-checkpoint] +100.83.37.188: [--universal-checkpoint] [--verify-checkpoint] +100.83.37.188: [--verify-checkpoint-model-type {GPT,BLOOM,LLAMA}] +100.83.37.188: [--fp16] [--bf16] [--loss-scale LOSS_SCALE] +100.83.37.188: [--initial-loss-scale INITIAL_LOSS_SCALE] +100.83.37.188: [--min-loss-scale MIN_LOSS_SCALE] +100.83.37.188: [--loss-scale-window LOSS_SCALE_WINDOW] +100.83.37.188: [--hysteresis HYSTERESIS] +100.83.37.188: [--fp32-residual-connection] +100.83.37.188: [--no-query-key-layer-scaling] +100.83.37.188: [--attention-softmax-in-fp32] +100.83.37.188: [--accumulate-allreduce-grads-in-fp32] +100.83.37.188: [--fp16-lm-cross-entropy] +100.83.37.188: [--tensor-model-parallel-size TENSOR_MODEL_PARALLEL_SIZE] +100.83.37.188: [--enable-expert-tensor-parallelism] +100.83.37.188: [--pipeline-model-parallel-size PIPELINE_MODEL_PARALLEL_SIZE] +100.83.37.188: [--pipeline-model-parallel-split-rank PIPELINE_MODEL_PARALLEL_SPLIT_RANK] +100.83.37.188: [--moe-expert-parallel-size MOE_EXPERT_PARALLEL_SIZE] +100.83.37.188: [--model-parallel-size MODEL_PARALLEL_SIZE] +100.83.37.188: [--num-layers-per-virtual-pipeline-stage NUM_LAYERS_PER_VIRTUAL_PIPELINE_STAGE] +100.83.37.188: [--overlap-p2p-communication] +100.83.37.188: [--distributed-backend {nccl,gloo,ccl,hccl}] +100.83.37.188: [--distributed-timeout-minutes DISTRIBUTED_TIMEOUT_MINUTES] +100.83.37.188: [--DDP-impl 
{local,torch,FSDP}] +100.83.37.188: [--no-contiguous-buffers-in-local-ddp] +100.83.37.188: [--no-scatter-gather-tensors-in-pipeline] +100.83.37.188: [--use-ring-exchange-p2p] [--local-rank LOCAL_RANK] +100.83.37.188: [--lazy-mpu-init LAZY_MPU_INIT] +100.83.37.188: [--use-cpu-initialization] +100.83.37.188: [--empty-unused-memory-level {0,1,2}] +100.83.37.188: [--standalone-embedding-stage] +100.83.37.188: [--use-distributed-optimizer] +100.83.37.188: [--eval-iters EVAL_ITERS] +100.83.37.188: [--eval-interval EVAL_INTERVAL] [--skip-train] +100.83.37.188: [--eval-loss-exit-value EVAL_LOSS_EXIT_VALUE] +100.83.37.188: [--aml-data-download-path AML_DATA_DOWNLOAD_PATH] +100.83.37.188: [--data-path [DATA_PATH ...]] [--split SPLIT] +100.83.37.188: [--train-data-path [TRAIN_DATA_PATH ...]] +100.83.37.188: [--valid-data-path [VALID_DATA_PATH ...]] +100.83.37.188: [--test-data-path [TEST_DATA_PATH ...]] +100.83.37.188: [--data-cache-path DATA_CACHE_PATH] +100.83.37.188: [--vocab-size VOCAB_SIZE] [--vocab-file VOCAB_FILE] +100.83.37.188: [--merge-file MERGE_FILE] +100.83.37.188: [--vocab-extra-ids VOCAB_EXTRA_IDS] +100.83.37.188: [--seq-length SEQ_LENGTH] +100.83.37.188: [--encoder-seq-length ENCODER_SEQ_LENGTH] +100.83.37.188: [--decoder-seq-length DECODER_SEQ_LENGTH] +100.83.37.188: [--retriever-seq-length RETRIEVER_SEQ_LENGTH] +100.83.37.188: [--sample-rate SAMPLE_RATE] [--mask-prob MASK_PROB] +100.83.37.188: [--short-seq-prob SHORT_SEQ_PROB] [--mmap-warmup] +100.83.37.188: [--num-workers NUM_WORKERS] +100.83.37.188: [--tokenizer-type {BertWordPieceLowerCase,BertWordPieceCase,GPT2BPETokenizer,SentencePieceTokenizer,GPTSentencePieceTokenizer,NullTokenizer}] +100.83.37.188: [--tokenizer-model TOKENIZER_MODEL] +100.83.37.188: [--data-impl {mmap,infer}] [--reset-position-ids] +100.83.37.188: [--reset-attention-mask] [--eod-mask-loss] +100.83.37.188: [--train-data-exact-num-epochs TRAIN_DATA_EXACT_NUM_EPOCHS] +100.83.37.188: [--return-data-index] +100.83.37.188: [--data-efficiency-curriculum-learning] +100.83.37.188: [--train-idx-path TRAIN_IDX_PATH] +100.83.37.188: [--train-desc-path TRAIN_DESC_PATH] +100.83.37.188: [--train-doc-idx-path TRAIN_DOC_IDX_PATH] +100.83.37.188: [--train-sample-idx-path TRAIN_SAMPLE_IDX_PATH] +100.83.37.188: [--train-shuffle-idx-path TRAIN_SHUFFLE_IDX_PATH] +100.83.37.188: [--mask-tensor-adding] [--no-seq-len-plus-one-tokens] +100.83.37.188: [--adlr-autoresume] +100.83.37.188: [--adlr-autoresume-interval ADLR_AUTORESUME_INTERVAL] +100.83.37.188: [--ict-head-size ICT_HEAD_SIZE] +100.83.37.188: [--biencoder-projection-dim BIENCODER_PROJECTION_DIM] +100.83.37.188: [--biencoder-shared-query-context-model] +100.83.37.188: [--ict-load ICT_LOAD] [--bert-load BERT_LOAD] +100.83.37.188: [--titles-data-path TITLES_DATA_PATH] +100.83.37.188: [--query-in-block-prob QUERY_IN_BLOCK_PROB] +100.83.37.188: [--use-one-sent-docs] +100.83.37.188: [--evidence-data-path EVIDENCE_DATA_PATH] +100.83.37.188: [--retriever-report-topk-accuracies RETRIEVER_REPORT_TOPK_ACCURACIES [RETRIEVER_REPORT_TOPK_ACCURACIES ...]] +100.83.37.188: [--retriever-score-scaling] +100.83.37.188: [--block-data-path BLOCK_DATA_PATH] +100.83.37.188: [--embedding-path EMBEDDING_PATH] +100.83.37.188: [--indexer-batch-size INDEXER_BATCH_SIZE] +100.83.37.188: [--indexer-log-interval INDEXER_LOG_INTERVAL] +100.83.37.188: [--num-classes NUM_CLASSES] [--img-h IMG_H] +100.83.37.188: [--img-w IMG_W] [--num-channels NUM_CHANNELS] +100.83.37.188: [--patch-dim PATCH_DIM] +100.83.37.188: [--classes-fraction CLASSES_FRACTION] 
+100.83.37.188: [--data-per-class-fraction DATA_PER_CLASS_FRACTION] +100.83.37.188: [--no-data-sharding] [--head-lr-mult HEAD_LR_MULT] +100.83.37.188: [--vision-pretraining] +100.83.37.188: [--vision-pretraining-type {classify,inpaint,dino}] +100.83.37.188: [--vision-backbone-type {vit,mit,swin}] +100.83.37.188: [--swin-backbone-type {tiny,base,h3}] +100.83.37.188: [--mask-type {random,row}] +100.83.37.188: [--mask-factor MASK_FACTOR] +100.83.37.188: [--iter-per-epoch ITER_PER_EPOCH] +100.83.37.188: [--dino-local-img-size DINO_LOCAL_IMG_SIZE] +100.83.37.188: [--dino-local-crops-number DINO_LOCAL_CROPS_NUMBER] +100.83.37.188: [--dino-head-hidden-size DINO_HEAD_HIDDEN_SIZE] +100.83.37.188: [--dino-bottleneck-size DINO_BOTTLENECK_SIZE] +100.83.37.188: [--dino-freeze-last-layer DINO_FREEZE_LAST_LAYER] +100.83.37.188: [--dino-norm-last-layer] +100.83.37.188: [--dino-warmup-teacher-temp DINO_WARMUP_TEACHER_TEMP] +100.83.37.188: [--dino-teacher-temp DINO_TEACHER_TEMP] +100.83.37.188: [--dino-warmup-teacher-temp-epochs DINO_WARMUP_TEACHER_TEMP_EPOCHS] +100.83.37.188: [--log-params-norm] [--log-num-zeros-in-grad] +100.83.37.188: [--timing-log-level {0,1,2}] +100.83.37.188: [--no-barrier-with-level-1-timing] +100.83.37.188: [--timing-log-option {max,minmax,all}] +100.83.37.188: [--tensorboard-log-interval TENSORBOARD_LOG_INTERVAL] +100.83.37.188: [--tensorboard-queue-size TENSORBOARD_QUEUE_SIZE] +100.83.37.188: [--log-timers-to-tensorboard] +100.83.37.188: [--log-batch-size-to-tensorboard] +100.83.37.188: [--no-log-learnig-rate-to-tensorboard] +100.83.37.188: [--no-log-loss-scale-to-tensorboard] +100.83.37.188: [--log-validation-ppl-to-tensorboard] +100.83.37.188: [--log-optimizer-states-to-tensorboard] +100.83.37.188: [--log-memory-to-tensorboard] +100.83.37.188: [--log-world-size-to-tensorboard] +100.83.37.188: [--zero-stage ZERO_STAGE] [--zero-reduce-scatter] +100.83.37.188: [--zero-contigious-gradients] +100.83.37.188: [--zero-reduce-bucket-size ZERO_REDUCE_BUCKET_SIZE] +100.83.37.188: [--zero-allgather-bucket-size ZERO_ALLGATHER_BUCKET_SIZE] +100.83.37.188: [--remote-device {none,cpu,nvme}] [--use-pin-memory] +100.83.37.188: [--scattered-embeddings] [--split-transformers] +100.83.37.188: [--memory-centric-tiled-linear] +100.83.37.188: [--tile-factor TILE_FACTOR] +100.83.37.188: [--deepspeed-activation-checkpointing] +100.83.37.188: [--partition-activations] +100.83.37.188: [--contigious-checkpointing] [--checkpoint-in-cpu] +100.83.37.188: [--synchronize-each-layer] [--profile-backward] +100.83.37.188: [--num-layers-teacher NUM_LAYERS_TEACHER] +100.83.37.188: [--num-experts-teacher NUM_EXPERTS_TEACHER [NUM_EXPERTS_TEACHER ...]] +100.83.37.188: [--hidden-size-teacher HIDDEN_SIZE_TEACHER] +100.83.37.188: [--num-attention-heads-teacher NUM_ATTENTION_HEADS_TEACHER] +100.83.37.188: [--mos] [--kd] [--kd-alpha-ce KD_ALPHA_CE] +100.83.37.188: [--kd-beta-ce KD_BETA_CE] [--kd-temp KD_TEMP] +100.83.37.188: [--reset-iteration] [--load-teacher LOAD_TEACHER] +100.83.37.188: [--inference-batch-times-seqlen-threshold INFERENCE_BATCH_TIMES_SEQLEN_THRESHOLD] +100.83.37.188: [--max-tokens-to-oom MAX_TOKENS_TO_OOM] +100.83.37.188: [--output-bert-embeddings] +100.83.37.188: [--bert-embedder-type {megatron,huggingface}] +100.83.37.188: [--cache-fp8-weight] +100.83.37.188: [--cache-fp8-weight-fwd CACHE_FP8_WEIGHT_FWD] +100.83.37.188: [--fp8-e5m2] [--fp8-e4m3] [--fp8-hybrid] +100.83.37.188: [--no-fp8-wgrad] [--fp8-margin FP8_MARGIN] +100.83.37.188: [--fp8-interval FP8_INTERVAL] +100.83.37.188: [--transformer-impl 
{local,transformer_engine}] +100.83.37.188: [--fp8-amax-history-len FP8_AMAX_HISTORY_LEN] +100.83.37.188: [--fp8-amax-compute-algo {most_recent,max}] +100.83.37.188: [--retro-workdir RETRO_WORKDIR] +100.83.37.188: [--retro-add-retriever] +100.83.37.188: [--retro-cyclic-train-iters RETRO_CYCLIC_TRAIN_ITERS] +100.83.37.188: [--retro-encoder-layers RETRO_ENCODER_LAYERS] +100.83.37.188: [--retro-encoder-hidden-dropout RETRO_ENCODER_HIDDEN_DROPOUT] +100.83.37.188: [--retro-encoder-attention-dropout RETRO_ENCODER_ATTENTION_DROPOUT] +100.83.37.188: [--retro-num-neighbors RETRO_NUM_NEIGHBORS] +100.83.37.188: [--retro-num-retrieved-chunks RETRO_NUM_RETRIEVED_CHUNKS] +100.83.37.188: [--retro-return-doc-ids] +100.83.37.188: [--profile {pt,pt-full,hltv}] +100.83.37.188: [--profile-steps PROFILE_STEPS] [--deepspeed] +100.83.37.188: [--deepspeed_config DEEPSPEED_CONFIG] [--deepscale] +100.83.37.188: [--deepscale_config DEEPSCALE_CONFIG] +100.83.37.188: [--deepspeed_mpi] [--use_hpu] [--no_cuda] +100.83.37.188: pretrain_llama.py: error: unrecognized arguments: --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --use-torch-compile false --hpu-deterministic --hf-save /data/output/llama13b_multiling_800M/13-05-2024-09:59:29/hf_ckpt +100.83.37.188: usage: pretrain_llama.py [-h] [--num-layers NUM_LAYERS] +100.83.37.188: [--encoder-num-layers ENCODER_NUM_LAYERS] +100.83.37.188: [--decoder-num-layers DECODER_NUM_LAYERS] +100.83.37.188: [--num-experts NUM_EXPERTS [NUM_EXPERTS ...]] +100.83.37.188: [--mlp-type MLP_TYPE] [--topk TOPK] +100.83.37.188: [--expert-interval EXPERT_INTERVAL] +100.83.37.188: [--hidden-size HIDDEN_SIZE] +100.83.37.188: [--ffn-hidden-size FFN_HIDDEN_SIZE] +100.83.37.188: [--num-attention-heads NUM_ATTENTION_HEADS] +100.83.37.188: [--num-key-value-heads NUM_KEY_VALUE_HEADS]
+100.83.37.188: --------------------------------------------------
+100.83.37.188: DeepSpeed C++/CUDA extension op report
+100.83.37.188: --------------------------------------------------
+100.83.37.188: NOTE: Ops not installed will be just-in-time (JIT) compiled at
+100.83.37.188:       runtime if needed. Op compatibility means that your system
+100.83.37.188:       meet the required dependencies to JIT install the op.
+100.83.37.188: --------------------------------------------------
+100.83.37.188: JIT compiled ops requires ninja
+100.83.37.188: ninja .................. [OKAY]
+100.83.37.188: --------------------------------------------------
+100.83.37.188: op name ................ installed .. compatible
+100.83.37.188: --------------------------------------------------
+100.83.37.188: cpu_adam ............... [NO] ....... [OKAY]
+100.83.37.188: fused_adam ............. [NO] ....... [OKAY]
+100.83.37.188: deepspeed_not_implemented [NO] ....... [OKAY]
+100.83.37.188: transformer_inference .. [NO] ....... [OKAY]
+100.83.37.188: --------------------------------------------------
+100.83.37.188: DeepSpeed general environment info:
+100.83.37.188: torch install path ............... ['/usr/local/lib/python3.10/dist-packages/torch']
+100.83.37.188: torch version .................... 2.1.1a0+gitb51c9f6
+100.83.37.188: deepspeed install path ........... ['/usr/local/lib/python3.10/dist-packages/deepspeed']
+100.83.37.188: deepspeed info ................... 0.12.4+hpu.synapse.v1.14.0, fad45b2, 1.14.0
+100.83.37.188: deepspeed wheel compiled w. ...... torch 2.1
+100.83.37.188: shared memory (/dev/shm) size .... 503.75 GB
+100.83.37.188: fatal: detected dubious ownership in repository at '/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed'
+100.83.37.188: To add an exception for this directory, call:
+100.83.37.188:
+100.83.37.188: git config --global --add safe.directory /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed
+100.83.37.188: **** Git info for Megatron: git_hash=unknown git_branch=unknown ****
+100.83.37.175: --------------------------------------------------
+100.83.37.175: DeepSpeed C++/CUDA extension op report
+100.83.37.175: --------------------------------------------------
+100.83.37.175: NOTE: Ops not installed will be just-in-time (JIT) compiled at
+100.83.37.175: runtime if needed. Op compatibility means that your system
+100.83.37.175: meet the required dependencies to JIT install the op.
+100.83.37.175: --------------------------------------------------
+100.83.37.175: JIT compiled ops requires ninja
+100.83.37.175: ninja .................. [OKAY]
+100.83.37.175: --------------------------------------------------
+100.83.37.175: op name ................ installed .. compatible
+100.83.37.175: --------------------------------------------------
+100.83.37.175: cpu_adam ............... [NO] ....... [OKAY]
+100.83.37.175: fused_adam ............. [NO] ....... [OKAY]
+100.83.37.175: deepspeed_not_implemented [NO] ....... [OKAY]
+100.83.37.175: transformer_inference .. [NO] ....... [OKAY]
+100.83.37.175: --------------------------------------------------
+100.83.37.175: DeepSpeed general environment info:
+100.83.37.175: torch install path ............... ['/usr/local/lib/python3.10/dist-packages/torch']
+100.83.37.175: torch version .................... 2.1.1a0+gitb51c9f6
+100.83.37.175: deepspeed install path ........... ['/usr/local/lib/python3.10/dist-packages/deepspeed']
+100.83.37.175: deepspeed info ................... 0.12.4+hpu.synapse.v1.14.0, fad45b2, 1.14.0
+100.83.37.175: deepspeed wheel compiled w. ...... torch 2.1
+100.83.37.175: shared memory (/dev/shm) size .... 503.75 GB
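The op report and environment summary each host prints is the same output DeepSpeed's standalone ds_report utility produces, so the two nodes' stacks (torch 2.1.1a0+gitb51c9f6, deepspeed 0.12.4+hpu.synapse.v1.14.0) could be compared without launching a job at all. A sketch, with pdsh as an illustrative stand-in for whatever parallel shell the cluster actually uses:

   # Hypothetical: collect the DeepSpeed environment report from both
   # workers; dshbak -c coalesces hosts whose output is identical.
   pdsh -w 100.83.37.175,100.83.37.188 ds_report | dshbak -c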
+100.83.37.175: fatal: detected dubious ownership in repository at '/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed'
+100.83.37.175: To add an exception for this directory, call:
+100.83.37.175:
+100.83.37.175: git config --global --add safe.directory /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed
+100.83.37.175: **** Git info for Megatron: git_hash=unknown git_branch=unknown ****
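Both hosts trip git's dubious-ownership check, which is why the Megatron banner above degrades to git_hash=unknown git_branch=unknown: the run itself proceeds, but commit provenance is lost from the log. A sketch of applying the workaround the message itself prints, assuming it has to run inside the container on every worker before launch (pdsh again is only an illustration):

   # Hypothetical: mark the checkout as safe on all workers in one shot.
   pdsh -w 100.83.37.175,100.83.37.188 \
       git config --global --add safe.directory /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed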
+100.83.37.175: [--untie-embeddings-and-output-weights] +100.83.37.175: [--embedding-weights-in-fp32] +100.83.37.175: [--fix-position-emb-redundant-alloc] +100.83.37.175: [--embed-layernorm] +100.83.37.175: [--kill-switch-path KILL_SWITCH_PATH] +100.83.37.175: [--attention-dropout ATTENTION_DROPOUT] +100.83.37.175: [--hidden-dropout HIDDEN_DROPOUT] +100.83.37.175: [--weight-decay WEIGHT_DECAY] +100.83.37.175: [--start-weight-decay START_WEIGHT_DECAY] +100.83.37.175: [--end-weight-decay END_WEIGHT_DECAY] +100.83.37.175: [--weight-decay-incr-style {constant,linear,cosine}] +100.83.37.175: [--clip-grad CLIP_GRAD] [--adam-beta1 ADAM_BETA1] +100.83.37.175: [--adam-beta2 ADAM_BETA2] [--adam-eps ADAM_EPS] +100.83.37.175: [--sgd-momentum SGD_MOMENTUM] +100.83.37.175: [--do-norm-bias-weight-decay] +100.83.37.175: [--micro-batch-size MICRO_BATCH_SIZE] +100.83.37.175: [--eval-micro-batch-size EVAL_MICRO_BATCH_SIZE] +100.83.37.175: [--batch-size BATCH_SIZE] +100.83.37.175: [--global-batch-size GLOBAL_BATCH_SIZE] +100.83.37.175: [--rampup-batch-size [RAMPUP_BATCH_SIZE ...]] +100.83.37.175: [--recompute-activations] +100.83.37.175: [--recompute-granularity {full,selective}] +100.83.37.175: [--distribute-saved-activations] +100.83.37.175: [--recompute-method {uniform,block}] +100.83.37.175: [--recompute-num-layers RECOMPUTE_NUM_LAYERS] +100.83.37.175: [--checkpoint-activations] +100.83.37.175: [--distribute-checkpointed-activations] +100.83.37.175: [--checkpoint-num-layers CHECKPOINT_NUM_LAYERS] +100.83.37.175: [--train-iters TRAIN_ITERS] +100.83.37.175: [--train-samples TRAIN_SAMPLES] +100.83.37.175: [--train-tokens TRAIN_TOKENS] [--random-ltd] +100.83.37.175: [--log-interval LOG_INTERVAL] +100.83.37.175: [--exit-interval EXIT_INTERVAL] +100.83.37.175: [--exit-duration-in-mins EXIT_DURATION_IN_MINS] +100.83.37.175: [--exit-signal-handler] +100.83.37.175: [--tensorboard-dir TENSORBOARD_DIR] +100.83.37.175: [--no-masked-softmax-fusion] [--no-bias-gelu-fusion] +100.83.37.175: [--no-bias-dropout-fusion] +100.83.37.175: [--disable-moe-token-dropping] +100.83.37.175: [--moe-train-capacity-factor MOE_TRAIN_CAPACITY_FACTOR] +100.83.37.175: [--moe-eval-capacity-factor MOE_EVAL_CAPACITY_FACTOR] +100.83.37.175: [--moe-min-capacity MOE_MIN_CAPACITY] +100.83.37.175: [--moe-loss-coeff MOE_LOSS_COEFF] +100.83.37.175: [--create-moe-param-group] [--use-flash-attn] +100.83.37.175: [--use-flash-attn-v2] [--use-flash-attn-triton] +100.83.37.175: [--use-fused-sdpa USE_FUSED_SDPA] +100.83.37.175: [--use-fused-sdpa-with-recompute USE_FUSED_SDPA_WITH_RECOMPUTE] +100.83.37.175: [--disable-bias-linear] +100.83.37.175: [--optimizer {adam,sgd,adamw,fusedadamw}] +100.83.37.175: [--dataloader-type {single,cyclic}] [--ds-inference] +100.83.37.175: [--cpu-optimizer] [--cpu_torch_adam] +100.83.37.175: [--no-pipeline-parallel] [--use-tutel] [--inference] +100.83.37.175: [--no-async-tensor-model-parallel-allreduce] +100.83.37.175: [--no-persist-layer-norm] [--sequence-parallel] +100.83.37.175: [--ds-sequence-parallel-size DS_SEQUENCE_PARALLEL_SIZE] +100.83.37.175: [--force-ds-sequence-parallel] +100.83.37.175: [--no-gradient-accumulation-fusion] +100.83.37.175: [--use-dataset-only USE_DATASET_ONLY] [--seed SEED] +100.83.37.175: [--data-parallel-random-init] +100.83.37.175: [--init-method-std INIT_METHOD_STD] +100.83.37.175: [--init-method-xavier-uniform] [--no-scaled-init] +100.83.37.175: [--lr LR] +100.83.37.175: [--lr-decay-style {constant,linear,cosine,inverse-square-root}] +100.83.37.175: [--lr-decay-iters LR_DECAY_ITERS] 
+100.83.37.175: [--lr-decay-samples LR_DECAY_SAMPLES] +100.83.37.175: [--lr-decay-tokens LR_DECAY_TOKENS] +100.83.37.175: [--lr-warmup-fraction LR_WARMUP_FRACTION] +100.83.37.175: [--lr-warmup-iters LR_WARMUP_ITERS] +100.83.37.175: [--lr-warmup-samples LR_WARMUP_SAMPLES] +100.83.37.175: [--lr-warmup-tokens LR_WARMUP_TOKENS] +100.83.37.175: [--warmup WARMUP] [--min-lr MIN_LR] +100.83.37.175: [--override-opt_param-scheduler] +100.83.37.175: [--use-checkpoint-opt_param-scheduler] [--save SAVE] +100.83.37.175: [--save-interval SAVE_INTERVAL] [--no-save-optim] +100.83.37.175: [--no-save-rng] [--load LOAD] [--no-load-optim] +100.83.37.175: [--no-load-rng] [--no-load-lr-state] [--finetune] +100.83.37.175: [--no-initialization] [--use-checkpoint-args] +100.83.37.175: [--exit-on-missing-checkpoint] +100.83.37.175: [--universal-checkpoint] [--verify-checkpoint] +100.83.37.175: [--verify-checkpoint-model-type {GPT,BLOOM,LLAMA}] +100.83.37.175: [--fp16] [--bf16] [--loss-scale LOSS_SCALE] +100.83.37.175: [--initial-loss-scale INITIAL_LOSS_SCALE] +100.83.37.175: [--min-loss-scale MIN_LOSS_SCALE] +100.83.37.175: [--loss-scale-window LOSS_SCALE_WINDOW] +100.83.37.175: [--hysteresis HYSTERESIS] +100.83.37.175: [--fp32-residual-connection] +100.83.37.175: [--no-query-key-layer-scaling] +100.83.37.175: [--attention-softmax-in-fp32] +100.83.37.175: [--accumulate-allreduce-grads-in-fp32] +100.83.37.175: [--fp16-lm-cross-entropy] +100.83.37.175: [--tensor-model-parallel-size TENSOR_MODEL_PARALLEL_SIZE] +100.83.37.175: [--enable-expert-tensor-parallelism] +100.83.37.175: [--pipeline-model-parallel-size PIPELINE_MODEL_PARALLEL_SIZE] +100.83.37.175: [--pipeline-model-parallel-split-rank PIPELINE_MODEL_PARALLEL_SPLIT_RANK] +100.83.37.175: [--moe-expert-parallel-size MOE_EXPERT_PARALLEL_SIZE] +100.83.37.175: [--model-parallel-size MODEL_PARALLEL_SIZE] +100.83.37.175: [--num-layers-per-virtual-pipeline-stage NUM_LAYERS_PER_VIRTUAL_PIPELINE_STAGE] +100.83.37.175: [--overlap-p2p-communication] +100.83.37.175: [--distributed-backend {nccl,gloo,ccl,hccl}] +100.83.37.175: [--distributed-timeout-minutes DISTRIBUTED_TIMEOUT_MINUTES] +100.83.37.175: [--DDP-impl {local,torch,FSDP}] +100.83.37.175: [--no-contiguous-buffers-in-local-ddp] +100.83.37.175: [--no-scatter-gather-tensors-in-pipeline] +100.83.37.175: [--use-ring-exchange-p2p] [--local-rank LOCAL_RANK] +100.83.37.175: [--lazy-mpu-init LAZY_MPU_INIT] +100.83.37.175: [--use-cpu-initialization] +100.83.37.175: [--empty-unused-memory-level {0,1,2}] +100.83.37.175: [--standalone-embedding-stage] +100.83.37.175: [--use-distributed-optimizer] +100.83.37.175: [--eval-iters EVAL_ITERS] +100.83.37.175: [--eval-interval EVAL_INTERVAL] [--skip-train] +100.83.37.175: [--eval-loss-exit-value EVAL_LOSS_EXIT_VALUE] +100.83.37.175: [--aml-data-download-path AML_DATA_DOWNLOAD_PATH] +100.83.37.175: [--data-path [DATA_PATH ...]] [--split SPLIT] +100.83.37.175: [--train-data-path [TRAIN_DATA_PATH ...]] +100.83.37.175: [--valid-data-path [VALID_DATA_PATH ...]] +100.83.37.175: [--test-data-path [TEST_DATA_PATH ...]] +100.83.37.175: [--data-cache-path DATA_CACHE_PATH] +100.83.37.175: [--vocab-size VOCAB_SIZE] [--vocab-file VOCAB_FILE] +100.83.37.175: [--merge-file MERGE_FILE] +100.83.37.175: [--vocab-extra-ids VOCAB_EXTRA_IDS] +100.83.37.175: [--seq-length SEQ_LENGTH] +100.83.37.175: [--encoder-seq-length ENCODER_SEQ_LENGTH] +100.83.37.175: [--decoder-seq-length DECODER_SEQ_LENGTH] +100.83.37.175: [--retriever-seq-length RETRIEVER_SEQ_LENGTH] +100.83.37.175: [--sample-rate SAMPLE_RATE] 
[--mask-prob MASK_PROB] +100.83.37.175: [--short-seq-prob SHORT_SEQ_PROB] [--mmap-warmup] +100.83.37.175: [--num-workers NUM_WORKERS] +100.83.37.175: [--tokenizer-type {BertWordPieceLowerCase,BertWordPieceCase,GPT2BPETokenizer,SentencePieceTokenizer,GPTSentencePieceTokenizer,NullTokenizer}] +100.83.37.175: [--tokenizer-model TOKENIZER_MODEL] +100.83.37.175: [--data-impl {mmap,infer}] [--reset-position-ids] +100.83.37.175: [--reset-attention-mask] [--eod-mask-loss] +100.83.37.175: [--train-data-exact-num-epochs TRAIN_DATA_EXACT_NUM_EPOCHS] +100.83.37.175: [--return-data-index] +100.83.37.175: [--data-efficiency-curriculum-learning] +100.83.37.175: [--train-idx-path TRAIN_IDX_PATH] +100.83.37.175: [--train-desc-path TRAIN_DESC_PATH] +100.83.37.175: [--train-doc-idx-path TRAIN_DOC_IDX_PATH] +100.83.37.175: [--train-sample-idx-path TRAIN_SAMPLE_IDX_PATH] +100.83.37.175: [--train-shuffle-idx-path TRAIN_SHUFFLE_IDX_PATH] +100.83.37.175: [--mask-tensor-adding] [--no-seq-len-plus-one-tokens] +100.83.37.175: [--adlr-autoresume] +100.83.37.175: [--adlr-autoresume-interval ADLR_AUTORESUME_INTERVAL] +100.83.37.175: [--ict-head-size ICT_HEAD_SIZE] +100.83.37.175: [--biencoder-projection-dim BIENCODER_PROJECTION_DIM] +100.83.37.175: [--biencoder-shared-query-context-model] +100.83.37.175: [--ict-load ICT_LOAD] [--bert-load BERT_LOAD] +100.83.37.175: [--titles-data-path TITLES_DATA_PATH] +100.83.37.175: [--query-in-block-prob QUERY_IN_BLOCK_PROB] +100.83.37.175: [--use-one-sent-docs] +100.83.37.175: [--evidence-data-path EVIDENCE_DATA_PATH] +100.83.37.175: [--retriever-report-topk-accuracies RETRIEVER_REPORT_TOPK_ACCURACIES [RETRIEVER_REPORT_TOPK_ACCURACIES ...]] +100.83.37.175: [--retriever-score-scaling] +100.83.37.175: [--block-data-path BLOCK_DATA_PATH] +100.83.37.175: [--embedding-path EMBEDDING_PATH] +100.83.37.175: [--indexer-batch-size INDEXER_BATCH_SIZE] +100.83.37.175: [--indexer-log-interval INDEXER_LOG_INTERVAL] +100.83.37.175: [--num-classes NUM_CLASSES] [--img-h IMG_H] +100.83.37.175: [--img-w IMG_W] [--num-channels NUM_CHANNELS] +100.83.37.175: [--patch-dim PATCH_DIM] +100.83.37.175: [--classes-fraction CLASSES_FRACTION] +100.83.37.175: [--data-per-class-fraction DATA_PER_CLASS_FRACTION] +100.83.37.175: [--no-data-sharding] [--head-lr-mult HEAD_LR_MULT] +100.83.37.175: [--vision-pretraining] +100.83.37.175: [--vision-pretraining-type {classify,inpaint,dino}] +100.83.37.175: [--vision-backbone-type {vit,mit,swin}] +100.83.37.175: [--swin-backbone-type {tiny,base,h3}] +100.83.37.175: [--mask-type {random,row}] +100.83.37.175: [--mask-factor MASK_FACTOR] +100.83.37.175: [--iter-per-epoch ITER_PER_EPOCH] +100.83.37.175: [--dino-local-img-size DINO_LOCAL_IMG_SIZE] +100.83.37.175: [--dino-local-crops-number DINO_LOCAL_CROPS_NUMBER] +100.83.37.175: [--dino-head-hidden-size DINO_HEAD_HIDDEN_SIZE] +100.83.37.175: [--dino-bottleneck-size DINO_BOTTLENECK_SIZE] +100.83.37.175: [--dino-freeze-last-layer DINO_FREEZE_LAST_LAYER] +100.83.37.175: [--dino-norm-last-layer] +100.83.37.175: [--dino-warmup-teacher-temp DINO_WARMUP_TEACHER_TEMP] +100.83.37.175: [--dino-teacher-temp DINO_TEACHER_TEMP] +100.83.37.175: [--dino-warmup-teacher-temp-epochs DINO_WARMUP_TEACHER_TEMP_EPOCHS] +100.83.37.175: [--log-params-norm] [--log-num-zeros-in-grad] +100.83.37.175: [--timing-log-level {0,1,2}] +100.83.37.175: [--no-barrier-with-level-1-timing] +100.83.37.175: [--timing-log-option {max,minmax,all}] +100.83.37.175: [--tensorboard-log-interval TENSORBOARD_LOG_INTERVAL] +100.83.37.175: [--tensorboard-queue-size 
TENSORBOARD_QUEUE_SIZE] +100.83.37.175: [--log-timers-to-tensorboard] +100.83.37.175: [--log-batch-size-to-tensorboard] +100.83.37.175: [--no-log-learnig-rate-to-tensorboard] +100.83.37.175: [--no-log-loss-scale-to-tensorboard] +100.83.37.175: [--log-validation-ppl-to-tensorboard] +100.83.37.175: [--log-optimizer-states-to-tensorboard] +100.83.37.175: [--log-memory-to-tensorboard] +100.83.37.175: [--log-world-size-to-tensorboard] +100.83.37.175: [--zero-stage ZERO_STAGE] [--zero-reduce-scatter] +100.83.37.175: [--zero-contigious-gradients] +100.83.37.175: [--zero-reduce-bucket-size ZERO_REDUCE_BUCKET_SIZE] +100.83.37.175: [--zero-allgather-bucket-size ZERO_ALLGATHER_BUCKET_SIZE] +100.83.37.175: [--remote-device {none,cpu,nvme}] [--use-pin-memory] +100.83.37.175: [--scattered-embeddings] [--split-transformers] +100.83.37.175: [--memory-centric-tiled-linear] +100.83.37.175: [--tile-factor TILE_FACTOR] +100.83.37.175: [--deepspeed-activation-checkpointing] +100.83.37.175: [--partition-activations] +100.83.37.175: [--contigious-checkpointing] [--checkpoint-in-cpu] +100.83.37.175: [--synchronize-each-layer] [--profile-backward] +100.83.37.175: [--num-layers-teacher NUM_LAYERS_TEACHER] +100.83.37.175: [--num-experts-teacher NUM_EXPERTS_TEACHER [NUM_EXPERTS_TEACHER ...]] +100.83.37.175: [--hidden-size-teacher HIDDEN_SIZE_TEACHER] +100.83.37.175: [--num-attention-heads-teacher NUM_ATTENTION_HEADS_TEACHER] +100.83.37.175: [--mos] [--kd] [--kd-alpha-ce KD_ALPHA_CE] +100.83.37.175: [--kd-beta-ce KD_BETA_CE] [--kd-temp KD_TEMP] +100.83.37.175: [--reset-iteration] [--load-teacher LOAD_TEACHER] +100.83.37.175: [--inference-batch-times-seqlen-threshold INFERENCE_BATCH_TIMES_SEQLEN_THRESHOLD] +100.83.37.175: [--max-tokens-to-oom MAX_TOKENS_TO_OOM] +100.83.37.175: [--output-bert-embeddings] +100.83.37.175: [--bert-embedder-type {megatron,huggingface}] +100.83.37.175: [--cache-fp8-weight] +100.83.37.175: [--cache-fp8-weight-fwd CACHE_FP8_WEIGHT_FWD] +100.83.37.175: [--fp8-e5m2] [--fp8-e4m3] [--fp8-hybrid] +100.83.37.175: [--no-fp8-wgrad] [--fp8-margin FP8_MARGIN] +100.83.37.175: [--fp8-interval FP8_INTERVAL] +100.83.37.175: [--transformer-impl {local,transformer_engine}] +100.83.37.175: [--fp8-amax-history-len FP8_AMAX_HISTORY_LEN] +100.83.37.175: [--fp8-amax-compute-algo {most_recent,max}] +100.83.37.175: [--retro-workdir RETRO_WORKDIR] +100.83.37.175: [--retro-add-retriever] +100.83.37.175: [--retro-cyclic-train-iters RETRO_CYCLIC_TRAIN_ITERS] +100.83.37.175: [--retro-encoder-layers RETRO_ENCODER_LAYERS] +100.83.37.175: [--retro-encoder-hidden-dropout RETRO_ENCODER_HIDDEN_DROPOUT] +100.83.37.175: [--retro-encoder-attention-dropout RETRO_ENCODER_ATTENTION_DROPOUT] +100.83.37.175: [--retro-num-neighbors RETRO_NUM_NEIGHBORS] +100.83.37.175: [--retro-num-retrieved-chunks RETRO_NUM_RETRIEVED_CHUNKS] +100.83.37.175: [--retro-return-doc-ids] +100.83.37.175: [--profile {pt,pt-full,hltv}] +100.83.37.175: [--profile-steps PROFILE_STEPS] [--deepspeed] +100.83.37.175: [--deepspeed_config DEEPSPEED_CONFIG] [--deepscale] +100.83.37.175: [--deepscale_config DEEPSCALE_CONFIG] +100.83.37.175: [--deepspeed_mpi] [--use_hpu] [--no_cuda] +100.83.37.175: pretrain_llama.py: error: unrecognized arguments: --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --use-torch-compile false --hpu-deterministic --hf-save /data/output/llama13b_multiling_800M/13-05-2024-09:59:29/hf_ckpt
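Note on the failure above: this checkout of pretrain_llama.py does not define several of the flags the launcher passed, so argparse aborts before training starts. Going only by the usage text printed above, the closest supported spellings in this revision appear to be (an unverified mapping inferred from the help output, not a confirmed equivalence):

    --position-embedding-type rotary   ->  --use-rotary-position-embeddings
    --layernorm-type rmsnorm           ->  --normalization rmsnorm
    --activation-func-type swiglu      ->  --swiglu
    --no-bias                          ->  --disable-bias-linear

--use-torch-compile, --hpu-deterministic, and --hf-save have no counterpart anywhere in the help output, which suggests the launch command was written against a different Megatron-DeepSpeed revision than the one checked out; aligning the repository version with the launch script, rather than renaming flags one by one, is the safer fix.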
+100.83.37.175: -------------------------------------------------- +100.83.37.175: DeepSpeed C++/CUDA extension op report +100.83.37.175: -------------------------------------------------- +100.83.37.175: NOTE: Ops not installed will be just-in-time (JIT) compiled at +100.83.37.175: runtime if needed. Op compatibility means that your system +100.83.37.175: meet the required dependencies to JIT install the op. +100.83.37.175: -------------------------------------------------- +100.83.37.175: JIT compiled ops requires ninja +100.83.37.175: ninja .................. [OKAY] +100.83.37.175: -------------------------------------------------- +100.83.37.175: op name ................ installed .. compatible +100.83.37.175: -------------------------------------------------- +100.83.37.175: cpu_adam ............... [NO] ....... [OKAY] +100.83.37.175: fused_adam ............. [NO] ....... [OKAY] +100.83.37.175: deepspeed_not_implemented [NO] ....... [OKAY] +100.83.37.175: transformer_inference .. [NO] ....... [OKAY] +100.83.37.175: -------------------------------------------------- +100.83.37.175: DeepSpeed general environment info: +100.83.37.175: torch install path ............... ['/usr/local/lib/python3.10/dist-packages/torch'] +100.83.37.175: torch version .................... 2.1.1a0+gitb51c9f6 +100.83.37.175: deepspeed install path ........... ['/usr/local/lib/python3.10/dist-packages/deepspeed'] +100.83.37.175: deepspeed info ................... 0.12.4+hpu.synapse.v1.14.0, fad45b2, 1.14.0 +100.83.37.175: deepspeed wheel compiled w. ...... torch 2.1 +100.83.37.175: shared memory (/dev/shm) size .... 503.75 GB +100.83.37.175: **** Git info for Megatron: git_hash=unknown git_branch=unknown **** +100.83.37.175: fatal: detected dubious ownership in repository at '/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed' +100.83.37.175: To add an exception for this directory, call: +100.83.37.175: +100.83.37.175: git config --global --add safe.directory /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed
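Note on the 'dubious ownership' warning above: git emits it when the repository is owned by a different user than the one running the job (common when a container bind-mounts /Model-References), and it is likely why the Megatron banner reports git_hash=unknown git_branch=unknown; it does not by itself stop training. Running the command git suggests, once per node before launch, should restore the commit hash in the logs:

    git config --global --add safe.directory /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed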
installed .. compatible +100.83.37.175: -------------------------------------------------- +100.83.37.175: cpu_adam ............... [NO] ....... [OKAY] +100.83.37.175: fused_adam ............. [NO] ....... [OKAY] +100.83.37.175: deepspeed_not_implemented [NO] ....... [OKAY] +100.83.37.175: transformer_inference .. [NO] ....... [OKAY] +100.83.37.175: -------------------------------------------------- +100.83.37.175: DeepSpeed general environment info: +100.83.37.175: torch install path ............... ['/usr/local/lib/python3.10/dist-packages/torch'] +100.83.37.175: torch version .................... 2.1.1a0+gitb51c9f6 +100.83.37.175: deepspeed install path ........... ['/usr/local/lib/python3.10/dist-packages/deepspeed'] +100.83.37.175: deepspeed info ................... 0.12.4+hpu.synapse.v1.14.0, fad45b2, 1.14.0 +100.83.37.175: deepspeed wheel compiled w. ...... torch 2.1 +100.83.37.175: shared memory (/dev/shm) size .... 503.75 GB +100.83.37.175: fatal: detected dubious ownership in repository at '/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed' +100.83.37.175: To add an exception for this directory, call: +100.83.37.175: +100.83.37.175: git config --global --add safe.directory /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed +100.83.37.175: **** Git info for Megatron: git_hash=unknown git_branch=unknown **** +100.83.37.175: -------------------------------------------------- +100.83.37.175: DeepSpeed C++/CUDA extension op report +100.83.37.175: -------------------------------------------------- +100.83.37.175: NOTE: Ops not installed will be just-in-time (JIT) compiled at +100.83.37.175: runtime if needed. Op compatibility means that your system +100.83.37.175: meet the required dependencies to JIT install the op. +100.83.37.175: -------------------------------------------------- +100.83.37.175: JIT compiled ops requires ninja +100.83.37.175: ninja .................. [OKAY] +100.83.37.175: -------------------------------------------------- +100.83.37.175: op name ................ installed .. compatible +100.83.37.175: -------------------------------------------------- +100.83.37.175: cpu_adam ............... [NO] ....... [OKAY] +100.83.37.175: fused_adam ............. [NO] ....... [OKAY] +100.83.37.175: deepspeed_not_implemented [NO] ....... [OKAY] +100.83.37.175: transformer_inference .. [NO] ....... [OKAY] +100.83.37.175: -------------------------------------------------- +100.83.37.175: DeepSpeed general environment info: +100.83.37.175: torch install path ............... ['/usr/local/lib/python3.10/dist-packages/torch'] +100.83.37.175: torch version .................... 2.1.1a0+gitb51c9f6 +100.83.37.175: deepspeed install path ........... ['/usr/local/lib/python3.10/dist-packages/deepspeed'] +100.83.37.175: deepspeed info ................... 0.12.4+hpu.synapse.v1.14.0, fad45b2, 1.14.0 +100.83.37.175: deepspeed wheel compiled w. ...... torch 2.1 +100.83.37.175: shared memory (/dev/shm) size .... 
503.75 GB +100.83.37.175: **** Git info for Megatron: git_hash=unknown git_branch=unknown **** +100.83.37.175: fatal: detected dubious ownership in repository at '/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed' +100.83.37.175: To add an exception for this directory, call: +100.83.37.175: +100.83.37.175: git config --global --add safe.directory /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed +100.83.37.175: usage: pretrain_llama.py [-h] [--num-layers NUM_LAYERS] +100.83.37.175: [--encoder-num-layers ENCODER_NUM_LAYERS] +100.83.37.175: [--decoder-num-layers DECODER_NUM_LAYERS] +100.83.37.175: [--num-experts NUM_EXPERTS [NUM_EXPERTS ...]] +100.83.37.175: [--mlp-type MLP_TYPE] [--topk TOPK] +100.83.37.175: [--expert-interval EXPERT_INTERVAL] +100.83.37.175: [--hidden-size HIDDEN_SIZE] +100.83.37.175: [--ffn-hidden-size FFN_HIDDEN_SIZE] +100.83.37.175: [--num-attention-heads NUM_ATTENTION_HEADS] +100.83.37.175: [--num-key-value-heads NUM_KEY_VALUE_HEADS] +100.83.37.175: [--kv-channels KV_CHANNELS] +100.83.37.175: [--max-position-embeddings MAX_POSITION_EMBEDDINGS] +100.83.37.175: [--use-rotary-position-embeddings] +100.83.37.175: [--rotary-percent ROTARY_PERCENT] +100.83.37.175: [--no-position-embedding] +100.83.37.175: [--make-vocab-size-divisible-by MAKE_VOCAB_SIZE_DIVISIBLE_BY] +100.83.37.175: [--normalization {layernorm,rmsnorm}] +100.83.37.175: [--use-fused-rmsnorm USE_FUSED_RMSNORM] +100.83.37.175: [--layernorm-epsilon LAYERNORM_EPSILON] +100.83.37.175: [--apply-layernorm-1p] [--disable-mem-efficient-ln] +100.83.37.175: [--apply-residual-connection-post-layernorm] +100.83.37.175: [--openai-gelu] [--squared-relu] [--swiglu] +100.83.37.175: [--onnx-safe ONNX_SAFE] [--bert-no-binary-head] +100.83.37.175: [--num-experts-switch NUM_EXPERTS_SWITCH] +100.83.37.175: [--untie-embeddings-and-output-weights] +100.83.37.175: [--embedding-weights-in-fp32] +100.83.37.175: [--fix-position-emb-redundant-alloc] +100.83.37.175: [--embed-layernorm] +100.83.37.175: [--kill-switch-path KILL_SWITCH_PATH] +100.83.37.175: [--attention-dropout ATTENTION_DROPOUT] +100.83.37.175: [--hidden-dropout HIDDEN_DROPOUT] +100.83.37.175: [--weight-decay WEIGHT_DECAY] +100.83.37.175: [--start-weight-decay START_WEIGHT_DECAY] +100.83.37.175: [--end-weight-decay END_WEIGHT_DECAY] +100.83.37.175: [--weight-decay-incr-style {constant,linear,cosine}] +100.83.37.175: [--clip-grad CLIP_GRAD] [--adam-beta1 ADAM_BETA1] +100.83.37.175: [--adam-beta2 ADAM_BETA2] [--adam-eps ADAM_EPS] +100.83.37.175: [--sgd-momentum SGD_MOMENTUM] +100.83.37.175: [--do-norm-bias-weight-decay] +100.83.37.175: [--micro-batch-size MICRO_BATCH_SIZE] +100.83.37.175: [--eval-micro-batch-size EVAL_MICRO_BATCH_SIZE] +100.83.37.175: [--batch-size BATCH_SIZE] +100.83.37.175: [--global-batch-size GLOBAL_BATCH_SIZE] +100.83.37.175: [--rampup-batch-size [RAMPUP_BATCH_SIZE ...]] +100.83.37.175: [--recompute-activations] +100.83.37.175: [--recompute-granularity {full,selective}] +100.83.37.175: [--distribute-saved-activations] +100.83.37.175: [--recompute-method {uniform,block}] +100.83.37.175: [--recompute-num-layers RECOMPUTE_NUM_LAYERS] +100.83.37.175: [--checkpoint-activations] +100.83.37.175: [--distribute-checkpointed-activations] +100.83.37.175: [--checkpoint-num-layers CHECKPOINT_NUM_LAYERS] +100.83.37.175: [--train-iters TRAIN_ITERS] +100.83.37.175: [--train-samples TRAIN_SAMPLES] +100.83.37.175: [--train-tokens TRAIN_TOKENS] [--random-ltd] +100.83.37.175: [--log-interval LOG_INTERVAL] +100.83.37.175: [--exit-interval 
+100.83.37.175: [--deepspeed_config DEEPSPEED_CONFIG] [--deepscale] +100.83.37.175: [--deepscale_config DEEPSCALE_CONFIG] +100.83.37.175: [--deepspeed_mpi] [--use_hpu] [--no_cuda] +100.83.37.175: pretrain_llama.py: error: unrecognized arguments: --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --use-torch-compile false --hpu-deterministic --hf-save /data/output/llama13b_multiling_800M/13-05-2024-09:59:29/hf_ckpt +100.83.37.188: [2024-05-13 09:59:40,527] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 126376 +100.83.37.188: [2024-05-13 09:59:40,528] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 126377 +100.83.37.188: [2024-05-13 09:59:40,529] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 126378 +100.83.37.188: [2024-05-13 09:59:40,529] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 126379 +100.83.37.188: [2024-05-13 09:59:40,529] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 126380 +100.83.37.188: [2024-05-13 09:59:40,530] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 126381 +100.83.37.188: [2024-05-13 09:59:40,530] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 126382 +100.83.37.188: [2024-05-13 09:59:40,530] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 126383 +100.83.37.188: [2024-05-13 09:59:40,530] [ERROR] [launch.py:322:sigkill_handler] ['/usr/bin/bash', '-c', ' cd /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed && python3 -u ./pretrain_llama.py --deepspeed --tensor-model-parallel-size 1 --pipeline-model-parallel-size 1 --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --layernorm-epsilon 1e-6 --num-layers 24 --hidden-size 2048 --ffn-hidden-size 4096 --num-attention-heads 32 --seq-length 2048 --micro-batch-size 1 --global-batch-size 256 --train-iters 10000 --log-interval 10 --eval-iters 10 --eval-interval 500 --data-path /data/hineng/tokenizer//_raw_content_document --vocab-file /data/hineng/tokenizer//gpt2-vocab.json --merge-file /data/hineng/tokenizer//gpt2-merges.txt --optimizer adamw --adam-beta1 0.9 --adam-beta2 0.95 --adam-eps 1e-6 --lr 3e-4 --lr-decay-style cosine --lr-warmup-iters 2000 --clip-grad 1.0 --weight-decay 0.1 --attention-dropout 0.1 --hidden-dropout 0.1 --tensorboard-dir /data/output/llama13b_multiling_800M/13-05-2024-09:59:29/tensorboard --log-validation-ppl-to-tensorboard --log-batch-size-to-tensorboard --log-timers-to-tensorboard --load /data/output/llama13b_multiling_800M/13-05-2024-09:59:29/checkpoints_zero_stage_2 --deepspeed_config=/data/output/llama13b_multiling_800M/13-05-2024-09:59:29/ds_config.json --zero-stage=0 --exit-interval 0 --no-masked-softmax-fusion --no-bias-gelu-fusion --no-bias-dropout-fusion --use-torch-compile false --use-fused-sdpa true --use-fused-sdpa-with-recompute false --bf16 --use_hpu --distributed-backend=hccl --hpu-deterministic --sequence-parallel --save /data/output/llama13b_multiling_800M/13-05-2024-09:59:29/checkpoints_zero_stage_2 --hf-save /data/output/llama13b_multiling_800M/13-05-2024-09:59:29/hf_ckpt --save-interval 500 --verify-checkpoint --verify-checkpoint-model-type LLAMA'] exits with return code = 2 +100.83.37.175: [2024-05-13 09:59:40,706] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 32019 +100.83.37.175: [2024-05-13 09:59:40,707] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 32020 +100.83.37.175: [2024-05-13 09:59:40,708] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 32021 
+100.83.37.175: [2024-05-13 09:59:40,708] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 32022 +100.83.37.175: [2024-05-13 09:59:40,709] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 32026 +100.83.37.175: [2024-05-13 09:59:40,709] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 32029 +100.83.37.175: [2024-05-13 09:59:40,709] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 32033 +100.83.37.175: [2024-05-13 09:59:40,710] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 32036 +100.83.37.175: [2024-05-13 09:59:40,710] [ERROR] [launch.py:322:sigkill_handler] ['/usr/bin/bash', '-c', ' cd /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed && python3 -u ./pretrain_llama.py --deepspeed --tensor-model-parallel-size 1 --pipeline-model-parallel-size 1 --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --layernorm-epsilon 1e-6 --num-layers 24 --hidden-size 2048 --ffn-hidden-size 4096 --num-attention-heads 32 --seq-length 2048 --micro-batch-size 1 --global-batch-size 256 --train-iters 10000 --log-interval 10 --eval-iters 10 --eval-interval 500 --data-path /data/hineng/tokenizer//_raw_content_document --vocab-file /data/hineng/tokenizer//gpt2-vocab.json --merge-file /data/hineng/tokenizer//gpt2-merges.txt --optimizer adamw --adam-beta1 0.9 --adam-beta2 0.95 --adam-eps 1e-6 --lr 3e-4 --lr-decay-style cosine --lr-warmup-iters 2000 --clip-grad 1.0 --weight-decay 0.1 --attention-dropout 0.1 --hidden-dropout 0.1 --tensorboard-dir /data/output/llama13b_multiling_800M/13-05-2024-09:59:29/tensorboard --log-validation-ppl-to-tensorboard --log-batch-size-to-tensorboard --log-timers-to-tensorboard --load /data/output/llama13b_multiling_800M/13-05-2024-09:59:29/checkpoints_zero_stage_2 --deepspeed_config=/data/output/llama13b_multiling_800M/13-05-2024-09:59:29/ds_config.json --zero-stage=0 --exit-interval 0 --no-masked-softmax-fusion --no-bias-gelu-fusion --no-bias-dropout-fusion --use-torch-compile false --use-fused-sdpa true --use-fused-sdpa-with-recompute false --bf16 --use_hpu --distributed-backend=hccl --hpu-deterministic --sequence-parallel --save /data/output/llama13b_multiling_800M/13-05-2024-09:59:29/checkpoints_zero_stage_2 --hf-save /data/output/llama13b_multiling_800M/13-05-2024-09:59:29/hf_ckpt --save-interval 500 --verify-checkpoint --verify-checkpoint-model-type LLAMA'] exits with return code = 2 +pdsh@vizzhy-150-3: 100.83.37.188: ssh exited with exit code 2 +pdsh@vizzhy-150-3: 100.83.37.175: ssh exited with exit code 2 diff --git a/llama13b_multiling_800M/13-05-2024-09:59:29/mds_to_hf_llama_custom.json b/llama13b_multiling_800M/13-05-2024-09:59:29/mds_to_hf_llama_custom.json new file mode 100644 index 0000000000000000000000000000000000000000..984df1b8e167dc47c40209bd7e305c2e697f43a2 --- /dev/null +++ b/llama13b_multiling_800M/13-05-2024-09:59:29/mds_to_hf_llama_custom.json @@ -0,0 +1,40 @@ +{ + "MODEL": { + "num_hidden_layers": 24, + "hidden_size": 2048, + "num_attention_heads": 32, + "intermediate_size": 4096, + "vocab_size":VOCAB_SIZE + }, + "LAYER_MAPPINGS" : { + "word_embeddings": 1, + "transformer": [3, 26], + "final_layernorm": 28, + "final_word_embeddings": 29 + }, + "FULL_NAME_MAPPINGS": { + }, + "PARTIAL_NAME_MAPPINGS": { + "final_word_embeddings": { + "vocab_parallel_projection": "lm_head" + }, + "final_layernorm": { + "final_rmsnorm": "model.norm" + }, + "word_embeddings": { + "word_embeddings": "model.embed_tokens" + }, + "transformer": { + "dense_h_to_4h": "mlp.gate_proj", + 
"dense_4h_to_h": "mlp.down_proj", + "dense_h_to_4h_swiglu": "mlp.up_proj", + "post_attention_layernorm": "post_attention_layernorm", + "input_layernorm": "input_layernorm", + "dense": "self_attn.o_proj", + "query_key_value": {"query": "self_attn.q_proj", "key": "self_attn.k_proj", "value": "self_attn.v_proj"} + } + }, + "SPECIAL": { + "query_key_value": "attention_qkv" + } +} diff --git a/llama13b_multiling_800M/13-05-2024-11:50:01/ds_config.json b/llama13b_multiling_800M/13-05-2024-11:50:01/ds_config.json new file mode 100644 index 0000000000000000000000000000000000000000..d0ee47e9cda5f537a1be4430e2806d723acd2356 --- /dev/null +++ b/llama13b_multiling_800M/13-05-2024-11:50:01/ds_config.json @@ -0,0 +1,19 @@ +{ + "train_batch_size" : 256, + "train_micro_batch_size_per_gpu": 1, + "steps_per_print": 10, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": 0 + }, + "bf16": { + "enabled": true, + "accumulate_grads_via_hooks": true + }, + "fp16": {"enabled": false}, + "wall_clock_breakdown": false, + "pipeline": { + "pipe_partitioned": false, + "grad_partitioned": false + } +} diff --git a/llama13b_multiling_800M/13-05-2024-11:50:01/log.txt b/llama13b_multiling_800M/13-05-2024-11:50:01/log.txt new file mode 100644 index 0000000000000000000000000000000000000000..daaa110538d60794c8d33e8de5f2c4f74eed7abb --- /dev/null +++ b/llama13b_multiling_800M/13-05-2024-11:50:01/log.txt @@ -0,0 +1,3923 @@ +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. + warnings.warn( +[2024-05-13 11:50:03,059] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +[2024-05-13 11:50:04,374] [INFO] [runner.py:574:main] deepspeed_env file = /root/.deepspeed_env +[2024-05-13 11:50:04,374] [INFO] [multinode_runner.py:72:get_cmd] Running on the following workers: 100.83.37.175,100.83.37.188 +[2024-05-13 11:50:04,374] [INFO] [runner.py:585:main] cmd = pdsh -S -f 1024 -w 100.83.37.175,100.83.37.188 export PYTHONPATH=/Model-References/PyTorch/common:/usr/lib/habanalabs/:$PYTHONPATH; export PYTHON=/usr/bin/python3; export PT_HPU_LAZY_ACC_PAR_MODE=0; export PT_HPU_ENABLE_REFINE_DYNAMIC_SHAPES=0; export PT_HPU_ENABLE_WEIGHT_CPU_PERMUTE=0; export MODEL_REFERENCES_ROOT=/Model-References; export ENABLE_CONSOLE=false; export LOG_LEVEL_ALL=4; cd /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed; /usr/bin/python3 -u -m deepspeed.launcher.launch --world_info=eyIxMDAuODMuMzcuMTc1IjogWzAsIDEsIDIsIDMsIDQsIDUsIDYsIDddLCAiMTAwLjgzLjM3LjE4OCI6IFswLCAxLCAyLCAzLCA0LCA1LCA2LCA3XX0= --node_rank=%n --master_addr=100.83.37.175 --master_port=29500 --no_python --no_local_rank /usr/bin/bash -c ' cd /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed && python3 -u ./pretrain_llama.py --deepspeed --tensor-model-parallel-size 1 --pipeline-model-parallel-size 1 --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --layernorm-epsilon 1e-6 --num-layers 24 --hidden-size 2048 --ffn-hidden-size 4096 --num-attention-heads 32 --seq-length 2048 --micro-batch-size 1 --global-batch-size 256 --train-iters 10000 --log-interval 10 --eval-iters 10 --eval-interval 500 --data-path /data/hineng/tokenizer//_raw_content_document --vocab-file /data/hineng/tokenizer//gpt2-vocab.json --merge-file /data/hineng/tokenizer//gpt2-merges.txt --optimizer adamw --adam-beta1 0.9 
--adam-beta2 0.95 --adam-eps 1e-6 --lr 3e-4 --lr-decay-style cosine --lr-warmup-iters 2000 --clip-grad 1.0 --weight-decay 0.1 --attention-dropout 0.1 --hidden-dropout 0.1 --tensorboard-dir /data/output/llama13b_multiling_800M/13-05-2024-11:50:01/tensorboard --log-validation-ppl-to-tensorboard --log-batch-size-to-tensorboard --log-timers-to-tensorboard --load /data/output/llama13b_multiling_800M/13-05-2024-11:50:01/checkpoints_zero_stage_2 --deepspeed_config=/data/output/llama13b_multiling_800M/13-05-2024-11:50:01/ds_config.json --zero-stage=0 --exit-interval 0 --no-masked-softmax-fusion --no-bias-gelu-fusion --no-bias-dropout-fusion --use-torch-compile false --use-fused-sdpa true --use-fused-sdpa-with-recompute false --bf16 --use_hpu --distributed-backend=hccl --hpu-deterministic --sequence-parallel --save /data/output/llama13b_multiling_800M/13-05-2024-11:50:01/checkpoints_zero_stage_2 --hf-save /data/output/llama13b_multiling_800M/13-05-2024-11:50:01/hf_ckpt --save-interval 500 --verify-checkpoint --verify-checkpoint-model-type LLAMA' +100.83.37.175: Warning: Permanently added '[100.83.37.175]:3122' (ED25519) to the list of known hosts. +100.83.37.188: Warning: Permanently added '[100.83.37.188]:3122' (ED25519) to the list of known hosts. +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: ...done. +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: ...done. +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.175: warnings.warn( +100.83.37.175: [2024-05-13 11:50:06,041] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.188: warnings.warn( +100.83.37.188: [2024-05-13 11:50:06,084] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.188: [2024-05-13 11:50:07,212] [INFO] [launch.py:146:main] WORLD INFO DICT: {'100.83.37.175': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.37.188': [0, 1, 2, 3, 4, 5, 6, 7]} +100.83.37.188: [2024-05-13 11:50:07,212] [INFO] [launch.py:152:main] nnodes=2, num_local_procs=8, node_rank=1 +100.83.37.188: [2024-05-13 11:50:07,212] [INFO] [launch.py:163:main] global_rank_mapping=defaultdict(, {'100.83.37.175': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.37.188': [8, 9, 10, 11, 12, 13, 14, 15]}) +100.83.37.188: [2024-05-13 11:50:07,212] [INFO] [launch.py:164:main] dist_world_size=16 +100.83.37.188: [2024-05-13 11:50:07,212] [INFO] [launch.py:166:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: ...done. +100.83.37.188: ...done. +100.83.37.188: ...done. +100.83.37.188: ...done. +100.83.37.188: ...done. +100.83.37.188: ...done. +100.83.37.188: ...done. 
+100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: ...done. +100.83.37.175: [2024-05-13 11:50:07,442] [INFO] [launch.py:146:main] WORLD INFO DICT: {'100.83.37.175': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.37.188': [0, 1, 2, 3, 4, 5, 6, 7]} +100.83.37.175: [2024-05-13 11:50:07,443] [INFO] [launch.py:152:main] nnodes=2, num_local_procs=8, node_rank=0 +100.83.37.175: [2024-05-13 11:50:07,443] [INFO] [launch.py:163:main] global_rank_mapping=defaultdict(, {'100.83.37.175': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.37.188': [8, 9, 10, 11, 12, 13, 14, 15]}) +100.83.37.175: [2024-05-13 11:50:07,443] [INFO] [launch.py:164:main] dist_world_size=16 +100.83.37.175: [2024-05-13 11:50:07,443] [INFO] [launch.py:166:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: ...done. +100.83.37.175: ...done. +100.83.37.175: ...done. +100.83.37.175: ...done. +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: ...done. +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: ...done. +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: ...done. +100.83.37.175: ...done. +100.83.37.188: [2024-05-13 11:50:09,078] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.188: warnings.warn( +100.83.37.188: [2024-05-13 11:50:09,095] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.188: warnings.warn( +100.83.37.188: [2024-05-13 11:50:09,117] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.188: warnings.warn( +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.188: warnings.warn( +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. 
+100.83.37.188: warnings.warn( +100.83.37.188: [2024-05-13 11:50:09,117] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.188: [2024-05-13 11:50:09,117] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.188: [2024-05-13 11:50:09,136] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.188: warnings.warn( +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.188: warnings.warn( +100.83.37.188: [2024-05-13 11:50:09,138] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.175: [2024-05-13 11:50:09,176] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.175: [2024-05-13 11:50:09,176] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.175: warnings.warn( +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.175: warnings.warn( +100.83.37.175: [2024-05-13 11:50:09,178] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.175: warnings.warn( +100.83.37.175: [2024-05-13 11:50:09,180] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.175: warnings.warn( +100.83.37.175: [2024-05-13 11:50:09,192] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.175: warnings.warn( +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. 
+100.83.37.175: warnings.warn( +100.83.37.175: [2024-05-13 11:50:09,194] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.188: [2024-05-13 11:50:09,725] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.188: warnings.warn( +100.83.37.175: [2024-05-13 11:50:09,863] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.175: warnings.warn( +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.175: warnings.warn( +100.83.37.175: [2024-05-13 11:50:09,868] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.188: -------------------------------------------------- +100.83.37.188: DeepSpeed C++/CUDA extension op report +100.83.37.188: -------------------------------------------------- +100.83.37.188: NOTE: Ops not installed will be just-in-time (JIT) compiled at +100.83.37.188: runtime if needed. Op compatibility means that your system +100.83.37.188: meet the required dependencies to JIT install the op. +100.83.37.188: -------------------------------------------------- +100.83.37.188: JIT compiled ops requires ninja +100.83.37.188: -------------------------------------------------- +100.83.37.188: ninja .................. [OKAY] +100.83.37.188: -------------------------------------------------- +100.83.37.188: op name ................ installed .. compatible +100.83.37.188: -------------------------------------------------- +100.83.37.188: cpu_adam ............... [NO] ....... [OKAY] +100.83.37.188: fused_adam ............. [NO] ....... [OKAY] +100.83.37.188: deepspeed_not_implemented [NO] ....... [OKAY] +100.83.37.188: transformer_inference .. [NO] ....... [OKAY]
+100.83.37.188: -------------------------------------------------- +100.83.37.188: DeepSpeed general environment info: +100.83.37.188: torch install path ............... ['/usr/local/lib/python3.10/dist-packages/torch'] +100.83.37.188: torch version .................... 2.1.1a0+gitb51c9f6 +100.83.37.188: deepspeed install path ........... ['/usr/local/lib/python3.10/dist-packages/deepspeed'] +100.83.37.188: deepspeed info ................... 0.12.4+hpu.synapse.v1.14.0, fad45b2, 1.14.0 +100.83.37.188: deepspeed wheel compiled w. ...... torch 2.1 +100.83.37.188: shared memory (/dev/shm) size .... 503.75 GB +100.83.37.188: fatal: detected dubious ownership in repository at '/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed' +100.83.37.188: To add an exception for this directory, call: +100.83.37.188: +100.83.37.188: git config --global --add safe.directory /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed +100.83.37.188: **** Git info for Megatron: git_hash=unknown git_branch=unknown **** +100.83.37.188: usage: pretrain_llama.py [-h] [--num-layers NUM_LAYERS] +100.83.37.188: [--encoder-num-layers ENCODER_NUM_LAYERS] +100.83.37.188: [--decoder-num-layers DECODER_NUM_LAYERS] +100.83.37.188: [--num-experts NUM_EXPERTS [NUM_EXPERTS ...]] +100.83.37.188: [--mlp-type MLP_TYPE] [--topk TOPK] +100.83.37.188: [--expert-interval EXPERT_INTERVAL] +100.83.37.188: [--hidden-size HIDDEN_SIZE] +100.83.37.188: [--ffn-hidden-size FFN_HIDDEN_SIZE] +100.83.37.188: [--num-attention-heads NUM_ATTENTION_HEADS] +100.83.37.188: [--num-key-value-heads NUM_KEY_VALUE_HEADS] +100.83.37.188: [--kv-channels KV_CHANNELS] +100.83.37.188: [--max-position-embeddings MAX_POSITION_EMBEDDINGS] +100.83.37.188: [--use-rotary-position-embeddings] +100.83.37.188: [--rotary-percent ROTARY_PERCENT] +100.83.37.188: [--no-position-embedding] +100.83.37.188: [--make-vocab-size-divisible-by MAKE_VOCAB_SIZE_DIVISIBLE_BY] +100.83.37.188: [--normalization {layernorm,rmsnorm}] +100.83.37.188: [--use-fused-rmsnorm USE_FUSED_RMSNORM] +100.83.37.188: [--layernorm-epsilon LAYERNORM_EPSILON] +100.83.37.188: [--apply-layernorm-1p] [--disable-mem-efficient-ln] +100.83.37.188: [--apply-residual-connection-post-layernorm] +100.83.37.188: [--openai-gelu] [--squared-relu] [--swiglu] +100.83.37.188: [--onnx-safe ONNX_SAFE] [--bert-no-binary-head] +100.83.37.188: [--num-experts-switch NUM_EXPERTS_SWITCH] +100.83.37.188: [--untie-embeddings-and-output-weights] +100.83.37.188: [--embedding-weights-in-fp32] +100.83.37.188: [--fix-position-emb-redundant-alloc] +100.83.37.188:
[--embed-layernorm] +100.83.37.188: [--kill-switch-path KILL_SWITCH_PATH] +100.83.37.188: [--attention-dropout ATTENTION_DROPOUT] +100.83.37.188: [--hidden-dropout HIDDEN_DROPOUT] +100.83.37.188: [--weight-decay WEIGHT_DECAY] +100.83.37.188: [--start-weight-decay START_WEIGHT_DECAY] +100.83.37.188: [--end-weight-decay END_WEIGHT_DECAY] +100.83.37.188: [--weight-decay-incr-style {constant,linear,cosine}] +100.83.37.188: [--clip-grad CLIP_GRAD] [--adam-beta1 ADAM_BETA1] +100.83.37.188: [--adam-beta2 ADAM_BETA2] [--adam-eps ADAM_EPS] +100.83.37.188: [--sgd-momentum SGD_MOMENTUM] +100.83.37.188: [--do-norm-bias-weight-decay] +100.83.37.188: [--micro-batch-size MICRO_BATCH_SIZE] +100.83.37.188: [--eval-micro-batch-size EVAL_MICRO_BATCH_SIZE] +100.83.37.188: [--batch-size BATCH_SIZE] +100.83.37.188: [--global-batch-size GLOBAL_BATCH_SIZE] +100.83.37.188: [--rampup-batch-size [RAMPUP_BATCH_SIZE ...]] +100.83.37.188: [--recompute-activations] +100.83.37.188: [--recompute-granularity {full,selective}] +100.83.37.188: [--distribute-saved-activations] +100.83.37.188: [--recompute-method {uniform,block}] +100.83.37.188: [--recompute-num-layers RECOMPUTE_NUM_LAYERS] +100.83.37.188: [--checkpoint-activations] +100.83.37.188: [--distribute-checkpointed-activations] +100.83.37.188: [--checkpoint-num-layers CHECKPOINT_NUM_LAYERS] +100.83.37.188: [--train-iters TRAIN_ITERS] +100.83.37.188: [--train-samples TRAIN_SAMPLES] +100.83.37.188: [--train-tokens TRAIN_TOKENS] [--random-ltd] +100.83.37.188: [--log-interval LOG_INTERVAL] +100.83.37.188: [--exit-interval EXIT_INTERVAL] +100.83.37.188: [--exit-duration-in-mins EXIT_DURATION_IN_MINS] +100.83.37.188: [--exit-signal-handler] +100.83.37.188: [--tensorboard-dir TENSORBOARD_DIR] +100.83.37.188: [--no-masked-softmax-fusion] [--no-bias-gelu-fusion] +100.83.37.188: [--no-bias-dropout-fusion] +100.83.37.188: [--disable-moe-token-dropping] +100.83.37.188: [--moe-train-capacity-factor MOE_TRAIN_CAPACITY_FACTOR] +100.83.37.188: [--moe-eval-capacity-factor MOE_EVAL_CAPACITY_FACTOR] +100.83.37.188: [--moe-min-capacity MOE_MIN_CAPACITY] +100.83.37.188: [--moe-loss-coeff MOE_LOSS_COEFF] +100.83.37.188: [--create-moe-param-group] [--use-flash-attn] +100.83.37.188: [--use-flash-attn-v2] [--use-flash-attn-triton] +100.83.37.188: [--use-fused-sdpa USE_FUSED_SDPA] +100.83.37.188: [--use-fused-sdpa-with-recompute USE_FUSED_SDPA_WITH_RECOMPUTE] +100.83.37.188: [--disable-bias-linear] +100.83.37.188: [--optimizer {adam,sgd,adamw,fusedadamw}] +100.83.37.188: [--dataloader-type {single,cyclic}] [--ds-inference] +100.83.37.188: [--cpu-optimizer] [--cpu_torch_adam] +100.83.37.188: [--no-pipeline-parallel] [--use-tutel] [--inference] +100.83.37.188: [--no-async-tensor-model-parallel-allreduce] +100.83.37.188: [--no-persist-layer-norm] [--sequence-parallel] +100.83.37.188: [--ds-sequence-parallel-size DS_SEQUENCE_PARALLEL_SIZE] +100.83.37.188: [--force-ds-sequence-parallel] +100.83.37.188: [--no-gradient-accumulation-fusion] +100.83.37.188: [--use-dataset-only USE_DATASET_ONLY] [--seed SEED] +100.83.37.188: [--data-parallel-random-init] +100.83.37.188: [--init-method-std INIT_METHOD_STD] +100.83.37.188: [--init-method-xavier-uniform] [--no-scaled-init] +100.83.37.188: [--lr LR] +100.83.37.188: [--lr-decay-style {constant,linear,cosine,inverse-square-root}] +100.83.37.188: [--lr-decay-iters LR_DECAY_ITERS] +100.83.37.188: [--lr-decay-samples LR_DECAY_SAMPLES] +100.83.37.188: [--lr-decay-tokens LR_DECAY_TOKENS] +100.83.37.188: [--lr-warmup-fraction LR_WARMUP_FRACTION] +100.83.37.188: 
[--lr-warmup-iters LR_WARMUP_ITERS] +100.83.37.188: [--lr-warmup-samples LR_WARMUP_SAMPLES] +100.83.37.188: [--lr-warmup-tokens LR_WARMUP_TOKENS] +100.83.37.188: [--warmup WARMUP] [--min-lr MIN_LR] +100.83.37.188: [--override-opt_param-scheduler] +100.83.37.188: [--use-checkpoint-opt_param-scheduler] [--save SAVE] +100.83.37.188: [--save-interval SAVE_INTERVAL] [--no-save-optim] +100.83.37.188: [--no-save-rng] [--load LOAD] [--no-load-optim] +100.83.37.188: [--no-load-rng] [--no-load-lr-state] [--finetune] +100.83.37.188: [--no-initialization] [--use-checkpoint-args] +100.83.37.188: [--exit-on-missing-checkpoint] +100.83.37.188: [--universal-checkpoint] [--verify-checkpoint] +100.83.37.188: [--verify-checkpoint-model-type {GPT,BLOOM,LLAMA}] +100.83.37.188: [--fp16] [--bf16] [--loss-scale LOSS_SCALE] +100.83.37.188: [--initial-loss-scale INITIAL_LOSS_SCALE] +100.83.37.188: [--min-loss-scale MIN_LOSS_SCALE] +100.83.37.188: [--loss-scale-window LOSS_SCALE_WINDOW] +100.83.37.188: [--hysteresis HYSTERESIS] +100.83.37.188: [--fp32-residual-connection] +100.83.37.188: [--no-query-key-layer-scaling] +100.83.37.188: [--attention-softmax-in-fp32] +100.83.37.188: [--accumulate-allreduce-grads-in-fp32] +100.83.37.188: [--fp16-lm-cross-entropy] +100.83.37.188: [--tensor-model-parallel-size TENSOR_MODEL_PARALLEL_SIZE] +100.83.37.188: [--enable-expert-tensor-parallelism] +100.83.37.188: [--pipeline-model-parallel-size PIPELINE_MODEL_PARALLEL_SIZE] +100.83.37.188: [--pipeline-model-parallel-split-rank PIPELINE_MODEL_PARALLEL_SPLIT_RANK] +100.83.37.188: [--moe-expert-parallel-size MOE_EXPERT_PARALLEL_SIZE] +100.83.37.188: [--model-parallel-size MODEL_PARALLEL_SIZE] +100.83.37.188: [--num-layers-per-virtual-pipeline-stage NUM_LAYERS_PER_VIRTUAL_PIPELINE_STAGE] +100.83.37.188: [--overlap-p2p-communication] +100.83.37.188: [--distributed-backend {nccl,gloo,ccl,hccl}] +100.83.37.188: [--distributed-timeout-minutes DISTRIBUTED_TIMEOUT_MINUTES] +100.83.37.188: [--DDP-impl {local,torch,FSDP}] +100.83.37.188: [--no-contiguous-buffers-in-local-ddp] +100.83.37.188: [--no-scatter-gather-tensors-in-pipeline] +100.83.37.188: [--use-ring-exchange-p2p] [--local-rank LOCAL_RANK] +100.83.37.188: [--lazy-mpu-init LAZY_MPU_INIT] +100.83.37.188: [--use-cpu-initialization] +100.83.37.188: [--empty-unused-memory-level {0,1,2}] +100.83.37.188: [--standalone-embedding-stage] +100.83.37.188: [--use-distributed-optimizer] +100.83.37.188: [--eval-iters EVAL_ITERS] +100.83.37.188: [--eval-interval EVAL_INTERVAL] [--skip-train] +100.83.37.188: [--eval-loss-exit-value EVAL_LOSS_EXIT_VALUE] +100.83.37.188: [--aml-data-download-path AML_DATA_DOWNLOAD_PATH] +100.83.37.188: [--data-path [DATA_PATH ...]] [--split SPLIT] +100.83.37.188: [--train-data-path [TRAIN_DATA_PATH ...]] +100.83.37.188: [--valid-data-path [VALID_DATA_PATH ...]] +100.83.37.188: [--test-data-path [TEST_DATA_PATH ...]] +100.83.37.188: [--data-cache-path DATA_CACHE_PATH] +100.83.37.188: [--vocab-size VOCAB_SIZE] [--vocab-file VOCAB_FILE] +100.83.37.188: [--merge-file MERGE_FILE] +100.83.37.188: [--vocab-extra-ids VOCAB_EXTRA_IDS] +100.83.37.188: [--seq-length SEQ_LENGTH] +100.83.37.188: [--encoder-seq-length ENCODER_SEQ_LENGTH] +100.83.37.188: [--decoder-seq-length DECODER_SEQ_LENGTH] +100.83.37.188: [--retriever-seq-length RETRIEVER_SEQ_LENGTH] +100.83.37.188: [--sample-rate SAMPLE_RATE] [--mask-prob MASK_PROB] +100.83.37.188: [--short-seq-prob SHORT_SEQ_PROB] [--mmap-warmup] +100.83.37.188: [--num-workers NUM_WORKERS] +100.83.37.188: [--tokenizer-type 
{BertWordPieceLowerCase,BertWordPieceCase,GPT2BPETokenizer,SentencePieceTokenizer,GPTSentencePieceTokenizer,NullTokenizer}] +100.83.37.188: [--tokenizer-model TOKENIZER_MODEL] +100.83.37.188: [--data-impl {mmap,infer}] [--reset-position-ids] +100.83.37.188: [--reset-attention-mask] [--eod-mask-loss] +100.83.37.188: [--train-data-exact-num-epochs TRAIN_DATA_EXACT_NUM_EPOCHS] +100.83.37.188: [--return-data-index] +100.83.37.188: [--data-efficiency-curriculum-learning] +100.83.37.188: [--train-idx-path TRAIN_IDX_PATH] +100.83.37.188: [--train-desc-path TRAIN_DESC_PATH] +100.83.37.188: [--train-doc-idx-path TRAIN_DOC_IDX_PATH] +100.83.37.188: [--train-sample-idx-path TRAIN_SAMPLE_IDX_PATH] +100.83.37.188: [--train-shuffle-idx-path TRAIN_SHUFFLE_IDX_PATH] +100.83.37.188: [--mask-tensor-adding] [--no-seq-len-plus-one-tokens] +100.83.37.188: [--adlr-autoresume] +100.83.37.188: [--adlr-autoresume-interval ADLR_AUTORESUME_INTERVAL] +100.83.37.188: [--ict-head-size ICT_HEAD_SIZE] +100.83.37.188: [--biencoder-projection-dim BIENCODER_PROJECTION_DIM] +100.83.37.188: [--biencoder-shared-query-context-model] +100.83.37.188: [--ict-load ICT_LOAD] [--bert-load BERT_LOAD] +100.83.37.188: [--titles-data-path TITLES_DATA_PATH] +100.83.37.188: [--query-in-block-prob QUERY_IN_BLOCK_PROB] +100.83.37.188: [--use-one-sent-docs] +100.83.37.188: [--evidence-data-path EVIDENCE_DATA_PATH] +100.83.37.188: [--retriever-report-topk-accuracies RETRIEVER_REPORT_TOPK_ACCURACIES [RETRIEVER_REPORT_TOPK_ACCURACIES ...]] +100.83.37.188: [--retriever-score-scaling] +100.83.37.188: [--block-data-path BLOCK_DATA_PATH] +100.83.37.188: [--embedding-path EMBEDDING_PATH] +100.83.37.188: [--indexer-batch-size INDEXER_BATCH_SIZE] +100.83.37.188: [--indexer-log-interval INDEXER_LOG_INTERVAL] +100.83.37.188: [--num-classes NUM_CLASSES] [--img-h IMG_H] +100.83.37.188: [--img-w IMG_W] [--num-channels NUM_CHANNELS] +100.83.37.188: [--patch-dim PATCH_DIM] +100.83.37.188: [--classes-fraction CLASSES_FRACTION] +100.83.37.188: [--data-per-class-fraction DATA_PER_CLASS_FRACTION] +100.83.37.188: [--no-data-sharding] [--head-lr-mult HEAD_LR_MULT] +100.83.37.188: [--vision-pretraining] +100.83.37.188: [--vision-pretraining-type {classify,inpaint,dino}] +100.83.37.188: [--vision-backbone-type {vit,mit,swin}] +100.83.37.188: [--swin-backbone-type {tiny,base,h3}] +100.83.37.188: [--mask-type {random,row}] +100.83.37.188: [--mask-factor MASK_FACTOR] +100.83.37.188: [--iter-per-epoch ITER_PER_EPOCH] +100.83.37.188: [--dino-local-img-size DINO_LOCAL_IMG_SIZE] +100.83.37.188: [--dino-local-crops-number DINO_LOCAL_CROPS_NUMBER] +100.83.37.188: [--dino-head-hidden-size DINO_HEAD_HIDDEN_SIZE] +100.83.37.188: [--dino-bottleneck-size DINO_BOTTLENECK_SIZE] +100.83.37.188: [--dino-freeze-last-layer DINO_FREEZE_LAST_LAYER] +100.83.37.188: [--dino-norm-last-layer] +100.83.37.188: [--dino-warmup-teacher-temp DINO_WARMUP_TEACHER_TEMP] +100.83.37.188: [--dino-teacher-temp DINO_TEACHER_TEMP] +100.83.37.188: [--dino-warmup-teacher-temp-epochs DINO_WARMUP_TEACHER_TEMP_EPOCHS] +100.83.37.188: [--log-params-norm] [--log-num-zeros-in-grad] +100.83.37.188: [--timing-log-level {0,1,2}] +100.83.37.188: [--no-barrier-with-level-1-timing] +100.83.37.188: [--timing-log-option {max,minmax,all}] +100.83.37.188: [--tensorboard-log-interval TENSORBOARD_LOG_INTERVAL] +100.83.37.188: [--tensorboard-queue-size TENSORBOARD_QUEUE_SIZE] +100.83.37.188: [--log-timers-to-tensorboard] +100.83.37.188: [--log-batch-size-to-tensorboard] +100.83.37.188: [--no-log-learnig-rate-to-tensorboard] 
+100.83.37.188: [--no-log-loss-scale-to-tensorboard] +100.83.37.188: [--log-validation-ppl-to-tensorboard] +100.83.37.188: [--log-optimizer-states-to-tensorboard] +100.83.37.188: [--log-memory-to-tensorboard] +100.83.37.188: [--log-world-size-to-tensorboard] +100.83.37.188: [--zero-stage ZERO_STAGE] [--zero-reduce-scatter] +100.83.37.188: [--zero-contigious-gradients] +100.83.37.188: [--zero-reduce-bucket-size ZERO_REDUCE_BUCKET_SIZE] +100.83.37.188: [--zero-allgather-bucket-size ZERO_ALLGATHER_BUCKET_SIZE] +100.83.37.188: [--remote-device {none,cpu,nvme}] [--use-pin-memory] +100.83.37.188: [--scattered-embeddings] [--split-transformers] +100.83.37.188: [--memory-centric-tiled-linear] +100.83.37.188: [--tile-factor TILE_FACTOR] +100.83.37.188: [--deepspeed-activation-checkpointing] +100.83.37.188: [--partition-activations] +100.83.37.188: [--contigious-checkpointing] [--checkpoint-in-cpu] +100.83.37.188: [--synchronize-each-layer] [--profile-backward] +100.83.37.188: [--num-layers-teacher NUM_LAYERS_TEACHER] +100.83.37.188: [--num-experts-teacher NUM_EXPERTS_TEACHER [NUM_EXPERTS_TEACHER ...]] +100.83.37.188: [--hidden-size-teacher HIDDEN_SIZE_TEACHER] +100.83.37.188: [--num-attention-heads-teacher NUM_ATTENTION_HEADS_TEACHER] +100.83.37.188: [--mos] [--kd] [--kd-alpha-ce KD_ALPHA_CE] +100.83.37.188: [--kd-beta-ce KD_BETA_CE] [--kd-temp KD_TEMP] +100.83.37.188: [--reset-iteration] [--load-teacher LOAD_TEACHER] +100.83.37.188: [--inference-batch-times-seqlen-threshold INFERENCE_BATCH_TIMES_SEQLEN_THRESHOLD] +100.83.37.188: [--max-tokens-to-oom MAX_TOKENS_TO_OOM] +100.83.37.188: [--output-bert-embeddings] +100.83.37.188: [--bert-embedder-type {megatron,huggingface}] +100.83.37.188: [--cache-fp8-weight] +100.83.37.188: [--cache-fp8-weight-fwd CACHE_FP8_WEIGHT_FWD] +100.83.37.188: [--fp8-e5m2] [--fp8-e4m3] [--fp8-hybrid] +100.83.37.188: [--no-fp8-wgrad] [--fp8-margin FP8_MARGIN] +100.83.37.188: [--fp8-interval FP8_INTERVAL] +100.83.37.188: [--transformer-impl {local,transformer_engine}] +100.83.37.188: [--fp8-amax-history-len FP8_AMAX_HISTORY_LEN] +100.83.37.188: [--fp8-amax-compute-algo {most_recent,max}] +100.83.37.188: [--retro-workdir RETRO_WORKDIR] +100.83.37.188: [--retro-add-retriever] +100.83.37.188: [--retro-cyclic-train-iters RETRO_CYCLIC_TRAIN_ITERS] +100.83.37.188: [--retro-encoder-layers RETRO_ENCODER_LAYERS] +100.83.37.188: [--retro-encoder-hidden-dropout RETRO_ENCODER_HIDDEN_DROPOUT] +100.83.37.188: [--retro-encoder-attention-dropout RETRO_ENCODER_ATTENTION_DROPOUT] +100.83.37.188: [--retro-num-neighbors RETRO_NUM_NEIGHBORS] +100.83.37.188: [--retro-num-retrieved-chunks RETRO_NUM_RETRIEVED_CHUNKS] +100.83.37.188: [--retro-return-doc-ids] +100.83.37.188: [--profile {pt,pt-full,hltv}] +100.83.37.188: [--profile-steps PROFILE_STEPS] [--deepspeed] +100.83.37.188: [--deepspeed_config DEEPSPEED_CONFIG] [--deepscale] +100.83.37.188: [--deepscale_config DEEPSCALE_CONFIG] +100.83.37.188: [--deepspeed_mpi] [--use_hpu] [--no_cuda]
+100.83.37.188: [--retro-num-retrieved-chunks RETRO_NUM_RETRIEVED_CHUNKS]
+100.83.37.188: [--retro-return-doc-ids]
+100.83.37.188: [--profile {pt,pt-full,hltv}]
+100.83.37.188: [--profile-steps PROFILE_STEPS] [--deepspeed]
+100.83.37.188: [--deepspeed_config DEEPSPEED_CONFIG] [--deepscale]
+100.83.37.188: [--deepscale_config DEEPSCALE_CONFIG]
+100.83.37.188: [--deepspeed_mpi] [--use_hpu] [--no_cuda]
+100.83.37.188: pretrain_llama.py: error: unrecognized arguments: --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --use-torch-compile false --hpu-deterministic --hf-save /data/output/llama13b_multiling_800M/13-05-2024-11:50:01/hf_ckpt
+100.83.37.188: --------------------------------------------------
+100.83.37.188: DeepSpeed C++/CUDA extension op report
+100.83.37.188: --------------------------------------------------
+100.83.37.188: NOTE: Ops not installed will be just-in-time (JIT) compiled at
+100.83.37.188: runtime if needed. Op compatibility means that your system
+100.83.37.188: meet the required dependencies to JIT install the op.
+100.83.37.188: --------------------------------------------------
+100.83.37.188: JIT compiled ops requires ninja
+100.83.37.188: ninja .................. [OKAY]
+100.83.37.188: --------------------------------------------------
+100.83.37.188: op name ................ installed .. compatible
+100.83.37.188: --------------------------------------------------
+100.83.37.188: cpu_adam ............... [NO] ....... [OKAY]
+100.83.37.188: fused_adam ............. [NO] ....... [OKAY]
+100.83.37.188: deepspeed_not_implemented [NO] ....... [OKAY]
+100.83.37.188: transformer_inference .. [NO] ....... [OKAY]
+100.83.37.188: --------------------------------------------------
+100.83.37.188: DeepSpeed general environment info:
+100.83.37.188: torch install path ............... ['/usr/local/lib/python3.10/dist-packages/torch']
+100.83.37.188: torch version .................... 2.1.1a0+gitb51c9f6
+100.83.37.188: deepspeed install path ........... ['/usr/local/lib/python3.10/dist-packages/deepspeed']
+100.83.37.188: deepspeed info ................... 0.12.4+hpu.synapse.v1.14.0, fad45b2, 1.14.0
+100.83.37.188: deepspeed wheel compiled w. ...... torch 2.1
+100.83.37.188: shared memory (/dev/shm) size .... 503.75 GB
+100.83.37.188: fatal: detected dubious ownership in repository at '/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed'
+100.83.37.188: To add an exception for this directory, call:
+100.83.37.188: 
+100.83.37.188: git config --global --add safe.directory /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed
+100.83.37.188: **** Git info for Megatron: git_hash=unknown git_branch=unknown ****
[--untie-embeddings-and-output-weights] +100.83.37.188: [--embedding-weights-in-fp32] +100.83.37.188: [--fix-position-emb-redundant-alloc] +100.83.37.188: [--embed-layernorm] +100.83.37.188: [--kill-switch-path KILL_SWITCH_PATH] +100.83.37.188: [--attention-dropout ATTENTION_DROPOUT] +100.83.37.188: [--hidden-dropout HIDDEN_DROPOUT] +100.83.37.188: [--weight-decay WEIGHT_DECAY] +100.83.37.188: [--start-weight-decay START_WEIGHT_DECAY] +100.83.37.188: [--end-weight-decay END_WEIGHT_DECAY] +100.83.37.188: [--weight-decay-incr-style {constant,linear,cosine}] +100.83.37.188: [--clip-grad CLIP_GRAD] [--adam-beta1 ADAM_BETA1] +100.83.37.188: [--adam-beta2 ADAM_BETA2] [--adam-eps ADAM_EPS] +100.83.37.188: [--sgd-momentum SGD_MOMENTUM] +100.83.37.188: [--do-norm-bias-weight-decay] +100.83.37.188: [--micro-batch-size MICRO_BATCH_SIZE] +100.83.37.188: [--eval-micro-batch-size EVAL_MICRO_BATCH_SIZE] +100.83.37.188: [--batch-size BATCH_SIZE] +100.83.37.188: [--global-batch-size GLOBAL_BATCH_SIZE] +100.83.37.188: [--rampup-batch-size [RAMPUP_BATCH_SIZE ...]] +100.83.37.188: [--recompute-activations] +100.83.37.188: [--recompute-granularity {full,selective}] +100.83.37.188: [--distribute-saved-activations] +100.83.37.188: [--recompute-method {uniform,block}] +100.83.37.188: [--recompute-num-layers RECOMPUTE_NUM_LAYERS] +100.83.37.188: [--checkpoint-activations] +100.83.37.188: [--distribute-checkpointed-activations] +100.83.37.188: [--checkpoint-num-layers CHECKPOINT_NUM_LAYERS] +100.83.37.188: [--train-iters TRAIN_ITERS] +100.83.37.188: [--train-samples TRAIN_SAMPLES] +100.83.37.188: [--train-tokens TRAIN_TOKENS] [--random-ltd] +100.83.37.188: [--log-interval LOG_INTERVAL] +100.83.37.188: [--exit-interval EXIT_INTERVAL] +100.83.37.188: [--exit-duration-in-mins EXIT_DURATION_IN_MINS] +100.83.37.188: [--exit-signal-handler] +100.83.37.188: [--tensorboard-dir TENSORBOARD_DIR] +100.83.37.188: [--no-masked-softmax-fusion] [--no-bias-gelu-fusion] +100.83.37.188: [--no-bias-dropout-fusion] +100.83.37.188: [--disable-moe-token-dropping] +100.83.37.188: [--moe-train-capacity-factor MOE_TRAIN_CAPACITY_FACTOR] +100.83.37.188: [--moe-eval-capacity-factor MOE_EVAL_CAPACITY_FACTOR] +100.83.37.188: [--moe-min-capacity MOE_MIN_CAPACITY] +100.83.37.188: [--moe-loss-coeff MOE_LOSS_COEFF] +100.83.37.188: [--create-moe-param-group] [--use-flash-attn] +100.83.37.188: [--use-flash-attn-v2] [--use-flash-attn-triton] +100.83.37.188: [--use-fused-sdpa USE_FUSED_SDPA] +100.83.37.188: [--use-fused-sdpa-with-recompute USE_FUSED_SDPA_WITH_RECOMPUTE] +100.83.37.188: [--disable-bias-linear] +100.83.37.188: [--optimizer {adam,sgd,adamw,fusedadamw}] +100.83.37.188: [--dataloader-type {single,cyclic}] [--ds-inference] +100.83.37.188: [--cpu-optimizer] [--cpu_torch_adam] +100.83.37.188: [--no-pipeline-parallel] [--use-tutel] [--inference] +100.83.37.188: [--no-async-tensor-model-parallel-allreduce] +100.83.37.188: [--no-persist-layer-norm] [--sequence-parallel] +100.83.37.188: [--ds-sequence-parallel-size DS_SEQUENCE_PARALLEL_SIZE] +100.83.37.188: [--force-ds-sequence-parallel] +100.83.37.188: [--no-gradient-accumulation-fusion] +100.83.37.188: [--use-dataset-only USE_DATASET_ONLY] [--seed SEED] +100.83.37.188: [--data-parallel-random-init] +100.83.37.188: [--init-method-std INIT_METHOD_STD] +100.83.37.188: [--init-method-xavier-uniform] [--no-scaled-init] +100.83.37.188: [--lr LR] +100.83.37.188: [--lr-decay-style {constant,linear,cosine,inverse-square-root}] +100.83.37.188: [--lr-decay-iters LR_DECAY_ITERS] +100.83.37.188: 
[--lr-decay-samples LR_DECAY_SAMPLES] +100.83.37.188: [--lr-decay-tokens LR_DECAY_TOKENS] +100.83.37.188: [--lr-warmup-fraction LR_WARMUP_FRACTION] +100.83.37.188: [--lr-warmup-iters LR_WARMUP_ITERS] +100.83.37.188: [--lr-warmup-samples LR_WARMUP_SAMPLES] +100.83.37.188: [--lr-warmup-tokens LR_WARMUP_TOKENS] +100.83.37.188: [--warmup WARMUP] [--min-lr MIN_LR] +100.83.37.188: [--override-opt_param-scheduler] +100.83.37.188: [--use-checkpoint-opt_param-scheduler] [--save SAVE] +100.83.37.188: [--save-interval SAVE_INTERVAL] [--no-save-optim] +100.83.37.188: [--no-save-rng] [--load LOAD] [--no-load-optim] +100.83.37.188: [--no-load-rng] [--no-load-lr-state] [--finetune] +100.83.37.188: [--no-initialization] [--use-checkpoint-args] +100.83.37.188: [--exit-on-missing-checkpoint] +100.83.37.188: [--universal-checkpoint] [--verify-checkpoint] +100.83.37.188: [--verify-checkpoint-model-type {GPT,BLOOM,LLAMA}] +100.83.37.188: [--fp16] [--bf16] [--loss-scale LOSS_SCALE] +100.83.37.188: [--initial-loss-scale INITIAL_LOSS_SCALE] +100.83.37.188: [--min-loss-scale MIN_LOSS_SCALE] +100.83.37.188: [--loss-scale-window LOSS_SCALE_WINDOW] +100.83.37.188: [--hysteresis HYSTERESIS] +100.83.37.188: [--fp32-residual-connection] +100.83.37.188: [--no-query-key-layer-scaling] +100.83.37.188: [--attention-softmax-in-fp32] +100.83.37.188: [--accumulate-allreduce-grads-in-fp32] +100.83.37.188: [--fp16-lm-cross-entropy] +100.83.37.188: [--tensor-model-parallel-size TENSOR_MODEL_PARALLEL_SIZE] +100.83.37.188: [--enable-expert-tensor-parallelism] +100.83.37.188: [--pipeline-model-parallel-size PIPELINE_MODEL_PARALLEL_SIZE] +100.83.37.188: [--pipeline-model-parallel-split-rank PIPELINE_MODEL_PARALLEL_SPLIT_RANK] +100.83.37.188: [--moe-expert-parallel-size MOE_EXPERT_PARALLEL_SIZE] +100.83.37.188: [--model-parallel-size MODEL_PARALLEL_SIZE] +100.83.37.188: [--num-layers-per-virtual-pipeline-stage NUM_LAYERS_PER_VIRTUAL_PIPELINE_STAGE] +100.83.37.188: [--overlap-p2p-communication] +100.83.37.188: [--distributed-backend {nccl,gloo,ccl,hccl}] +100.83.37.188: [--distributed-timeout-minutes DISTRIBUTED_TIMEOUT_MINUTES] +100.83.37.188: [--DDP-impl {local,torch,FSDP}] +100.83.37.188: [--no-contiguous-buffers-in-local-ddp] +100.83.37.188: [--no-scatter-gather-tensors-in-pipeline] +100.83.37.188: [--use-ring-exchange-p2p] [--local-rank LOCAL_RANK] +100.83.37.188: [--lazy-mpu-init LAZY_MPU_INIT] +100.83.37.188: [--use-cpu-initialization] +100.83.37.188: [--empty-unused-memory-level {0,1,2}] +100.83.37.188: [--standalone-embedding-stage] +100.83.37.188: [--use-distributed-optimizer] +100.83.37.188: [--eval-iters EVAL_ITERS] +100.83.37.188: [--eval-interval EVAL_INTERVAL] [--skip-train] +100.83.37.188: [--eval-loss-exit-value EVAL_LOSS_EXIT_VALUE] +100.83.37.188: [--aml-data-download-path AML_DATA_DOWNLOAD_PATH] +100.83.37.188: [--data-path [DATA_PATH ...]] [--split SPLIT] +100.83.37.188: [--train-data-path [TRAIN_DATA_PATH ...]] +100.83.37.188: [--valid-data-path [VALID_DATA_PATH ...]] +100.83.37.188: [--test-data-path [TEST_DATA_PATH ...]] +100.83.37.188: [--data-cache-path DATA_CACHE_PATH] +100.83.37.188: [--vocab-size VOCAB_SIZE] [--vocab-file VOCAB_FILE] +100.83.37.188: [--merge-file MERGE_FILE] +100.83.37.188: [--vocab-extra-ids VOCAB_EXTRA_IDS] +100.83.37.188: [--seq-length SEQ_LENGTH] +100.83.37.188: [--encoder-seq-length ENCODER_SEQ_LENGTH] +100.83.37.188: [--decoder-seq-length DECODER_SEQ_LENGTH] +100.83.37.188: [--retriever-seq-length RETRIEVER_SEQ_LENGTH] +100.83.37.188: [--sample-rate SAMPLE_RATE] [--mask-prob 
MASK_PROB] +100.83.37.188: [--short-seq-prob SHORT_SEQ_PROB] [--mmap-warmup] +100.83.37.188: [--num-workers NUM_WORKERS] +100.83.37.188: [--tokenizer-type {BertWordPieceLowerCase,BertWordPieceCase,GPT2BPETokenizer,SentencePieceTokenizer,GPTSentencePieceTokenizer,NullTokenizer}] +100.83.37.188: [--tokenizer-model TOKENIZER_MODEL] +100.83.37.188: [--data-impl {mmap,infer}] [--reset-position-ids] +100.83.37.188: [--reset-attention-mask] [--eod-mask-loss] +100.83.37.188: [--train-data-exact-num-epochs TRAIN_DATA_EXACT_NUM_EPOCHS] +100.83.37.188: [--return-data-index] +100.83.37.188: [--data-efficiency-curriculum-learning] +100.83.37.188: [--train-idx-path TRAIN_IDX_PATH] +100.83.37.188: [--train-desc-path TRAIN_DESC_PATH] +100.83.37.188: [--train-doc-idx-path TRAIN_DOC_IDX_PATH] +100.83.37.188: [--train-sample-idx-path TRAIN_SAMPLE_IDX_PATH] +100.83.37.188: [--train-shuffle-idx-path TRAIN_SHUFFLE_IDX_PATH] +100.83.37.188: [--mask-tensor-adding] [--no-seq-len-plus-one-tokens] +100.83.37.188: [--adlr-autoresume] +100.83.37.188: [--adlr-autoresume-interval ADLR_AUTORESUME_INTERVAL] +100.83.37.188: [--ict-head-size ICT_HEAD_SIZE] +100.83.37.188: [--biencoder-projection-dim BIENCODER_PROJECTION_DIM] +100.83.37.188: [--biencoder-shared-query-context-model] +100.83.37.188: [--ict-load ICT_LOAD] [--bert-load BERT_LOAD] +100.83.37.188: [--titles-data-path TITLES_DATA_PATH] +100.83.37.188: [--query-in-block-prob QUERY_IN_BLOCK_PROB] +100.83.37.188: [--use-one-sent-docs] +100.83.37.188: [--evidence-data-path EVIDENCE_DATA_PATH] +100.83.37.188: [--retriever-report-topk-accuracies RETRIEVER_REPORT_TOPK_ACCURACIES [RETRIEVER_REPORT_TOPK_ACCURACIES ...]] +100.83.37.188: [--retriever-score-scaling] +100.83.37.188: [--block-data-path BLOCK_DATA_PATH] +100.83.37.188: [--embedding-path EMBEDDING_PATH] +100.83.37.188: [--indexer-batch-size INDEXER_BATCH_SIZE] +100.83.37.188: [--indexer-log-interval INDEXER_LOG_INTERVAL] +100.83.37.188: [--num-classes NUM_CLASSES] [--img-h IMG_H] +100.83.37.188: [--img-w IMG_W] [--num-channels NUM_CHANNELS] +100.83.37.188: [--patch-dim PATCH_DIM] +100.83.37.188: [--classes-fraction CLASSES_FRACTION] +100.83.37.188: [--data-per-class-fraction DATA_PER_CLASS_FRACTION] +100.83.37.188: [--no-data-sharding] [--head-lr-mult HEAD_LR_MULT] +100.83.37.188: [--vision-pretraining] +100.83.37.188: [--vision-pretraining-type {classify,inpaint,dino}] +100.83.37.188: [--vision-backbone-type {vit,mit,swin}] +100.83.37.188: [--swin-backbone-type {tiny,base,h3}] +100.83.37.188: [--mask-type {random,row}] +100.83.37.188: [--mask-factor MASK_FACTOR] +100.83.37.188: [--iter-per-epoch ITER_PER_EPOCH] +100.83.37.188: [--dino-local-img-size DINO_LOCAL_IMG_SIZE] +100.83.37.188: [--dino-local-crops-number DINO_LOCAL_CROPS_NUMBER] +100.83.37.188: [--dino-head-hidden-size DINO_HEAD_HIDDEN_SIZE] +100.83.37.188: [--dino-bottleneck-size DINO_BOTTLENECK_SIZE] +100.83.37.188: [--dino-freeze-last-layer DINO_FREEZE_LAST_LAYER] +100.83.37.188: [--dino-norm-last-layer] +100.83.37.188: [--dino-warmup-teacher-temp DINO_WARMUP_TEACHER_TEMP] +100.83.37.188: [--dino-teacher-temp DINO_TEACHER_TEMP] +100.83.37.188: [--dino-warmup-teacher-temp-epochs DINO_WARMUP_TEACHER_TEMP_EPOCHS] +100.83.37.188: [--log-params-norm] [--log-num-zeros-in-grad] +100.83.37.188: [--timing-log-level {0,1,2}] +100.83.37.188: [--no-barrier-with-level-1-timing] +100.83.37.188: [--timing-log-option {max,minmax,all}] +100.83.37.188: [--tensorboard-log-interval TENSORBOARD_LOG_INTERVAL] +100.83.37.188: [--tensorboard-queue-size TENSORBOARD_QUEUE_SIZE] 
+100.83.37.188: [--log-timers-to-tensorboard] +100.83.37.188: [--log-batch-size-to-tensorboard] +100.83.37.188: [--no-log-learnig-rate-to-tensorboard] +100.83.37.188: [--no-log-loss-scale-to-tensorboard] +100.83.37.188: [--log-validation-ppl-to-tensorboard] +100.83.37.188: [--log-optimizer-states-to-tensorboard] +100.83.37.188: [--log-memory-to-tensorboard] +100.83.37.188: [--log-world-size-to-tensorboard] +100.83.37.188: [--zero-stage ZERO_STAGE] [--zero-reduce-scatter] +100.83.37.188: [--zero-contigious-gradients] +100.83.37.188: [--zero-reduce-bucket-size ZERO_REDUCE_BUCKET_SIZE] +100.83.37.188: [--zero-allgather-bucket-size ZERO_ALLGATHER_BUCKET_SIZE] +100.83.37.188: [--remote-device {none,cpu,nvme}] [--use-pin-memory] +100.83.37.188: [--scattered-embeddings] [--split-transformers] +100.83.37.188: [--memory-centric-tiled-linear] +100.83.37.188: [--tile-factor TILE_FACTOR] +100.83.37.188: [--deepspeed-activation-checkpointing] +100.83.37.188: [--partition-activations] +100.83.37.188: [--contigious-checkpointing] [--checkpoint-in-cpu] +100.83.37.188: [--synchronize-each-layer] [--profile-backward] +100.83.37.188: [--num-layers-teacher NUM_LAYERS_TEACHER] +100.83.37.188: [--num-experts-teacher NUM_EXPERTS_TEACHER [NUM_EXPERTS_TEACHER ...]] +100.83.37.188: [--hidden-size-teacher HIDDEN_SIZE_TEACHER] +100.83.37.188: [--num-attention-heads-teacher NUM_ATTENTION_HEADS_TEACHER] +100.83.37.188: [--mos] [--kd] [--kd-alpha-ce KD_ALPHA_CE] +100.83.37.188: [--kd-beta-ce KD_BETA_CE] [--kd-temp KD_TEMP] +100.83.37.188: [--reset-iteration] [--load-teacher LOAD_TEACHER] +100.83.37.188: [--inference-batch-times-seqlen-threshold INFERENCE_BATCH_TIMES_SEQLEN_THRESHOLD] +100.83.37.188: [--max-tokens-to-oom MAX_TOKENS_TO_OOM] +100.83.37.188: [--output-bert-embeddings] +100.83.37.188: [--bert-embedder-type {megatron,huggingface}] +100.83.37.188: [--cache-fp8-weight] +100.83.37.188: [--cache-fp8-weight-fwd CACHE_FP8_WEIGHT_FWD] +100.83.37.188: [--fp8-e5m2] [--fp8-e4m3] [--fp8-hybrid] +100.83.37.188: [--no-fp8-wgrad] [--fp8-margin FP8_MARGIN] +100.83.37.188: [--fp8-interval FP8_INTERVAL] +100.83.37.188: [--transformer-impl {local,transformer_engine}] +100.83.37.188: [--fp8-amax-history-len FP8_AMAX_HISTORY_LEN] +100.83.37.188: [--fp8-amax-compute-algo {most_recent,max}] +100.83.37.188: [--retro-workdir RETRO_WORKDIR] +100.83.37.188: [--retro-add-retriever] +100.83.37.188: [--retro-cyclic-train-iters RETRO_CYCLIC_TRAIN_ITERS] +100.83.37.188: [--retro-encoder-layers RETRO_ENCODER_LAYERS] +100.83.37.188: [--retro-encoder-hidden-dropout RETRO_ENCODER_HIDDEN_DROPOUT] +100.83.37.188: [--retro-encoder-attention-dropout RETRO_ENCODER_ATTENTION_DROPOUT] +100.83.37.188: [--retro-num-neighbors RETRO_NUM_NEIGHBORS] +100.83.37.188: [--retro-num-retrieved-chunks RETRO_NUM_RETRIEVED_CHUNKS] +100.83.37.188: [--retro-return-doc-ids] +100.83.37.188: [--profile {pt,pt-full,hltv}] +100.83.37.188: [--profile-steps PROFILE_STEPS] [--deepspeed] +100.83.37.188: [--deepspeed_config DEEPSPEED_CONFIG] [--deepscale] +100.83.37.188: [--deepscale_config DEEPSCALE_CONFIG] +100.83.37.188: [--deepspeed_mpi] [--use_hpu] [--no_cuda] +100.83.37.188: usage: pretrain_llama.py [-h] [--num-layers NUM_LAYERS] +100.83.37.188: [--encoder-num-layers ENCODER_NUM_LAYERS] +100.83.37.188: [--decoder-num-layers DECODER_NUM_LAYERS] +100.83.37.188: [--num-experts NUM_EXPERTS [NUM_EXPERTS ...]] +100.83.37.188: [--mlp-type MLP_TYPE] [--topk TOPK] +100.83.37.188: [--expert-interval EXPERT_INTERVAL] +100.83.37.188: [--hidden-size HIDDEN_SIZE] +100.83.37.188: 
[--ffn-hidden-size FFN_HIDDEN_SIZE] +100.83.37.188: [--num-attention-heads NUM_ATTENTION_HEADS] +100.83.37.188: [--num-key-value-heads NUM_KEY_VALUE_HEADS] +100.83.37.188: [--kv-channels KV_CHANNELS] +100.83.37.188: [--max-position-embeddings MAX_POSITION_EMBEDDINGS] +100.83.37.188: [--use-rotary-position-embeddings] +100.83.37.188: [--rotary-percent ROTARY_PERCENT] +100.83.37.188: [--no-position-embedding] +100.83.37.188: [--make-vocab-size-divisible-by MAKE_VOCAB_SIZE_DIVISIBLE_BY] +100.83.37.188: [--normalization {layernorm,rmsnorm}] +100.83.37.188: [--use-fused-rmsnorm USE_FUSED_RMSNORM] +100.83.37.188: [--layernorm-epsilon LAYERNORM_EPSILON] +100.83.37.188: [--apply-layernorm-1p] [--disable-mem-efficient-ln] +100.83.37.188: [--apply-residual-connection-post-layernorm] +100.83.37.188: [--openai-gelu] [--squared-relu] [--swiglu] +100.83.37.188: [--onnx-safe ONNX_SAFE] [--bert-no-binary-head] +100.83.37.188: [--num-experts-switch NUM_EXPERTS_SWITCH] +100.83.37.188: [--untie-embeddings-and-output-weights] +100.83.37.188: [--embedding-weights-in-fp32] +100.83.37.188: [--fix-position-emb-redundant-alloc] +100.83.37.188: [--embed-layernorm] +100.83.37.188: [--kill-switch-path KILL_SWITCH_PATH] +100.83.37.188: [--attention-dropout ATTENTION_DROPOUT] +100.83.37.188: [--hidden-dropout HIDDEN_DROPOUT] +100.83.37.188: [--weight-decay WEIGHT_DECAY] +100.83.37.188: [--start-weight-decay START_WEIGHT_DECAY] +100.83.37.188: [--end-weight-decay END_WEIGHT_DECAY] +100.83.37.188: [--weight-decay-incr-style {constant,linear,cosine}] +100.83.37.188: [--clip-grad CLIP_GRAD] [--adam-beta1 ADAM_BETA1] +100.83.37.188: [--adam-beta2 ADAM_BETA2] [--adam-eps ADAM_EPS] +100.83.37.188: [--sgd-momentum SGD_MOMENTUM] +100.83.37.188: [--do-norm-bias-weight-decay] +100.83.37.188: [--micro-batch-size MICRO_BATCH_SIZE] +100.83.37.188: [--eval-micro-batch-size EVAL_MICRO_BATCH_SIZE] +100.83.37.188: [--batch-size BATCH_SIZE] +100.83.37.188: [--global-batch-size GLOBAL_BATCH_SIZE] +100.83.37.188: [--rampup-batch-size [RAMPUP_BATCH_SIZE ...]] +100.83.37.188: [--recompute-activations] +100.83.37.188: [--recompute-granularity {full,selective}] +100.83.37.188: [--distribute-saved-activations] +100.83.37.188: [--recompute-method {uniform,block}] +100.83.37.188: [--recompute-num-layers RECOMPUTE_NUM_LAYERS] +100.83.37.188: [--checkpoint-activations] +100.83.37.188: [--distribute-checkpointed-activations] +100.83.37.188: [--checkpoint-num-layers CHECKPOINT_NUM_LAYERS] +100.83.37.188: [--train-iters TRAIN_ITERS] +100.83.37.188: [--train-samples TRAIN_SAMPLES] +100.83.37.188: [--train-tokens TRAIN_TOKENS] [--random-ltd] +100.83.37.188: [--log-interval LOG_INTERVAL] +100.83.37.188: [--exit-interval EXIT_INTERVAL] +100.83.37.188: [--exit-duration-in-mins EXIT_DURATION_IN_MINS] +100.83.37.188: [--exit-signal-handler] +100.83.37.188: [--tensorboard-dir TENSORBOARD_DIR] +100.83.37.188: [--no-masked-softmax-fusion] [--no-bias-gelu-fusion] +100.83.37.188: [--no-bias-dropout-fusion] +100.83.37.188: [--disable-moe-token-dropping] +100.83.37.188: [--moe-train-capacity-factor MOE_TRAIN_CAPACITY_FACTOR] +100.83.37.188: [--moe-eval-capacity-factor MOE_EVAL_CAPACITY_FACTOR] +100.83.37.188: [--moe-min-capacity MOE_MIN_CAPACITY] +100.83.37.188: [--moe-loss-coeff MOE_LOSS_COEFF] +100.83.37.188: [--create-moe-param-group] [--use-flash-attn] +100.83.37.188: [--use-flash-attn-v2] [--use-flash-attn-triton] +100.83.37.188: [--use-fused-sdpa USE_FUSED_SDPA] +100.83.37.188: [--use-fused-sdpa-with-recompute USE_FUSED_SDPA_WITH_RECOMPUTE] +100.83.37.188: 
[--disable-bias-linear] +100.83.37.188: [--optimizer {adam,sgd,adamw,fusedadamw}] +100.83.37.188: [--dataloader-type {single,cyclic}] [--ds-inference] +100.83.37.188: [--cpu-optimizer] [--cpu_torch_adam] +100.83.37.188: [--no-pipeline-parallel] [--use-tutel] [--inference] +100.83.37.188: [--no-async-tensor-model-parallel-allreduce] +100.83.37.188: [--no-persist-layer-norm] [--sequence-parallel] +100.83.37.188: [--ds-sequence-parallel-size DS_SEQUENCE_PARALLEL_SIZE] +100.83.37.188: [--force-ds-sequence-parallel] +100.83.37.188: [--no-gradient-accumulation-fusion] +100.83.37.188: [--use-dataset-only USE_DATASET_ONLY] [--seed SEED] +100.83.37.188: [--data-parallel-random-init] +100.83.37.188: [--init-method-std INIT_METHOD_STD] +100.83.37.188: [--init-method-xavier-uniform] [--no-scaled-init] +100.83.37.188: [--lr LR] +100.83.37.188: [--lr-decay-style {constant,linear,cosine,inverse-square-root}] +100.83.37.188: [--lr-decay-iters LR_DECAY_ITERS] +100.83.37.188: [--lr-decay-samples LR_DECAY_SAMPLES] +100.83.37.188: [--lr-decay-tokens LR_DECAY_TOKENS] +100.83.37.188: [--lr-warmup-fraction LR_WARMUP_FRACTION] +100.83.37.188: [--lr-warmup-iters LR_WARMUP_ITERS] +100.83.37.188: [--lr-warmup-samples LR_WARMUP_SAMPLES] +100.83.37.188: [--lr-warmup-tokens LR_WARMUP_TOKENS] +100.83.37.188: [--warmup WARMUP] [--min-lr MIN_LR] +100.83.37.188: [--override-opt_param-scheduler] +100.83.37.188: [--use-checkpoint-opt_param-scheduler] [--save SAVE] +100.83.37.188: [--save-interval SAVE_INTERVAL] [--no-save-optim] +100.83.37.188: [--no-save-rng] [--load LOAD] [--no-load-optim] +100.83.37.188: [--no-load-rng] [--no-load-lr-state] [--finetune] +100.83.37.188: [--no-initialization] [--use-checkpoint-args] +100.83.37.188: [--exit-on-missing-checkpoint] +100.83.37.188: [--universal-checkpoint] [--verify-checkpoint] +100.83.37.188: [--verify-checkpoint-model-type {GPT,BLOOM,LLAMA}] +100.83.37.188: [--fp16] [--bf16] [--loss-scale LOSS_SCALE] +100.83.37.188: [--initial-loss-scale INITIAL_LOSS_SCALE] +100.83.37.188: [--min-loss-scale MIN_LOSS_SCALE] +100.83.37.188: [--loss-scale-window LOSS_SCALE_WINDOW] +100.83.37.188: [--hysteresis HYSTERESIS] +100.83.37.188: [--fp32-residual-connection] +100.83.37.188: [--no-query-key-layer-scaling] +100.83.37.188: [--attention-softmax-in-fp32] +100.83.37.188: [--accumulate-allreduce-grads-in-fp32] +100.83.37.188: [--fp16-lm-cross-entropy] +100.83.37.188: [--tensor-model-parallel-size TENSOR_MODEL_PARALLEL_SIZE] +100.83.37.188: [--enable-expert-tensor-parallelism] +100.83.37.188: [--pipeline-model-parallel-size PIPELINE_MODEL_PARALLEL_SIZE] +100.83.37.188: [--pipeline-model-parallel-split-rank PIPELINE_MODEL_PARALLEL_SPLIT_RANK] +100.83.37.188: [--moe-expert-parallel-size MOE_EXPERT_PARALLEL_SIZE] +100.83.37.188: [--model-parallel-size MODEL_PARALLEL_SIZE] +100.83.37.188: [--num-layers-per-virtual-pipeline-stage NUM_LAYERS_PER_VIRTUAL_PIPELINE_STAGE] +100.83.37.188: [--overlap-p2p-communication] +100.83.37.188: [--distributed-backend {nccl,gloo,ccl,hccl}] +100.83.37.188: [--distributed-timeout-minutes DISTRIBUTED_TIMEOUT_MINUTES] +100.83.37.188: [--DDP-impl {local,torch,FSDP}] +100.83.37.188: [--no-contiguous-buffers-in-local-ddp] +100.83.37.188: [--no-scatter-gather-tensors-in-pipeline] +100.83.37.188: [--use-ring-exchange-p2p] [--local-rank LOCAL_RANK] +100.83.37.188: [--lazy-mpu-init LAZY_MPU_INIT] +100.83.37.188: [--use-cpu-initialization] +100.83.37.188: [--empty-unused-memory-level {0,1,2}] +100.83.37.188: [--standalone-embedding-stage] +100.83.37.188: 
[--use-distributed-optimizer] +100.83.37.188: [--eval-iters EVAL_ITERS] +100.83.37.188: [--eval-interval EVAL_INTERVAL] [--skip-train] +100.83.37.188: [--eval-loss-exit-value EVAL_LOSS_EXIT_VALUE] +100.83.37.188: [--aml-data-download-path AML_DATA_DOWNLOAD_PATH] +100.83.37.188: [--data-path [DATA_PATH ...]] [--split SPLIT] +100.83.37.188: [--train-data-path [TRAIN_DATA_PATH ...]] +100.83.37.188: [--valid-data-path [VALID_DATA_PATH ...]] +100.83.37.188: [--test-data-path [TEST_DATA_PATH ...]] +100.83.37.188: [--data-cache-path DATA_CACHE_PATH] +100.83.37.188: [--vocab-size VOCAB_SIZE] [--vocab-file VOCAB_FILE] +100.83.37.188: [--merge-file MERGE_FILE] +100.83.37.188: [--vocab-extra-ids VOCAB_EXTRA_IDS] +100.83.37.188: [--seq-length SEQ_LENGTH] +100.83.37.188: [--encoder-seq-length ENCODER_SEQ_LENGTH] +100.83.37.188: [--decoder-seq-length DECODER_SEQ_LENGTH] +100.83.37.188: [--retriever-seq-length RETRIEVER_SEQ_LENGTH] +100.83.37.188: [--sample-rate SAMPLE_RATE] [--mask-prob MASK_PROB] +100.83.37.188: [--short-seq-prob SHORT_SEQ_PROB] [--mmap-warmup] +100.83.37.188: [--num-workers NUM_WORKERS] +100.83.37.188: [--tokenizer-type {BertWordPieceLowerCase,BertWordPieceCase,GPT2BPETokenizer,SentencePieceTokenizer,GPTSentencePieceTokenizer,NullTokenizer}] +100.83.37.188: [--tokenizer-model TOKENIZER_MODEL] +100.83.37.188: [--data-impl {mmap,infer}] [--reset-position-ids] +100.83.37.188: [--reset-attention-mask] [--eod-mask-loss] +100.83.37.188: [--train-data-exact-num-epochs TRAIN_DATA_EXACT_NUM_EPOCHS] +100.83.37.188: [--return-data-index] +100.83.37.188: [--data-efficiency-curriculum-learning] +100.83.37.188: [--train-idx-path TRAIN_IDX_PATH] +100.83.37.188: [--train-desc-path TRAIN_DESC_PATH] +100.83.37.188: [--train-doc-idx-path TRAIN_DOC_IDX_PATH] +100.83.37.188: [--train-sample-idx-path TRAIN_SAMPLE_IDX_PATH] +100.83.37.188: [--train-shuffle-idx-path TRAIN_SHUFFLE_IDX_PATH] +100.83.37.188: [--mask-tensor-adding] [--no-seq-len-plus-one-tokens] +100.83.37.188: [--adlr-autoresume] +100.83.37.188: [--adlr-autoresume-interval ADLR_AUTORESUME_INTERVAL] +100.83.37.188: [--ict-head-size ICT_HEAD_SIZE] +100.83.37.188: [--biencoder-projection-dim BIENCODER_PROJECTION_DIM] +100.83.37.188: [--biencoder-shared-query-context-model] +100.83.37.188: [--ict-load ICT_LOAD] [--bert-load BERT_LOAD] +100.83.37.188: [--titles-data-path TITLES_DATA_PATH] +100.83.37.188: [--query-in-block-prob QUERY_IN_BLOCK_PROB] +100.83.37.188: [--use-one-sent-docs] +100.83.37.188: [--evidence-data-path EVIDENCE_DATA_PATH] +100.83.37.188: [--retriever-report-topk-accuracies RETRIEVER_REPORT_TOPK_ACCURACIES [RETRIEVER_REPORT_TOPK_ACCURACIES ...]] +100.83.37.188: [--retriever-score-scaling] +100.83.37.188: [--block-data-path BLOCK_DATA_PATH] +100.83.37.188: [--embedding-path EMBEDDING_PATH] +100.83.37.188: [--indexer-batch-size INDEXER_BATCH_SIZE] +100.83.37.188: [--indexer-log-interval INDEXER_LOG_INTERVAL] +100.83.37.188: [--num-classes NUM_CLASSES] [--img-h IMG_H] +100.83.37.188: [--img-w IMG_W] [--num-channels NUM_CHANNELS] +100.83.37.188: [--patch-dim PATCH_DIM] +100.83.37.188: [--classes-fraction CLASSES_FRACTION] +100.83.37.188: [--data-per-class-fraction DATA_PER_CLASS_FRACTION] +100.83.37.188: [--no-data-sharding] [--head-lr-mult HEAD_LR_MULT] +100.83.37.188: [--vision-pretraining] +100.83.37.188: [--vision-pretraining-type {classify,inpaint,dino}] +100.83.37.188: [--vision-backbone-type {vit,mit,swin}] +100.83.37.188: [--swin-backbone-type {tiny,base,h3}] +100.83.37.188: [--mask-type {random,row}] +100.83.37.188: 
[--mask-factor MASK_FACTOR] +100.83.37.188: [--iter-per-epoch ITER_PER_EPOCH] +100.83.37.188: [--dino-local-img-size DINO_LOCAL_IMG_SIZE] +100.83.37.188: [--dino-local-crops-number DINO_LOCAL_CROPS_NUMBER] +100.83.37.188: [--dino-head-hidden-size DINO_HEAD_HIDDEN_SIZE] +100.83.37.188: [--dino-bottleneck-size DINO_BOTTLENECK_SIZE] +100.83.37.188: [--dino-freeze-last-layer DINO_FREEZE_LAST_LAYER] +100.83.37.188: [--dino-norm-last-layer] +100.83.37.188: [--dino-warmup-teacher-temp DINO_WARMUP_TEACHER_TEMP] +100.83.37.188: [--dino-teacher-temp DINO_TEACHER_TEMP] +100.83.37.188: [--dino-warmup-teacher-temp-epochs DINO_WARMUP_TEACHER_TEMP_EPOCHS] +100.83.37.188: [--log-params-norm] [--log-num-zeros-in-grad] +100.83.37.188: [--timing-log-level {0,1,2}] +100.83.37.188: [--no-barrier-with-level-1-timing] +100.83.37.188: [--timing-log-option {max,minmax,all}] +100.83.37.188: [--tensorboard-log-interval TENSORBOARD_LOG_INTERVAL] +100.83.37.188: [--tensorboard-queue-size TENSORBOARD_QUEUE_SIZE] +100.83.37.188: [--log-timers-to-tensorboard] +100.83.37.188: [--log-batch-size-to-tensorboard] +100.83.37.188: [--no-log-learnig-rate-to-tensorboard] +100.83.37.188: [--no-log-loss-scale-to-tensorboard] +100.83.37.188: [--log-validation-ppl-to-tensorboard] +100.83.37.188: [--log-optimizer-states-to-tensorboard] +100.83.37.188: [--log-memory-to-tensorboard] +100.83.37.188: [--log-world-size-to-tensorboard] +100.83.37.188: [--zero-stage ZERO_STAGE] [--zero-reduce-scatter] +100.83.37.188: [--zero-contigious-gradients] +100.83.37.188: [--zero-reduce-bucket-size ZERO_REDUCE_BUCKET_SIZE] +100.83.37.188: [--zero-allgather-bucket-size ZERO_ALLGATHER_BUCKET_SIZE] +100.83.37.188: [--remote-device {none,cpu,nvme}] [--use-pin-memory] +100.83.37.188: [--scattered-embeddings] [--split-transformers] +100.83.37.188: [--memory-centric-tiled-linear] +100.83.37.188: [--tile-factor TILE_FACTOR] +100.83.37.188: [--deepspeed-activation-checkpointing] +100.83.37.188: [--partition-activations] +100.83.37.188: [--contigious-checkpointing] [--checkpoint-in-cpu] +100.83.37.188: [--synchronize-each-layer] [--profile-backward] +100.83.37.188: [--num-layers-teacher NUM_LAYERS_TEACHER] +100.83.37.188: [--num-experts-teacher NUM_EXPERTS_TEACHER [NUM_EXPERTS_TEACHER ...]] +100.83.37.188: [--hidden-size-teacher HIDDEN_SIZE_TEACHER] +100.83.37.188: [--num-attention-heads-teacher NUM_ATTENTION_HEADS_TEACHER] +100.83.37.188: [--mos] [--kd] [--kd-alpha-ce KD_ALPHA_CE] +100.83.37.188: [--kd-beta-ce KD_BETA_CE] [--kd-temp KD_TEMP] +100.83.37.188: [--reset-iteration] [--load-teacher LOAD_TEACHER] +100.83.37.188: [--inference-batch-times-seqlen-threshold INFERENCE_BATCH_TIMES_SEQLEN_THRESHOLD] +100.83.37.188: [--max-tokens-to-oom MAX_TOKENS_TO_OOM] +100.83.37.188: [--output-bert-embeddings] +100.83.37.188: [--bert-embedder-type {megatron,huggingface}] +100.83.37.188: [--cache-fp8-weight] +100.83.37.188: [--cache-fp8-weight-fwd CACHE_FP8_WEIGHT_FWD] +100.83.37.188: [--fp8-e5m2] [--fp8-e4m3] [--fp8-hybrid] +100.83.37.188: [--no-fp8-wgrad] [--fp8-margin FP8_MARGIN] +100.83.37.188: [--fp8-interval FP8_INTERVAL] +100.83.37.188: [--transformer-impl {local,transformer_engine}] +100.83.37.188: [--fp8-amax-history-len FP8_AMAX_HISTORY_LEN] +100.83.37.188: [--fp8-amax-compute-algo {most_recent,max}] +100.83.37.188: [--retro-workdir RETRO_WORKDIR] +100.83.37.188: [--retro-add-retriever] +100.83.37.188: [--retro-cyclic-train-iters RETRO_CYCLIC_TRAIN_ITERS] +100.83.37.188: [--retro-encoder-layers RETRO_ENCODER_LAYERS] +100.83.37.188: 
[--retro-encoder-hidden-dropout RETRO_ENCODER_HIDDEN_DROPOUT] +100.83.37.188: [--retro-encoder-attention-dropout RETRO_ENCODER_ATTENTION_DROPOUT] +100.83.37.188: [--retro-num-neighbors RETRO_NUM_NEIGHBORS] +100.83.37.188: [--retro-num-retrieved-chunks RETRO_NUM_RETRIEVED_CHUNKS] +100.83.37.188: [--retro-return-doc-ids] +100.83.37.188: [--profile {pt,pt-full,hltv}] +100.83.37.188: [--profile-steps PROFILE_STEPS] [--deepspeed] +100.83.37.188: [--deepspeed_config DEEPSPEED_CONFIG] [--deepscale] +100.83.37.188: [--deepscale_config DEEPSCALE_CONFIG] +100.83.37.188: [--deepspeed_mpi] [--use_hpu] [--no_cuda] +100.83.37.188: pretrain_llama.py: error: unrecognized arguments: --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --use-torch-compile false --hpu-deterministic --hf-save /data/output/llama13b_multiling_800M/13-05-2024-11:50:01/hf_ckpt +100.83.37.188: usage: pretrain_llama.py [-h] [--num-layers NUM_LAYERS] +100.83.37.188: [--encoder-num-layers ENCODER_NUM_LAYERS] +100.83.37.188: [--decoder-num-layers DECODER_NUM_LAYERS] +100.83.37.188: [--num-experts NUM_EXPERTS [NUM_EXPERTS ...]] +100.83.37.188: [--mlp-type MLP_TYPE] [--topk TOPK] +100.83.37.188: [--expert-interval EXPERT_INTERVAL] +100.83.37.188: [--hidden-size HIDDEN_SIZE] +100.83.37.188: [--ffn-hidden-size FFN_HIDDEN_SIZE] +100.83.37.188: [--num-attention-heads NUM_ATTENTION_HEADS] +100.83.37.188: [--num-key-value-heads NUM_KEY_VALUE_HEADS] +100.83.37.188: [--kv-channels KV_CHANNELS] +100.83.37.188: [--max-position-embeddings MAX_POSITION_EMBEDDINGS] +100.83.37.188: [--use-rotary-position-embeddings] +100.83.37.188: [--rotary-percent ROTARY_PERCENT] +100.83.37.188: [--no-position-embedding] +100.83.37.188: [--make-vocab-size-divisible-by MAKE_VOCAB_SIZE_DIVISIBLE_BY] +100.83.37.188: [--normalization {layernorm,rmsnorm}] +100.83.37.188: [--use-fused-rmsnorm USE_FUSED_RMSNORM] +100.83.37.188: [--layernorm-epsilon LAYERNORM_EPSILON] +100.83.37.188: [--apply-layernorm-1p] [--disable-mem-efficient-ln] +100.83.37.188: [--apply-residual-connection-post-layernorm] +100.83.37.188: [--openai-gelu] [--squared-relu] [--swiglu] +100.83.37.188: [--onnx-safe ONNX_SAFE] [--bert-no-binary-head] +100.83.37.188: [--num-experts-switch NUM_EXPERTS_SWITCH] +100.83.37.188: [--untie-embeddings-and-output-weights] +100.83.37.188: [--embedding-weights-in-fp32] +100.83.37.188: [--fix-position-emb-redundant-alloc] +100.83.37.188: [--embed-layernorm] +100.83.37.188: [--kill-switch-path KILL_SWITCH_PATH] +100.83.37.188: [--attention-dropout ATTENTION_DROPOUT] +100.83.37.188: [--hidden-dropout HIDDEN_DROPOUT] +100.83.37.188: [--weight-decay WEIGHT_DECAY] +100.83.37.188: [--start-weight-decay START_WEIGHT_DECAY] +100.83.37.188: [--end-weight-decay END_WEIGHT_DECAY] +100.83.37.188: [--weight-decay-incr-style {constant,linear,cosine}] +100.83.37.188: [--clip-grad CLIP_GRAD] [--adam-beta1 ADAM_BETA1] +100.83.37.188: [--adam-beta2 ADAM_BETA2] [--adam-eps ADAM_EPS] +100.83.37.188: [--sgd-momentum SGD_MOMENTUM] +100.83.37.188: [--do-norm-bias-weight-decay] +100.83.37.188: [--micro-batch-size MICRO_BATCH_SIZE] +100.83.37.188: [--eval-micro-batch-size EVAL_MICRO_BATCH_SIZE] +100.83.37.188: [--batch-size BATCH_SIZE] +100.83.37.188: [--global-batch-size GLOBAL_BATCH_SIZE] +100.83.37.188: [--rampup-batch-size [RAMPUP_BATCH_SIZE ...]] +100.83.37.188: [--recompute-activations] +100.83.37.188: [--recompute-granularity {full,selective}] +100.83.37.188: [--distribute-saved-activations] +100.83.37.188: [--recompute-method {uniform,block}] 
+100.83.37.188: [--recompute-num-layers RECOMPUTE_NUM_LAYERS] +100.83.37.188: [--checkpoint-activations] +100.83.37.188: [--distribute-checkpointed-activations] +100.83.37.188: [--checkpoint-num-layers CHECKPOINT_NUM_LAYERS] +100.83.37.188: [--train-iters TRAIN_ITERS] +100.83.37.188: [--train-samples TRAIN_SAMPLES] +100.83.37.188: [--train-tokens TRAIN_TOKENS] [--random-ltd] +100.83.37.188: [--log-interval LOG_INTERVAL] +100.83.37.188: [--exit-interval EXIT_INTERVAL] +100.83.37.188: [--exit-duration-in-mins EXIT_DURATION_IN_MINS] +100.83.37.188: [--exit-signal-handler] +100.83.37.188: [--tensorboard-dir TENSORBOARD_DIR] +100.83.37.188: [--no-masked-softmax-fusion] [--no-bias-gelu-fusion] +100.83.37.188: [--no-bias-dropout-fusion] +100.83.37.188: [--disable-moe-token-dropping] +100.83.37.188: [--moe-train-capacity-factor MOE_TRAIN_CAPACITY_FACTOR] +100.83.37.188: [--moe-eval-capacity-factor MOE_EVAL_CAPACITY_FACTOR] +100.83.37.188: [--moe-min-capacity MOE_MIN_CAPACITY] +100.83.37.188: [--moe-loss-coeff MOE_LOSS_COEFF] +100.83.37.188: [--create-moe-param-group] [--use-flash-attn] +100.83.37.188: [--use-flash-attn-v2] [--use-flash-attn-triton] +100.83.37.188: [--use-fused-sdpa USE_FUSED_SDPA] +100.83.37.188: [--use-fused-sdpa-with-recompute USE_FUSED_SDPA_WITH_RECOMPUTE] +100.83.37.188: [--disable-bias-linear] +100.83.37.188: [--optimizer {adam,sgd,adamw,fusedadamw}] +100.83.37.188: [--dataloader-type {single,cyclic}] [--ds-inference] +100.83.37.188: [--cpu-optimizer] [--cpu_torch_adam] +100.83.37.188: [--no-pipeline-parallel] [--use-tutel] [--inference] +100.83.37.188: [--no-async-tensor-model-parallel-allreduce] +100.83.37.188: [--no-persist-layer-norm] [--sequence-parallel] +100.83.37.188: [--ds-sequence-parallel-size DS_SEQUENCE_PARALLEL_SIZE] +100.83.37.188: [--force-ds-sequence-parallel] +100.83.37.188: [--no-gradient-accumulation-fusion] +100.83.37.188: [--use-dataset-only USE_DATASET_ONLY] [--seed SEED] +100.83.37.188: [--data-parallel-random-init] +100.83.37.188: [--init-method-std INIT_METHOD_STD] +100.83.37.188: [--init-method-xavier-uniform] [--no-scaled-init] +100.83.37.188: [--lr LR] +100.83.37.188: [--lr-decay-style {constant,linear,cosine,inverse-square-root}] +100.83.37.188: [--lr-decay-iters LR_DECAY_ITERS] +100.83.37.188: [--lr-decay-samples LR_DECAY_SAMPLES] +100.83.37.188: [--lr-decay-tokens LR_DECAY_TOKENS] +100.83.37.188: [--lr-warmup-fraction LR_WARMUP_FRACTION] +100.83.37.188: [--lr-warmup-iters LR_WARMUP_ITERS] +100.83.37.188: [--lr-warmup-samples LR_WARMUP_SAMPLES] +100.83.37.188: [--lr-warmup-tokens LR_WARMUP_TOKENS] +100.83.37.188: [--warmup WARMUP] [--min-lr MIN_LR] +100.83.37.188: [--override-opt_param-scheduler] +100.83.37.188: [--use-checkpoint-opt_param-scheduler] [--save SAVE] +100.83.37.188: [--save-interval SAVE_INTERVAL] [--no-save-optim] +100.83.37.188: [--no-save-rng] [--load LOAD] [--no-load-optim] +100.83.37.188: [--no-load-rng] [--no-load-lr-state] [--finetune] +100.83.37.188: [--no-initialization] [--use-checkpoint-args] +100.83.37.188: [--exit-on-missing-checkpoint] +100.83.37.188: [--universal-checkpoint] [--verify-checkpoint] +100.83.37.188: [--verify-checkpoint-model-type {GPT,BLOOM,LLAMA}] +100.83.37.188: [--fp16] [--bf16] [--loss-scale LOSS_SCALE] +100.83.37.188: [--initial-loss-scale INITIAL_LOSS_SCALE] +100.83.37.188: [--min-loss-scale MIN_LOSS_SCALE] +100.83.37.188: [--loss-scale-window LOSS_SCALE_WINDOW] +100.83.37.188: [--hysteresis HYSTERESIS] +100.83.37.188: [--fp32-residual-connection] +100.83.37.188: [--no-query-key-layer-scaling] 
+100.83.37.188: [--attention-softmax-in-fp32] +100.83.37.188: [--accumulate-allreduce-grads-in-fp32] +100.83.37.188: [--fp16-lm-cross-entropy] +100.83.37.188: [--tensor-model-parallel-size TENSOR_MODEL_PARALLEL_SIZE] +100.83.37.188: [--enable-expert-tensor-parallelism] +100.83.37.188: [--pipeline-model-parallel-size PIPELINE_MODEL_PARALLEL_SIZE] +100.83.37.188: [--pipeline-model-parallel-split-rank PIPELINE_MODEL_PARALLEL_SPLIT_RANK] +100.83.37.188: [--moe-expert-parallel-size MOE_EXPERT_PARALLEL_SIZE] +100.83.37.188: [--model-parallel-size MODEL_PARALLEL_SIZE] +100.83.37.188: [--num-layers-per-virtual-pipeline-stage NUM_LAYERS_PER_VIRTUAL_PIPELINE_STAGE] +100.83.37.188: [--overlap-p2p-communication] +100.83.37.188: [--distributed-backend {nccl,gloo,ccl,hccl}] +100.83.37.188: [--distributed-timeout-minutes DISTRIBUTED_TIMEOUT_MINUTES] +100.83.37.188: [--DDP-impl {local,torch,FSDP}] +100.83.37.188: [--no-contiguous-buffers-in-local-ddp] +100.83.37.188: [--no-scatter-gather-tensors-in-pipeline] +100.83.37.188: [--use-ring-exchange-p2p] [--local-rank LOCAL_RANK] +100.83.37.188: [--lazy-mpu-init LAZY_MPU_INIT] +100.83.37.188: [--use-cpu-initialization] +100.83.37.188: [--empty-unused-memory-level {0,1,2}] +100.83.37.188: [--standalone-embedding-stage] +100.83.37.188: [--use-distributed-optimizer] +100.83.37.188: [--eval-iters EVAL_ITERS] +100.83.37.188: [--eval-interval EVAL_INTERVAL] [--skip-train] +100.83.37.188: [--eval-loss-exit-value EVAL_LOSS_EXIT_VALUE] +100.83.37.188: [--aml-data-download-path AML_DATA_DOWNLOAD_PATH] +100.83.37.188: [--data-path [DATA_PATH ...]] [--split SPLIT] +100.83.37.188: [--train-data-path [TRAIN_DATA_PATH ...]] +100.83.37.188: [--valid-data-path [VALID_DATA_PATH ...]] +100.83.37.188: [--test-data-path [TEST_DATA_PATH ...]] +100.83.37.188: [--data-cache-path DATA_CACHE_PATH] +100.83.37.188: [--vocab-size VOCAB_SIZE] [--vocab-file VOCAB_FILE] +100.83.37.188: [--merge-file MERGE_FILE] +100.83.37.188: [--vocab-extra-ids VOCAB_EXTRA_IDS] +100.83.37.188: [--seq-length SEQ_LENGTH] +100.83.37.188: [--encoder-seq-length ENCODER_SEQ_LENGTH] +100.83.37.188: [--decoder-seq-length DECODER_SEQ_LENGTH] +100.83.37.188: [--retriever-seq-length RETRIEVER_SEQ_LENGTH] +100.83.37.188: [--sample-rate SAMPLE_RATE] [--mask-prob MASK_PROB] +100.83.37.188: [--short-seq-prob SHORT_SEQ_PROB] [--mmap-warmup] +100.83.37.188: [--num-workers NUM_WORKERS] +100.83.37.188: [--tokenizer-type {BertWordPieceLowerCase,BertWordPieceCase,GPT2BPETokenizer,SentencePieceTokenizer,GPTSentencePieceTokenizer,NullTokenizer}] +100.83.37.188: [--tokenizer-model TOKENIZER_MODEL] +100.83.37.188: [--data-impl {mmap,infer}] [--reset-position-ids] +100.83.37.188: [--reset-attention-mask] [--eod-mask-loss] +100.83.37.188: [--train-data-exact-num-epochs TRAIN_DATA_EXACT_NUM_EPOCHS] +100.83.37.188: [--return-data-index] +100.83.37.188: [--data-efficiency-curriculum-learning] +100.83.37.188: [--train-idx-path TRAIN_IDX_PATH] +100.83.37.188: [--train-desc-path TRAIN_DESC_PATH] +100.83.37.188: [--train-doc-idx-path TRAIN_DOC_IDX_PATH] +100.83.37.188: [--train-sample-idx-path TRAIN_SAMPLE_IDX_PATH] +100.83.37.188: [--train-shuffle-idx-path TRAIN_SHUFFLE_IDX_PATH] +100.83.37.188: [--mask-tensor-adding] [--no-seq-len-plus-one-tokens] +100.83.37.188: [--adlr-autoresume] +100.83.37.188: [--adlr-autoresume-interval ADLR_AUTORESUME_INTERVAL] +100.83.37.188: [--ict-head-size ICT_HEAD_SIZE] +100.83.37.188: [--biencoder-projection-dim BIENCODER_PROJECTION_DIM] +100.83.37.188: [--biencoder-shared-query-context-model] +100.83.37.188: 
[--ict-load ICT_LOAD] [--bert-load BERT_LOAD] +100.83.37.188: [--titles-data-path TITLES_DATA_PATH] +100.83.37.188: [--query-in-block-prob QUERY_IN_BLOCK_PROB] +100.83.37.188: [--use-one-sent-docs] +100.83.37.188: [--evidence-data-path EVIDENCE_DATA_PATH] +100.83.37.188: [--retriever-report-topk-accuracies RETRIEVER_REPORT_TOPK_ACCURACIES [RETRIEVER_REPORT_TOPK_ACCURACIES ...]] +100.83.37.188: [--retriever-score-scaling] +100.83.37.188: [--block-data-path BLOCK_DATA_PATH] +100.83.37.188: [--embedding-path EMBEDDING_PATH] +100.83.37.188: [--indexer-batch-size INDEXER_BATCH_SIZE] +100.83.37.188: [--indexer-log-interval INDEXER_LOG_INTERVAL] +100.83.37.188: [--num-classes NUM_CLASSES] [--img-h IMG_H] +100.83.37.188: [--img-w IMG_W] [--num-channels NUM_CHANNELS] +100.83.37.188: [--patch-dim PATCH_DIM] +100.83.37.188: [--classes-fraction CLASSES_FRACTION] +100.83.37.188: [--data-per-class-fraction DATA_PER_CLASS_FRACTION] +100.83.37.188: [--no-data-sharding] [--head-lr-mult HEAD_LR_MULT] +100.83.37.188: [--vision-pretraining] +100.83.37.188: [--vision-pretraining-type {classify,inpaint,dino}] +100.83.37.188: [--vision-backbone-type {vit,mit,swin}] +100.83.37.188: [--swin-backbone-type {tiny,base,h3}] +100.83.37.188: [--mask-type {random,row}] +100.83.37.188: [--mask-factor MASK_FACTOR] +100.83.37.188: [--iter-per-epoch ITER_PER_EPOCH] +100.83.37.188: [--dino-local-img-size DINO_LOCAL_IMG_SIZE] +100.83.37.188: [--dino-local-crops-number DINO_LOCAL_CROPS_NUMBER] +100.83.37.188: [--dino-head-hidden-size DINO_HEAD_HIDDEN_SIZE] +100.83.37.188: [--dino-bottleneck-size DINO_BOTTLENECK_SIZE] +100.83.37.188: [--dino-freeze-last-layer DINO_FREEZE_LAST_LAYER] +100.83.37.188: [--dino-norm-last-layer] +100.83.37.188: [--dino-warmup-teacher-temp DINO_WARMUP_TEACHER_TEMP] +100.83.37.188: [--dino-teacher-temp DINO_TEACHER_TEMP] +100.83.37.188: [--dino-warmup-teacher-temp-epochs DINO_WARMUP_TEACHER_TEMP_EPOCHS] +100.83.37.188: [--log-params-norm] [--log-num-zeros-in-grad] +100.83.37.188: [--timing-log-level {0,1,2}] +100.83.37.188: [--no-barrier-with-level-1-timing] +100.83.37.188: [--timing-log-option {max,minmax,all}] +100.83.37.188: [--tensorboard-log-interval TENSORBOARD_LOG_INTERVAL] +100.83.37.188: [--tensorboard-queue-size TENSORBOARD_QUEUE_SIZE] +100.83.37.188: [--log-timers-to-tensorboard] +100.83.37.188: [--log-batch-size-to-tensorboard] +100.83.37.188: [--no-log-learnig-rate-to-tensorboard] +100.83.37.188: [--no-log-loss-scale-to-tensorboard] +100.83.37.188: [--log-validation-ppl-to-tensorboard] +100.83.37.188: [--log-optimizer-states-to-tensorboard] +100.83.37.188: [--log-memory-to-tensorboard] +100.83.37.188: [--log-world-size-to-tensorboard] +100.83.37.188: [--zero-stage ZERO_STAGE] [--zero-reduce-scatter] +100.83.37.188: [--zero-contigious-gradients] +100.83.37.188: [--zero-reduce-bucket-size ZERO_REDUCE_BUCKET_SIZE] +100.83.37.188: [--zero-allgather-bucket-size ZERO_ALLGATHER_BUCKET_SIZE] +100.83.37.188: [--remote-device {none,cpu,nvme}] [--use-pin-memory] +100.83.37.188: [--scattered-embeddings] [--split-transformers] +100.83.37.188: [--memory-centric-tiled-linear] +100.83.37.188: [--tile-factor TILE_FACTOR] +100.83.37.188: [--deepspeed-activation-checkpointing] +100.83.37.188: [--partition-activations] +100.83.37.188: [--contigious-checkpointing] [--checkpoint-in-cpu] +100.83.37.188: [--synchronize-each-layer] [--profile-backward] +100.83.37.188: [--num-layers-teacher NUM_LAYERS_TEACHER] +100.83.37.188: [--num-experts-teacher NUM_EXPERTS_TEACHER [NUM_EXPERTS_TEACHER ...]] +100.83.37.188: 
[--hidden-size-teacher HIDDEN_SIZE_TEACHER] +100.83.37.188: [--num-attention-heads-teacher NUM_ATTENTION_HEADS_TEACHER] +100.83.37.188: [--mos] [--kd] [--kd-alpha-ce KD_ALPHA_CE] +100.83.37.188: [--kd-beta-ce KD_BETA_CE] [--kd-temp KD_TEMP] +100.83.37.188: [--reset-iteration] [--load-teacher LOAD_TEACHER] +100.83.37.188: [--inference-batch-times-seqlen-threshold INFERENCE_BATCH_TIMES_SEQLEN_THRESHOLD] +100.83.37.188: [--max-tokens-to-oom MAX_TOKENS_TO_OOM] +100.83.37.188: [--output-bert-embeddings] +100.83.37.188: [--bert-embedder-type {megatron,huggingface}] +100.83.37.188: [--cache-fp8-weight] +100.83.37.188: [--cache-fp8-weight-fwd CACHE_FP8_WEIGHT_FWD] +100.83.37.188: [--fp8-e5m2] [--fp8-e4m3] [--fp8-hybrid] +100.83.37.188: [--no-fp8-wgrad] [--fp8-margin FP8_MARGIN] +100.83.37.188: [--fp8-interval FP8_INTERVAL] +100.83.37.188: [--transformer-impl {local,transformer_engine}] +100.83.37.188: [--fp8-amax-history-len FP8_AMAX_HISTORY_LEN] +100.83.37.188: [--fp8-amax-compute-algo {most_recent,max}] +100.83.37.188: [--retro-workdir RETRO_WORKDIR] +100.83.37.188: [--retro-add-retriever] +100.83.37.188: [--retro-cyclic-train-iters RETRO_CYCLIC_TRAIN_ITERS] +100.83.37.188: [--retro-encoder-layers RETRO_ENCODER_LAYERS] +100.83.37.188: [--retro-encoder-hidden-dropout RETRO_ENCODER_HIDDEN_DROPOUT] +100.83.37.188: [--retro-encoder-attention-dropout RETRO_ENCODER_ATTENTION_DROPOUT] +100.83.37.188: [--retro-num-neighbors RETRO_NUM_NEIGHBORS] +100.83.37.188: [--retro-num-retrieved-chunks RETRO_NUM_RETRIEVED_CHUNKS] +100.83.37.188: [--retro-return-doc-ids] +100.83.37.188: [--profile {pt,pt-full,hltv}] +100.83.37.188: [--profile-steps PROFILE_STEPS] [--deepspeed] +100.83.37.188: [--deepspeed_config DEEPSPEED_CONFIG] [--deepscale] +100.83.37.188: [--deepscale_config DEEPSCALE_CONFIG] +100.83.37.188: [--deepspeed_mpi] [--use_hpu] [--no_cuda] +100.83.37.188: pretrain_llama.py: error: unrecognized arguments: --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --use-torch-compile false --hpu-deterministic --hf-save /data/output/llama13b_multiling_800M/13-05-2024-11:50:01/hf_ckpt
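Note: every rank aborts here for the same reason: the launch command passes flags that this revision of pretrain_llama.py never registers, so argparse prints its usage text and exits. A minimal sketch of how the rejected flags could map onto arguments that do appear in the usage listing above (an assumption inferred from that listing, not a verified fix):

    # Hypothetical re-mapping, inferred from the usage text above -- verify
    # against the script's argparse setup before relying on it.
    --position-embedding-type rotary   ->  --use-rotary-position-embeddings
    --layernorm-type rmsnorm           ->  --normalization rmsnorm
    --activation-func-type swiglu      ->  --swiglu
    --no-bias                          ->  --disable-bias-linear
    # --use-torch-compile, --hpu-deterministic and --hf-save have no listed
    # equivalent here; they would need to be dropped, or the matching
    # Megatron-DeepSpeed revision checked out.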
+100.83.37.175: -------------------------------------------------- +100.83.37.175: DeepSpeed C++/CUDA extension op report +100.83.37.175: -------------------------------------------------- +100.83.37.175: NOTE: Ops not installed will be just-in-time (JIT) compiled at +100.83.37.175: runtime if needed. Op compatibility means that your system +100.83.37.175: meet the required dependencies to JIT install the op. +100.83.37.175: -------------------------------------------------- +100.83.37.175: JIT compiled ops requires ninja +100.83.37.175: ninja .................. [OKAY] +100.83.37.175: -------------------------------------------------- +100.83.37.175: op name ................ installed .. compatible +100.83.37.175: -------------------------------------------------- +100.83.37.175: cpu_adam ............... [NO] ....... [OKAY] +100.83.37.175: fused_adam ............. [NO] ....... [OKAY] +100.83.37.175: deepspeed_not_implemented [NO] ....... [OKAY] +100.83.37.175: transformer_inference .. [NO] ....... [OKAY] +100.83.37.175: -------------------------------------------------- +100.83.37.175: DeepSpeed general environment info: +100.83.37.175: torch install path ............... ['/usr/local/lib/python3.10/dist-packages/torch'] +100.83.37.175: torch version .................... 2.1.1a0+gitb51c9f6 +100.83.37.175: deepspeed install path ........... ['/usr/local/lib/python3.10/dist-packages/deepspeed'] +100.83.37.175: deepspeed info ................... 0.12.4+hpu.synapse.v1.14.0, fad45b2, 1.14.0 +100.83.37.175: deepspeed wheel compiled w. ...... torch 2.1 +100.83.37.175: shared memory (/dev/shm) size ....
503.75 GB +100.83.37.175: fatal: detected dubious ownership in repository at '/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed' +100.83.37.175: To add an exception for this directory, call: +100.83.37.175: +100.83.37.175: git config --global --add safe.directory /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed +100.83.37.175: **** Git info for Megatron: git_hash=unknown git_branch=unknown ****
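Note: the "dubious ownership" message is git's safe.directory check firing inside the container; it does not block training by itself, but it is why the Git info line above reports git_hash=unknown and git_branch=unknown, since git refuses to read the repository's metadata. Running the command git suggests once per node (for example baked into the container image or the launch script) restores hash reporting:

    git config --global --add safe.directory /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed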
+100.83.37.175: usage: pretrain_llama.py [-h] [--num-layers NUM_LAYERS] +100.83.37.175: [--encoder-num-layers ENCODER_NUM_LAYERS] +100.83.37.175: [--decoder-num-layers DECODER_NUM_LAYERS] +100.83.37.175: [--num-experts NUM_EXPERTS [NUM_EXPERTS ...]] +100.83.37.175: [--mlp-type MLP_TYPE] [--topk TOPK] +100.83.37.175: [--expert-interval EXPERT_INTERVAL] +100.83.37.175: [--hidden-size HIDDEN_SIZE] +100.83.37.175: [--ffn-hidden-size FFN_HIDDEN_SIZE] +100.83.37.175: [--num-attention-heads NUM_ATTENTION_HEADS] +100.83.37.175: [--num-key-value-heads NUM_KEY_VALUE_HEADS] +100.83.37.175: [--kv-channels KV_CHANNELS] +100.83.37.175: [--max-position-embeddings MAX_POSITION_EMBEDDINGS] +100.83.37.175: [--use-rotary-position-embeddings] +100.83.37.175: [--rotary-percent ROTARY_PERCENT] +100.83.37.175: [--no-position-embedding] +100.83.37.175: [--make-vocab-size-divisible-by MAKE_VOCAB_SIZE_DIVISIBLE_BY] +100.83.37.175: [--normalization {layernorm,rmsnorm}] +100.83.37.175: [--use-fused-rmsnorm USE_FUSED_RMSNORM] +100.83.37.175: [--layernorm-epsilon LAYERNORM_EPSILON] +100.83.37.175: [--apply-layernorm-1p] [--disable-mem-efficient-ln] +100.83.37.175: [--apply-residual-connection-post-layernorm] +100.83.37.175: [--openai-gelu] [--squared-relu] [--swiglu] +100.83.37.175: [--onnx-safe ONNX_SAFE] [--bert-no-binary-head] +100.83.37.175: [--num-experts-switch NUM_EXPERTS_SWITCH] +100.83.37.175: [--untie-embeddings-and-output-weights] +100.83.37.175: [--embedding-weights-in-fp32] +100.83.37.175: [--fix-position-emb-redundant-alloc] +100.83.37.175: [--embed-layernorm] +100.83.37.175: [--kill-switch-path KILL_SWITCH_PATH] +100.83.37.175: [--attention-dropout ATTENTION_DROPOUT] +100.83.37.175: [--hidden-dropout HIDDEN_DROPOUT] +100.83.37.175: [--weight-decay WEIGHT_DECAY] +100.83.37.175: [--start-weight-decay START_WEIGHT_DECAY] +100.83.37.175: [--end-weight-decay END_WEIGHT_DECAY] +100.83.37.175: [--weight-decay-incr-style {constant,linear,cosine}] +100.83.37.175: [--clip-grad CLIP_GRAD] [--adam-beta1 ADAM_BETA1] +100.83.37.175: [--adam-beta2 ADAM_BETA2] [--adam-eps ADAM_EPS] +100.83.37.175: [--sgd-momentum SGD_MOMENTUM]
+100.83.37.175: [--do-norm-bias-weight-decay] +100.83.37.175: [--micro-batch-size MICRO_BATCH_SIZE] +100.83.37.175: [--eval-micro-batch-size EVAL_MICRO_BATCH_SIZE] +100.83.37.175: [--batch-size BATCH_SIZE] +100.83.37.175: [--global-batch-size GLOBAL_BATCH_SIZE] +100.83.37.175: [--rampup-batch-size [RAMPUP_BATCH_SIZE ...]] +100.83.37.175: [--recompute-activations] +100.83.37.175: [--recompute-granularity {full,selective}] +100.83.37.175: [--distribute-saved-activations] +100.83.37.175: [--recompute-method {uniform,block}] +100.83.37.175: [--recompute-num-layers RECOMPUTE_NUM_LAYERS] +100.83.37.175: [--checkpoint-activations] +100.83.37.175: [--distribute-checkpointed-activations] +100.83.37.175: [--checkpoint-num-layers CHECKPOINT_NUM_LAYERS] +100.83.37.175: [--train-iters TRAIN_ITERS] +100.83.37.175: [--train-samples TRAIN_SAMPLES] +100.83.37.175: [--train-tokens TRAIN_TOKENS] [--random-ltd] +100.83.37.175: [--log-interval LOG_INTERVAL] +100.83.37.175: [--exit-interval EXIT_INTERVAL] +100.83.37.175: [--exit-duration-in-mins EXIT_DURATION_IN_MINS] +100.83.37.175: [--exit-signal-handler] +100.83.37.175: [--tensorboard-dir TENSORBOARD_DIR] +100.83.37.175: [--no-masked-softmax-fusion] [--no-bias-gelu-fusion] +100.83.37.175: [--no-bias-dropout-fusion] +100.83.37.175: [--disable-moe-token-dropping] +100.83.37.175: [--moe-train-capacity-factor MOE_TRAIN_CAPACITY_FACTOR] +100.83.37.175: [--moe-eval-capacity-factor MOE_EVAL_CAPACITY_FACTOR] +100.83.37.175: [--moe-min-capacity MOE_MIN_CAPACITY] +100.83.37.175: [--moe-loss-coeff MOE_LOSS_COEFF] +100.83.37.175: [--create-moe-param-group] [--use-flash-attn] +100.83.37.175: [--use-flash-attn-v2] [--use-flash-attn-triton] +100.83.37.175: [--use-fused-sdpa USE_FUSED_SDPA] +100.83.37.175: [--use-fused-sdpa-with-recompute USE_FUSED_SDPA_WITH_RECOMPUTE] +100.83.37.175: [--disable-bias-linear] +100.83.37.175: [--optimizer {adam,sgd,adamw,fusedadamw}] +100.83.37.175: [--dataloader-type {single,cyclic}] [--ds-inference] +100.83.37.175: [--cpu-optimizer] [--cpu_torch_adam] +100.83.37.175: [--no-pipeline-parallel] [--use-tutel] [--inference] +100.83.37.175: [--no-async-tensor-model-parallel-allreduce] +100.83.37.175: [--no-persist-layer-norm] [--sequence-parallel] +100.83.37.175: [--ds-sequence-parallel-size DS_SEQUENCE_PARALLEL_SIZE] +100.83.37.175: [--force-ds-sequence-parallel] +100.83.37.175: [--no-gradient-accumulation-fusion] +100.83.37.175: [--use-dataset-only USE_DATASET_ONLY] [--seed SEED] +100.83.37.175: [--data-parallel-random-init] +100.83.37.175: [--init-method-std INIT_METHOD_STD] +100.83.37.175: [--init-method-xavier-uniform] [--no-scaled-init] +100.83.37.175: [--lr LR] +100.83.37.175: [--lr-decay-style {constant,linear,cosine,inverse-square-root}] +100.83.37.175: [--lr-decay-iters LR_DECAY_ITERS] +100.83.37.175: [--lr-decay-samples LR_DECAY_SAMPLES] +100.83.37.175: [--lr-decay-tokens LR_DECAY_TOKENS] +100.83.37.175: [--lr-warmup-fraction LR_WARMUP_FRACTION] +100.83.37.175: [--lr-warmup-iters LR_WARMUP_ITERS] +100.83.37.175: [--lr-warmup-samples LR_WARMUP_SAMPLES] +100.83.37.175: [--lr-warmup-tokens LR_WARMUP_TOKENS] +100.83.37.175: [--warmup WARMUP] [--min-lr MIN_LR] +100.83.37.175: [--override-opt_param-scheduler] +100.83.37.175: [--use-checkpoint-opt_param-scheduler] [--save SAVE] +100.83.37.175: [--save-interval SAVE_INTERVAL] [--no-save-optim] +100.83.37.175: [--no-save-rng] [--load LOAD] [--no-load-optim] +100.83.37.175: [--no-load-rng] [--no-load-lr-state] [--finetune] +100.83.37.175: [--no-initialization] [--use-checkpoint-args] 
+100.83.37.175: [--exit-on-missing-checkpoint] +100.83.37.175: [--universal-checkpoint] [--verify-checkpoint] +100.83.37.175: [--verify-checkpoint-model-type {GPT,BLOOM,LLAMA}] +100.83.37.175: [--fp16] [--bf16] [--loss-scale LOSS_SCALE] +100.83.37.175: [--initial-loss-scale INITIAL_LOSS_SCALE] +100.83.37.175: [--min-loss-scale MIN_LOSS_SCALE] +100.83.37.175: [--loss-scale-window LOSS_SCALE_WINDOW] +100.83.37.175: [--hysteresis HYSTERESIS] +100.83.37.175: [--fp32-residual-connection] +100.83.37.175: [--no-query-key-layer-scaling] +100.83.37.175: [--attention-softmax-in-fp32] +100.83.37.175: [--accumulate-allreduce-grads-in-fp32] +100.83.37.175: [--fp16-lm-cross-entropy] +100.83.37.175: [--tensor-model-parallel-size TENSOR_MODEL_PARALLEL_SIZE] +100.83.37.175: [--enable-expert-tensor-parallelism] +100.83.37.175: [--pipeline-model-parallel-size PIPELINE_MODEL_PARALLEL_SIZE] +100.83.37.175: [--pipeline-model-parallel-split-rank PIPELINE_MODEL_PARALLEL_SPLIT_RANK] +100.83.37.175: [--moe-expert-parallel-size MOE_EXPERT_PARALLEL_SIZE] +100.83.37.175: [--model-parallel-size MODEL_PARALLEL_SIZE] +100.83.37.175: [--num-layers-per-virtual-pipeline-stage NUM_LAYERS_PER_VIRTUAL_PIPELINE_STAGE] +100.83.37.175: [--overlap-p2p-communication] +100.83.37.175: [--distributed-backend {nccl,gloo,ccl,hccl}] +100.83.37.175: [--distributed-timeout-minutes DISTRIBUTED_TIMEOUT_MINUTES] +100.83.37.175: [--DDP-impl {local,torch,FSDP}] +100.83.37.175: [--no-contiguous-buffers-in-local-ddp] +100.83.37.175: [--no-scatter-gather-tensors-in-pipeline] +100.83.37.175: [--use-ring-exchange-p2p] [--local-rank LOCAL_RANK] +100.83.37.175: [--lazy-mpu-init LAZY_MPU_INIT] +100.83.37.175: [--use-cpu-initialization] +100.83.37.175: [--empty-unused-memory-level {0,1,2}] +100.83.37.175: [--standalone-embedding-stage] +100.83.37.175: [--use-distributed-optimizer] +100.83.37.175: [--eval-iters EVAL_ITERS] +100.83.37.175: [--eval-interval EVAL_INTERVAL] [--skip-train] +100.83.37.175: [--eval-loss-exit-value EVAL_LOSS_EXIT_VALUE] +100.83.37.175: [--aml-data-download-path AML_DATA_DOWNLOAD_PATH] +100.83.37.175: [--data-path [DATA_PATH ...]] [--split SPLIT] +100.83.37.175: [--train-data-path [TRAIN_DATA_PATH ...]] +100.83.37.175: [--valid-data-path [VALID_DATA_PATH ...]] +100.83.37.175: [--test-data-path [TEST_DATA_PATH ...]] +100.83.37.175: [--data-cache-path DATA_CACHE_PATH] +100.83.37.175: [--vocab-size VOCAB_SIZE] [--vocab-file VOCAB_FILE] +100.83.37.175: [--merge-file MERGE_FILE] +100.83.37.175: [--vocab-extra-ids VOCAB_EXTRA_IDS] +100.83.37.175: [--seq-length SEQ_LENGTH] +100.83.37.175: [--encoder-seq-length ENCODER_SEQ_LENGTH] +100.83.37.175: [--decoder-seq-length DECODER_SEQ_LENGTH] +100.83.37.175: [--retriever-seq-length RETRIEVER_SEQ_LENGTH] +100.83.37.175: [--sample-rate SAMPLE_RATE] [--mask-prob MASK_PROB] +100.83.37.175: [--short-seq-prob SHORT_SEQ_PROB] [--mmap-warmup] +100.83.37.175: [--num-workers NUM_WORKERS] +100.83.37.175: [--tokenizer-type {BertWordPieceLowerCase,BertWordPieceCase,GPT2BPETokenizer,SentencePieceTokenizer,GPTSentencePieceTokenizer,NullTokenizer}] +100.83.37.175: [--tokenizer-model TOKENIZER_MODEL] +100.83.37.175: [--data-impl {mmap,infer}] [--reset-position-ids] +100.83.37.175: [--reset-attention-mask] [--eod-mask-loss] +100.83.37.175: [--train-data-exact-num-epochs TRAIN_DATA_EXACT_NUM_EPOCHS] +100.83.37.175: [--return-data-index] +100.83.37.175: [--data-efficiency-curriculum-learning] +100.83.37.175: [--train-idx-path TRAIN_IDX_PATH] +100.83.37.175: [--train-desc-path TRAIN_DESC_PATH] +100.83.37.175: 
[--train-doc-idx-path TRAIN_DOC_IDX_PATH] +100.83.37.175: [--train-sample-idx-path TRAIN_SAMPLE_IDX_PATH] +100.83.37.175: [--train-shuffle-idx-path TRAIN_SHUFFLE_IDX_PATH] +100.83.37.175: [--mask-tensor-adding] [--no-seq-len-plus-one-tokens] +100.83.37.175: [--adlr-autoresume] +100.83.37.175: [--adlr-autoresume-interval ADLR_AUTORESUME_INTERVAL] +100.83.37.175: [--ict-head-size ICT_HEAD_SIZE] +100.83.37.175: [--biencoder-projection-dim BIENCODER_PROJECTION_DIM] +100.83.37.175: [--biencoder-shared-query-context-model] +100.83.37.175: [--ict-load ICT_LOAD] [--bert-load BERT_LOAD] +100.83.37.175: [--titles-data-path TITLES_DATA_PATH] +100.83.37.175: [--query-in-block-prob QUERY_IN_BLOCK_PROB] +100.83.37.175: [--use-one-sent-docs] +100.83.37.175: [--evidence-data-path EVIDENCE_DATA_PATH] +100.83.37.175: [--retriever-report-topk-accuracies RETRIEVER_REPORT_TOPK_ACCURACIES [RETRIEVER_REPORT_TOPK_ACCURACIES ...]] +100.83.37.175: [--retriever-score-scaling] +100.83.37.175: [--block-data-path BLOCK_DATA_PATH] +100.83.37.175: [--embedding-path EMBEDDING_PATH] +100.83.37.175: [--indexer-batch-size INDEXER_BATCH_SIZE] +100.83.37.175: [--indexer-log-interval INDEXER_LOG_INTERVAL] +100.83.37.175: [--num-classes NUM_CLASSES] [--img-h IMG_H] +100.83.37.175: [--img-w IMG_W] [--num-channels NUM_CHANNELS] +100.83.37.175: [--patch-dim PATCH_DIM] +100.83.37.175: [--classes-fraction CLASSES_FRACTION] +100.83.37.175: [--data-per-class-fraction DATA_PER_CLASS_FRACTION] +100.83.37.175: [--no-data-sharding] [--head-lr-mult HEAD_LR_MULT] +100.83.37.175: [--vision-pretraining] +100.83.37.175: [--vision-pretraining-type {classify,inpaint,dino}] +100.83.37.175: [--vision-backbone-type {vit,mit,swin}] +100.83.37.175: [--swin-backbone-type {tiny,base,h3}] +100.83.37.175: [--mask-type {random,row}] +100.83.37.175: [--mask-factor MASK_FACTOR] +100.83.37.175: [--iter-per-epoch ITER_PER_EPOCH] +100.83.37.175: [--dino-local-img-size DINO_LOCAL_IMG_SIZE] +100.83.37.175: [--dino-local-crops-number DINO_LOCAL_CROPS_NUMBER] +100.83.37.175: [--dino-head-hidden-size DINO_HEAD_HIDDEN_SIZE] +100.83.37.175: [--dino-bottleneck-size DINO_BOTTLENECK_SIZE] +100.83.37.175: [--dino-freeze-last-layer DINO_FREEZE_LAST_LAYER] +100.83.37.175: [--dino-norm-last-layer] +100.83.37.175: [--dino-warmup-teacher-temp DINO_WARMUP_TEACHER_TEMP] +100.83.37.175: [--dino-teacher-temp DINO_TEACHER_TEMP] +100.83.37.175: [--dino-warmup-teacher-temp-epochs DINO_WARMUP_TEACHER_TEMP_EPOCHS] +100.83.37.175: [--log-params-norm] [--log-num-zeros-in-grad] +100.83.37.175: [--timing-log-level {0,1,2}] +100.83.37.175: [--no-barrier-with-level-1-timing] +100.83.37.175: [--timing-log-option {max,minmax,all}] +100.83.37.175: [--tensorboard-log-interval TENSORBOARD_LOG_INTERVAL] +100.83.37.175: [--tensorboard-queue-size TENSORBOARD_QUEUE_SIZE] +100.83.37.175: [--log-timers-to-tensorboard] +100.83.37.175: [--log-batch-size-to-tensorboard] +100.83.37.175: [--no-log-learnig-rate-to-tensorboard] +100.83.37.175: [--no-log-loss-scale-to-tensorboard] +100.83.37.175: [--log-validation-ppl-to-tensorboard] +100.83.37.175: [--log-optimizer-states-to-tensorboard] +100.83.37.175: [--log-memory-to-tensorboard] +100.83.37.175: [--log-world-size-to-tensorboard] +100.83.37.175: [--zero-stage ZERO_STAGE] [--zero-reduce-scatter] +100.83.37.175: [--zero-contigious-gradients] +100.83.37.175: [--zero-reduce-bucket-size ZERO_REDUCE_BUCKET_SIZE] +100.83.37.175: [--zero-allgather-bucket-size ZERO_ALLGATHER_BUCKET_SIZE] +100.83.37.175: [--remote-device {none,cpu,nvme}] [--use-pin-memory] 
+100.83.37.175: [--scattered-embeddings] [--split-transformers] +100.83.37.175: [--memory-centric-tiled-linear] +100.83.37.175: [--tile-factor TILE_FACTOR] +100.83.37.175: [--deepspeed-activation-checkpointing] +100.83.37.175: [--partition-activations] +100.83.37.175: [--contigious-checkpointing] [--checkpoint-in-cpu] +100.83.37.175: [--synchronize-each-layer] [--profile-backward] +100.83.37.175: [--num-layers-teacher NUM_LAYERS_TEACHER] +100.83.37.175: [--num-experts-teacher NUM_EXPERTS_TEACHER [NUM_EXPERTS_TEACHER ...]] +100.83.37.175: [--hidden-size-teacher HIDDEN_SIZE_TEACHER] +100.83.37.175: [--num-attention-heads-teacher NUM_ATTENTION_HEADS_TEACHER] +100.83.37.175: [--mos] [--kd] [--kd-alpha-ce KD_ALPHA_CE] +100.83.37.175: [--kd-beta-ce KD_BETA_CE] [--kd-temp KD_TEMP] +100.83.37.175: [--reset-iteration] [--load-teacher LOAD_TEACHER] +100.83.37.175: [--inference-batch-times-seqlen-threshold INFERENCE_BATCH_TIMES_SEQLEN_THRESHOLD] +100.83.37.175: [--max-tokens-to-oom MAX_TOKENS_TO_OOM] +100.83.37.175: [--output-bert-embeddings] +100.83.37.175: [--bert-embedder-type {megatron,huggingface}] +100.83.37.175: [--cache-fp8-weight] +100.83.37.175: [--cache-fp8-weight-fwd CACHE_FP8_WEIGHT_FWD] +100.83.37.175: [--fp8-e5m2] [--fp8-e4m3] [--fp8-hybrid] +100.83.37.175: [--no-fp8-wgrad] [--fp8-margin FP8_MARGIN] +100.83.37.175: [--fp8-interval FP8_INTERVAL] +100.83.37.175: [--transformer-impl {local,transformer_engine}] +100.83.37.175: [--fp8-amax-history-len FP8_AMAX_HISTORY_LEN] +100.83.37.175: [--fp8-amax-compute-algo {most_recent,max}] +100.83.37.175: [--retro-workdir RETRO_WORKDIR] +100.83.37.175: [--retro-add-retriever] +100.83.37.175: [--retro-cyclic-train-iters RETRO_CYCLIC_TRAIN_ITERS] +100.83.37.175: [--retro-encoder-layers RETRO_ENCODER_LAYERS] +100.83.37.175: [--retro-encoder-hidden-dropout RETRO_ENCODER_HIDDEN_DROPOUT] +100.83.37.175: [--retro-encoder-attention-dropout RETRO_ENCODER_ATTENTION_DROPOUT] +100.83.37.175: [--retro-num-neighbors RETRO_NUM_NEIGHBORS] +100.83.37.175: [--retro-num-retrieved-chunks RETRO_NUM_RETRIEVED_CHUNKS] +100.83.37.175: [--retro-return-doc-ids] +100.83.37.175: [--profile {pt,pt-full,hltv}] +100.83.37.175: [--profile-steps PROFILE_STEPS] [--deepspeed] +100.83.37.175: [--deepspeed_config DEEPSPEED_CONFIG] [--deepscale] +100.83.37.175: [--deepscale_config DEEPSCALE_CONFIG] +100.83.37.175: [--deepspeed_mpi] [--use_hpu] [--no_cuda] +100.83.37.175: pretrain_llama.py: error: unrecognized arguments: --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --use-torch-compile false --hpu-deterministic --hf-save /data/output/llama13b_multiling_800M/13-05-2024-11:50:01/hf_ckpt +100.83.37.175: pretrain_llama.py: error: unrecognized arguments: --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --use-torch-compile false --hpu-deterministic --hf-save /data/output/llama13b_multiling_800M/13-05-2024-11:50:01/hf_ckpt +100.83.37.175: -------------------------------------------------- +100.83.37.175: DeepSpeed C++/CUDA extension op report +100.83.37.175: -------------------------------------------------- +100.83.37.175: NOTE: Ops not installed will be just-in-time (JIT) compiled at +100.83.37.175: runtime if needed. Op compatibility means that your system +100.83.37.175: meet the required dependencies to JIT install the op. +100.83.37.175: -------------------------------------------------- +100.83.37.175: JIT compiled ops requires ninja +100.83.37.175: ninja .................. 
[OKAY] +100.83.37.175: -------------------------------------------------- +100.83.37.175: op name ................ installed .. compatible +100.83.37.175: -------------------------------------------------- +100.83.37.175: cpu_adam ............... [NO] ....... [OKAY] +100.83.37.175: fused_adam ............. [NO] ....... [OKAY] +100.83.37.175: deepspeed_not_implemented [NO] ....... [OKAY] +100.83.37.175: transformer_inference .. [NO] ....... [OKAY] +100.83.37.175: -------------------------------------------------- +100.83.37.175: DeepSpeed general environment info: +100.83.37.175: torch install path ............... ['/usr/local/lib/python3.10/dist-packages/torch'] +100.83.37.175: torch version .................... 2.1.1a0+gitb51c9f6 +100.83.37.175: deepspeed install path ........... ['/usr/local/lib/python3.10/dist-packages/deepspeed'] +100.83.37.175: deepspeed info ................... 0.12.4+hpu.synapse.v1.14.0, fad45b2, 1.14.0 +100.83.37.175: deepspeed wheel compiled w. ...... torch 2.1 +100.83.37.175: shared memory (/dev/shm) size .... 503.75 GB +100.83.37.175: usage: pretrain_llama.py [-h] [--num-layers NUM_LAYERS] +100.83.37.175: [--encoder-num-layers ENCODER_NUM_LAYERS] +100.83.37.175: [--decoder-num-layers DECODER_NUM_LAYERS] +100.83.37.175: [--num-experts NUM_EXPERTS [NUM_EXPERTS ...]] +100.83.37.175: [--mlp-type MLP_TYPE] [--topk TOPK] +100.83.37.175: [--expert-interval EXPERT_INTERVAL] +100.83.37.175: [--hidden-size HIDDEN_SIZE] +100.83.37.175: [--ffn-hidden-size FFN_HIDDEN_SIZE] +100.83.37.175: [--num-attention-heads NUM_ATTENTION_HEADS] +100.83.37.175: [--num-key-value-heads NUM_KEY_VALUE_HEADS] +100.83.37.175: [--kv-channels KV_CHANNELS] +100.83.37.175: [--max-position-embeddings MAX_POSITION_EMBEDDINGS] +100.83.37.175: [--use-rotary-position-embeddings] +100.83.37.175: [--rotary-percent ROTARY_PERCENT] +100.83.37.175: [--no-position-embedding] +100.83.37.175: [--make-vocab-size-divisible-by MAKE_VOCAB_SIZE_DIVISIBLE_BY] +100.83.37.175: [--normalization {layernorm,rmsnorm}] +100.83.37.175: [--use-fused-rmsnorm USE_FUSED_RMSNORM] +100.83.37.175: [--layernorm-epsilon LAYERNORM_EPSILON] +100.83.37.175: [--apply-layernorm-1p] [--disable-mem-efficient-ln] +100.83.37.175: [--apply-residual-connection-post-layernorm] +100.83.37.175: [--openai-gelu] [--squared-relu] [--swiglu] +100.83.37.175: [--onnx-safe ONNX_SAFE] [--bert-no-binary-head] +100.83.37.175: [--num-experts-switch NUM_EXPERTS_SWITCH] +100.83.37.175: [--untie-embeddings-and-output-weights] +100.83.37.175: [--embedding-weights-in-fp32] +100.83.37.175: [--fix-position-emb-redundant-alloc] +100.83.37.175: [--embed-layernorm] +100.83.37.175: [--kill-switch-path KILL_SWITCH_PATH] +100.83.37.175: [--attention-dropout ATTENTION_DROPOUT] +100.83.37.175: [--hidden-dropout HIDDEN_DROPOUT] +100.83.37.175: [--weight-decay WEIGHT_DECAY] +100.83.37.175: [--start-weight-decay START_WEIGHT_DECAY] +100.83.37.175: [--end-weight-decay END_WEIGHT_DECAY] +100.83.37.175: [--weight-decay-incr-style {constant,linear,cosine}] +100.83.37.175: [--clip-grad CLIP_GRAD] [--adam-beta1 ADAM_BETA1] +100.83.37.175: [--adam-beta2 ADAM_BETA2] [--adam-eps ADAM_EPS] +100.83.37.175: [--sgd-momentum SGD_MOMENTUM] +100.83.37.175: [--do-norm-bias-weight-decay] +100.83.37.175: [--micro-batch-size MICRO_BATCH_SIZE] +100.83.37.175: [--eval-micro-batch-size EVAL_MICRO_BATCH_SIZE] +100.83.37.175: [--batch-size BATCH_SIZE] +100.83.37.175: [--global-batch-size GLOBAL_BATCH_SIZE] +100.83.37.175: [--rampup-batch-size [RAMPUP_BATCH_SIZE ...]] +100.83.37.175: 
[--recompute-activations] +100.83.37.175: [--recompute-granularity {full,selective}] +100.83.37.175: [--distribute-saved-activations] +100.83.37.175: [--recompute-method {uniform,block}] +100.83.37.175: [--recompute-num-layers RECOMPUTE_NUM_LAYERS] +100.83.37.175: [--checkpoint-activations] +100.83.37.175: [--distribute-checkpointed-activations] +100.83.37.175: [--checkpoint-num-layers CHECKPOINT_NUM_LAYERS] +100.83.37.175: [--train-iters TRAIN_ITERS] +100.83.37.175: [--train-samples TRAIN_SAMPLES] +100.83.37.175: [--train-tokens TRAIN_TOKENS] [--random-ltd] +100.83.37.175: [--log-interval LOG_INTERVAL] +100.83.37.175: [--exit-interval EXIT_INTERVAL] +100.83.37.175: [--exit-duration-in-mins EXIT_DURATION_IN_MINS] +100.83.37.175: [--exit-signal-handler] +100.83.37.175: [--tensorboard-dir TENSORBOARD_DIR] +100.83.37.175: [--no-masked-softmax-fusion] [--no-bias-gelu-fusion] +100.83.37.175: [--no-bias-dropout-fusion] +100.83.37.175: [--disable-moe-token-dropping] +100.83.37.175: [--moe-train-capacity-factor MOE_TRAIN_CAPACITY_FACTOR] +100.83.37.175: [--moe-eval-capacity-factor MOE_EVAL_CAPACITY_FACTOR] +100.83.37.175: [--moe-min-capacity MOE_MIN_CAPACITY] +100.83.37.175: [--moe-loss-coeff MOE_LOSS_COEFF] +100.83.37.175: [--create-moe-param-group] [--use-flash-attn] +100.83.37.175: [--use-flash-attn-v2] [--use-flash-attn-triton] +100.83.37.175: [--use-fused-sdpa USE_FUSED_SDPA] +100.83.37.175: [--use-fused-sdpa-with-recompute USE_FUSED_SDPA_WITH_RECOMPUTE] +100.83.37.175: [--disable-bias-linear] +100.83.37.175: [--optimizer {adam,sgd,adamw,fusedadamw}] +100.83.37.175: [--dataloader-type {single,cyclic}] [--ds-inference] +100.83.37.175: [--cpu-optimizer] [--cpu_torch_adam] +100.83.37.175: [--no-pipeline-parallel] [--use-tutel] [--inference] +100.83.37.175: [--no-async-tensor-model-parallel-allreduce] +100.83.37.175: [--no-persist-layer-norm] [--sequence-parallel] +100.83.37.175: [--ds-sequence-parallel-size DS_SEQUENCE_PARALLEL_SIZE] +100.83.37.175: [--force-ds-sequence-parallel] +100.83.37.175: [--no-gradient-accumulation-fusion] +100.83.37.175: [--use-dataset-only USE_DATASET_ONLY] [--seed SEED] +100.83.37.175: [--data-parallel-random-init] +100.83.37.175: [--init-method-std INIT_METHOD_STD] +100.83.37.175: [--init-method-xavier-uniform] [--no-scaled-init] +100.83.37.175: [--lr LR] +100.83.37.175: [--lr-decay-style {constant,linear,cosine,inverse-square-root}] +100.83.37.175: [--lr-decay-iters LR_DECAY_ITERS] +100.83.37.175: [--lr-decay-samples LR_DECAY_SAMPLES] +100.83.37.175: [--lr-decay-tokens LR_DECAY_TOKENS] +100.83.37.175: [--lr-warmup-fraction LR_WARMUP_FRACTION] +100.83.37.175: [--lr-warmup-iters LR_WARMUP_ITERS] +100.83.37.175: [--lr-warmup-samples LR_WARMUP_SAMPLES] +100.83.37.175: [--lr-warmup-tokens LR_WARMUP_TOKENS] +100.83.37.175: [--warmup WARMUP] [--min-lr MIN_LR] +100.83.37.175: [--override-opt_param-scheduler] +100.83.37.175: [--use-checkpoint-opt_param-scheduler] [--save SAVE] +100.83.37.175: [--save-interval SAVE_INTERVAL] [--no-save-optim] +100.83.37.175: [--no-save-rng] [--load LOAD] [--no-load-optim] +100.83.37.175: [--no-load-rng] [--no-load-lr-state] [--finetune] +100.83.37.175: [--no-initialization] [--use-checkpoint-args] +100.83.37.175: [--exit-on-missing-checkpoint] +100.83.37.175: [--universal-checkpoint] [--verify-checkpoint] +100.83.37.175: [--verify-checkpoint-model-type {GPT,BLOOM,LLAMA}] +100.83.37.175: [--fp16] [--bf16] [--loss-scale LOSS_SCALE] +100.83.37.175: [--initial-loss-scale INITIAL_LOSS_SCALE] +100.83.37.175: [--min-loss-scale MIN_LOSS_SCALE] 
+100.83.37.175: [--loss-scale-window LOSS_SCALE_WINDOW] +100.83.37.175: [--hysteresis HYSTERESIS] +100.83.37.175: [--fp32-residual-connection] +100.83.37.175: [--no-query-key-layer-scaling] +100.83.37.175: [--attention-softmax-in-fp32] +100.83.37.175: [--accumulate-allreduce-grads-in-fp32] +100.83.37.175: [--fp16-lm-cross-entropy] +100.83.37.175: [--tensor-model-parallel-size TENSOR_MODEL_PARALLEL_SIZE] +100.83.37.175: [--enable-expert-tensor-parallelism] +100.83.37.175: [--pipeline-model-parallel-size PIPELINE_MODEL_PARALLEL_SIZE] +100.83.37.175: [--pipeline-model-parallel-split-rank PIPELINE_MODEL_PARALLEL_SPLIT_RANK] +100.83.37.175: [--moe-expert-parallel-size MOE_EXPERT_PARALLEL_SIZE] +100.83.37.175: [--model-parallel-size MODEL_PARALLEL_SIZE] +100.83.37.175: [--num-layers-per-virtual-pipeline-stage NUM_LAYERS_PER_VIRTUAL_PIPELINE_STAGE] +100.83.37.175: [--overlap-p2p-communication] +100.83.37.175: [--distributed-backend {nccl,gloo,ccl,hccl}] +100.83.37.175: [--distributed-timeout-minutes DISTRIBUTED_TIMEOUT_MINUTES] +100.83.37.175: [--DDP-impl {local,torch,FSDP}] +100.83.37.175: [--no-contiguous-buffers-in-local-ddp] +100.83.37.175: [--no-scatter-gather-tensors-in-pipeline] +100.83.37.175: [--use-ring-exchange-p2p] [--local-rank LOCAL_RANK] +100.83.37.175: [--lazy-mpu-init LAZY_MPU_INIT] +100.83.37.175: [--use-cpu-initialization] +100.83.37.175: [--empty-unused-memory-level {0,1,2}] +100.83.37.175: [--standalone-embedding-stage] +100.83.37.175: [--use-distributed-optimizer] +100.83.37.175: [--eval-iters EVAL_ITERS] +100.83.37.175: [--eval-interval EVAL_INTERVAL] [--skip-train] +100.83.37.175: [--eval-loss-exit-value EVAL_LOSS_EXIT_VALUE] +100.83.37.175: [--aml-data-download-path AML_DATA_DOWNLOAD_PATH] +100.83.37.175: [--data-path [DATA_PATH ...]] [--split SPLIT] +100.83.37.175: [--train-data-path [TRAIN_DATA_PATH ...]] +100.83.37.175: [--valid-data-path [VALID_DATA_PATH ...]] +100.83.37.175: [--test-data-path [TEST_DATA_PATH ...]] +100.83.37.175: [--data-cache-path DATA_CACHE_PATH] +100.83.37.175: [--vocab-size VOCAB_SIZE] [--vocab-file VOCAB_FILE] +100.83.37.175: [--merge-file MERGE_FILE] +100.83.37.175: [--vocab-extra-ids VOCAB_EXTRA_IDS] +100.83.37.175: [--seq-length SEQ_LENGTH] +100.83.37.175: [--encoder-seq-length ENCODER_SEQ_LENGTH] +100.83.37.175: [--decoder-seq-length DECODER_SEQ_LENGTH] +100.83.37.175: [--retriever-seq-length RETRIEVER_SEQ_LENGTH] +100.83.37.175: [--sample-rate SAMPLE_RATE] [--mask-prob MASK_PROB] +100.83.37.175: [--short-seq-prob SHORT_SEQ_PROB] [--mmap-warmup] +100.83.37.175: [--num-workers NUM_WORKERS] +100.83.37.175: [--tokenizer-type {BertWordPieceLowerCase,BertWordPieceCase,GPT2BPETokenizer,SentencePieceTokenizer,GPTSentencePieceTokenizer,NullTokenizer}] +100.83.37.175: [--tokenizer-model TOKENIZER_MODEL] +100.83.37.175: [--data-impl {mmap,infer}] [--reset-position-ids] +100.83.37.175: [--reset-attention-mask] [--eod-mask-loss] +100.83.37.175: [--train-data-exact-num-epochs TRAIN_DATA_EXACT_NUM_EPOCHS] +100.83.37.175: [--return-data-index] +100.83.37.175: [--data-efficiency-curriculum-learning] +100.83.37.175: [--train-idx-path TRAIN_IDX_PATH] +100.83.37.175: [--train-desc-path TRAIN_DESC_PATH] +100.83.37.175: [--train-doc-idx-path TRAIN_DOC_IDX_PATH] +100.83.37.175: [--train-sample-idx-path TRAIN_SAMPLE_IDX_PATH] +100.83.37.175: [--train-shuffle-idx-path TRAIN_SHUFFLE_IDX_PATH] +100.83.37.175: [--mask-tensor-adding] [--no-seq-len-plus-one-tokens] +100.83.37.175: [--adlr-autoresume] +100.83.37.175: [--adlr-autoresume-interval ADLR_AUTORESUME_INTERVAL] 
+100.83.37.175: [--ict-head-size ICT_HEAD_SIZE] +100.83.37.175: [--biencoder-projection-dim BIENCODER_PROJECTION_DIM] +100.83.37.175: [--biencoder-shared-query-context-model] +100.83.37.175: [--ict-load ICT_LOAD] [--bert-load BERT_LOAD] +100.83.37.175: [--titles-data-path TITLES_DATA_PATH] +100.83.37.175: [--query-in-block-prob QUERY_IN_BLOCK_PROB] +100.83.37.175: [--use-one-sent-docs] +100.83.37.175: [--evidence-data-path EVIDENCE_DATA_PATH] +100.83.37.175: [--retriever-report-topk-accuracies RETRIEVER_REPORT_TOPK_ACCURACIES [RETRIEVER_REPORT_TOPK_ACCURACIES ...]] +100.83.37.175: [--retriever-score-scaling] +100.83.37.175: [--block-data-path BLOCK_DATA_PATH] +100.83.37.175: [--embedding-path EMBEDDING_PATH] +100.83.37.175: [--indexer-batch-size INDEXER_BATCH_SIZE] +100.83.37.175: [--indexer-log-interval INDEXER_LOG_INTERVAL] +100.83.37.175: [--num-classes NUM_CLASSES] [--img-h IMG_H] +100.83.37.175: [--img-w IMG_W] [--num-channels NUM_CHANNELS] +100.83.37.175: [--patch-dim PATCH_DIM] +100.83.37.175: [--classes-fraction CLASSES_FRACTION] +100.83.37.175: [--data-per-class-fraction DATA_PER_CLASS_FRACTION] +100.83.37.175: [--no-data-sharding] [--head-lr-mult HEAD_LR_MULT] +100.83.37.175: [--vision-pretraining] +100.83.37.175: [--vision-pretraining-type {classify,inpaint,dino}] +100.83.37.175: [--vision-backbone-type {vit,mit,swin}] +100.83.37.175: [--swin-backbone-type {tiny,base,h3}] +100.83.37.175: [--mask-type {random,row}] +100.83.37.175: [--mask-factor MASK_FACTOR] +100.83.37.175: [--iter-per-epoch ITER_PER_EPOCH] +100.83.37.175: [--dino-local-img-size DINO_LOCAL_IMG_SIZE] +100.83.37.175: [--dino-local-crops-number DINO_LOCAL_CROPS_NUMBER] +100.83.37.175: [--dino-head-hidden-size DINO_HEAD_HIDDEN_SIZE] +100.83.37.175: [--dino-bottleneck-size DINO_BOTTLENECK_SIZE] +100.83.37.175: [--dino-freeze-last-layer DINO_FREEZE_LAST_LAYER] +100.83.37.175: [--dino-norm-last-layer] +100.83.37.175: [--dino-warmup-teacher-temp DINO_WARMUP_TEACHER_TEMP] +100.83.37.175: [--dino-teacher-temp DINO_TEACHER_TEMP] +100.83.37.175: [--dino-warmup-teacher-temp-epochs DINO_WARMUP_TEACHER_TEMP_EPOCHS] +100.83.37.175: [--log-params-norm] [--log-num-zeros-in-grad] +100.83.37.175: [--timing-log-level {0,1,2}] +100.83.37.175: [--no-barrier-with-level-1-timing] +100.83.37.175: [--timing-log-option {max,minmax,all}] +100.83.37.175: [--tensorboard-log-interval TENSORBOARD_LOG_INTERVAL] +100.83.37.175: [--tensorboard-queue-size TENSORBOARD_QUEUE_SIZE] +100.83.37.175: [--log-timers-to-tensorboard] +100.83.37.175: [--log-batch-size-to-tensorboard] +100.83.37.175: [--no-log-learnig-rate-to-tensorboard] +100.83.37.175: [--no-log-loss-scale-to-tensorboard] +100.83.37.175: [--log-validation-ppl-to-tensorboard] +100.83.37.175: [--log-optimizer-states-to-tensorboard] +100.83.37.175: [--log-memory-to-tensorboard] +100.83.37.175: [--log-world-size-to-tensorboard] +100.83.37.175: [--zero-stage ZERO_STAGE] [--zero-reduce-scatter] +100.83.37.175: [--zero-contigious-gradients] +100.83.37.175: [--zero-reduce-bucket-size ZERO_REDUCE_BUCKET_SIZE] +100.83.37.175: [--zero-allgather-bucket-size ZERO_ALLGATHER_BUCKET_SIZE] +100.83.37.175: [--remote-device {none,cpu,nvme}] [--use-pin-memory] +100.83.37.175: [--scattered-embeddings] [--split-transformers] +100.83.37.175: [--memory-centric-tiled-linear] +100.83.37.175: [--tile-factor TILE_FACTOR] +100.83.37.175: [--deepspeed-activation-checkpointing] +100.83.37.175: [--partition-activations] +100.83.37.175: [--contigious-checkpointing] [--checkpoint-in-cpu] +100.83.37.175: 
[--synchronize-each-layer] [--profile-backward] +100.83.37.175: [--num-layers-teacher NUM_LAYERS_TEACHER] +100.83.37.175: [--num-experts-teacher NUM_EXPERTS_TEACHER [NUM_EXPERTS_TEACHER ...]] +100.83.37.175: [--hidden-size-teacher HIDDEN_SIZE_TEACHER] +100.83.37.175: [--num-attention-heads-teacher NUM_ATTENTION_HEADS_TEACHER] +100.83.37.175: [--mos] [--kd] [--kd-alpha-ce KD_ALPHA_CE] +100.83.37.175: [--kd-beta-ce KD_BETA_CE] [--kd-temp KD_TEMP] +100.83.37.175: [--reset-iteration] [--load-teacher LOAD_TEACHER] +100.83.37.175: [--inference-batch-times-seqlen-threshold INFERENCE_BATCH_TIMES_SEQLEN_THRESHOLD] +100.83.37.175: [--max-tokens-to-oom MAX_TOKENS_TO_OOM] +100.83.37.175: [--output-bert-embeddings] +100.83.37.175: [--bert-embedder-type {megatron,huggingface}] +100.83.37.175: [--cache-fp8-weight] +100.83.37.175: [--cache-fp8-weight-fwd CACHE_FP8_WEIGHT_FWD] +100.83.37.175: [--fp8-e5m2] [--fp8-e4m3] [--fp8-hybrid] +100.83.37.175: [--no-fp8-wgrad] [--fp8-margin FP8_MARGIN] +100.83.37.175: [--fp8-interval FP8_INTERVAL] +100.83.37.175: [--transformer-impl {local,transformer_engine}] +100.83.37.175: [--fp8-amax-history-len FP8_AMAX_HISTORY_LEN] +100.83.37.175: [--fp8-amax-compute-algo {most_recent,max}] +100.83.37.175: [--retro-workdir RETRO_WORKDIR] +100.83.37.175: [--retro-add-retriever] +100.83.37.175: [--retro-cyclic-train-iters RETRO_CYCLIC_TRAIN_ITERS] +100.83.37.175: [--retro-encoder-layers RETRO_ENCODER_LAYERS] +100.83.37.175: [--retro-encoder-hidden-dropout RETRO_ENCODER_HIDDEN_DROPOUT] +100.83.37.175: [--retro-encoder-attention-dropout RETRO_ENCODER_ATTENTION_DROPOUT] +100.83.37.175: [--retro-num-neighbors RETRO_NUM_NEIGHBORS] +100.83.37.175: [--retro-num-retrieved-chunks RETRO_NUM_RETRIEVED_CHUNKS] +100.83.37.175: [--retro-return-doc-ids] +100.83.37.175: [--profile {pt,pt-full,hltv}] +100.83.37.175: [--profile-steps PROFILE_STEPS] [--deepspeed] +100.83.37.175: [--deepspeed_config DEEPSPEED_CONFIG] [--deepscale] +100.83.37.175: [--deepscale_config DEEPSCALE_CONFIG] +100.83.37.175: [--deepspeed_mpi] [--use_hpu] [--no_cuda] +100.83.37.175: pretrain_llama.py: error: unrecognized arguments: --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --use-torch-compile false --hpu-deterministic --hf-save /data/output/llama13b_multiling_800M/13-05-2024-11:50:01/hf_ckpt +100.83.37.175: fatal: detected dubious ownership in repository at '/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed' +100.83.37.175: To add an exception for this directory, call: +100.83.37.175: +100.83.37.175: git config --global --add safe.directory /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed +100.83.37.175: **** Git info for Megatron: git_hash=unknown git_branch=unknown **** +100.83.37.175: usage: pretrain_llama.py [-h] [--num-layers NUM_LAYERS] +100.83.37.175: [--encoder-num-layers ENCODER_NUM_LAYERS] +100.83.37.175: [--decoder-num-layers DECODER_NUM_LAYERS] +100.83.37.175: [--num-experts NUM_EXPERTS [NUM_EXPERTS ...]] +100.83.37.175: [--mlp-type MLP_TYPE] [--topk TOPK] +100.83.37.175: [--expert-interval EXPERT_INTERVAL] +100.83.37.175: [--hidden-size HIDDEN_SIZE] +100.83.37.175: [--ffn-hidden-size FFN_HIDDEN_SIZE] +100.83.37.175: [--num-attention-heads NUM_ATTENTION_HEADS] +100.83.37.175: [--num-key-value-heads NUM_KEY_VALUE_HEADS] +100.83.37.175: [--kv-channels KV_CHANNELS] +100.83.37.175: [--max-position-embeddings MAX_POSITION_EMBEDDINGS] +100.83.37.175: [--use-rotary-position-embeddings] +100.83.37.175: [--rotary-percent 
ROTARY_PERCENT] +100.83.37.175: [--no-position-embedding] +100.83.37.175: [--make-vocab-size-divisible-by MAKE_VOCAB_SIZE_DIVISIBLE_BY] +100.83.37.175: [--normalization {layernorm,rmsnorm}] +100.83.37.175: [--use-fused-rmsnorm USE_FUSED_RMSNORM] +100.83.37.175: [--layernorm-epsilon LAYERNORM_EPSILON] +100.83.37.175: [--apply-layernorm-1p] [--disable-mem-efficient-ln] +100.83.37.175: [--apply-residual-connection-post-layernorm] +100.83.37.175: [--openai-gelu] [--squared-relu] [--swiglu] +100.83.37.175: [--onnx-safe ONNX_SAFE] [--bert-no-binary-head] +100.83.37.175: [--num-experts-switch NUM_EXPERTS_SWITCH] +100.83.37.175: [--untie-embeddings-and-output-weights] +100.83.37.175: [--embedding-weights-in-fp32] +100.83.37.175: [--fix-position-emb-redundant-alloc] +100.83.37.175: [--embed-layernorm] +100.83.37.175: [--kill-switch-path KILL_SWITCH_PATH] +100.83.37.175: [--attention-dropout ATTENTION_DROPOUT] +100.83.37.175: [--hidden-dropout HIDDEN_DROPOUT] +100.83.37.175: [--weight-decay WEIGHT_DECAY] +100.83.37.175: [--start-weight-decay START_WEIGHT_DECAY] +100.83.37.175: [--end-weight-decay END_WEIGHT_DECAY] +100.83.37.175: [--weight-decay-incr-style {constant,linear,cosine}] +100.83.37.175: [--clip-grad CLIP_GRAD] [--adam-beta1 ADAM_BETA1] +100.83.37.175: [--adam-beta2 ADAM_BETA2] [--adam-eps ADAM_EPS] +100.83.37.175: [--sgd-momentum SGD_MOMENTUM] +100.83.37.175: [--do-norm-bias-weight-decay] +100.83.37.175: [--micro-batch-size MICRO_BATCH_SIZE] +100.83.37.175: [--eval-micro-batch-size EVAL_MICRO_BATCH_SIZE] +100.83.37.175: [--batch-size BATCH_SIZE] +100.83.37.175: [--global-batch-size GLOBAL_BATCH_SIZE] +100.83.37.175: [--rampup-batch-size [RAMPUP_BATCH_SIZE ...]] +100.83.37.175: [--recompute-activations] +100.83.37.175: [--recompute-granularity {full,selective}] +100.83.37.175: [--distribute-saved-activations] +100.83.37.175: [--recompute-method {uniform,block}] +100.83.37.175: [--recompute-num-layers RECOMPUTE_NUM_LAYERS] +100.83.37.175: [--checkpoint-activations] +100.83.37.175: [--distribute-checkpointed-activations] +100.83.37.175: [--checkpoint-num-layers CHECKPOINT_NUM_LAYERS] +100.83.37.175: [--train-iters TRAIN_ITERS] +100.83.37.175: [--train-samples TRAIN_SAMPLES] +100.83.37.175: [--train-tokens TRAIN_TOKENS] [--random-ltd] +100.83.37.175: [--log-interval LOG_INTERVAL] +100.83.37.175: [--exit-interval EXIT_INTERVAL] +100.83.37.175: [--exit-duration-in-mins EXIT_DURATION_IN_MINS] +100.83.37.175: [--exit-signal-handler] +100.83.37.175: [--tensorboard-dir TENSORBOARD_DIR] +100.83.37.175: [--no-masked-softmax-fusion] [--no-bias-gelu-fusion] +100.83.37.175: [--no-bias-dropout-fusion] +100.83.37.175: [--disable-moe-token-dropping] +100.83.37.175: [--moe-train-capacity-factor MOE_TRAIN_CAPACITY_FACTOR] +100.83.37.175: [--moe-eval-capacity-factor MOE_EVAL_CAPACITY_FACTOR] +100.83.37.175: [--moe-min-capacity MOE_MIN_CAPACITY] +100.83.37.175: [--moe-loss-coeff MOE_LOSS_COEFF] +100.83.37.175: [--create-moe-param-group] [--use-flash-attn] +100.83.37.175: [--use-flash-attn-v2] [--use-flash-attn-triton] +100.83.37.175: [--use-fused-sdpa USE_FUSED_SDPA] +100.83.37.175: [--use-fused-sdpa-with-recompute USE_FUSED_SDPA_WITH_RECOMPUTE] +100.83.37.175: [--disable-bias-linear] +100.83.37.175: [--optimizer {adam,sgd,adamw,fusedadamw}] +100.83.37.175: [--dataloader-type {single,cyclic}] [--ds-inference] +100.83.37.175: [--cpu-optimizer] [--cpu_torch_adam] +100.83.37.175: [--no-pipeline-parallel] [--use-tutel] [--inference] +100.83.37.175: [--no-async-tensor-model-parallel-allreduce] +100.83.37.175: 
[--no-persist-layer-norm] [--sequence-parallel] +100.83.37.175: [--ds-sequence-parallel-size DS_SEQUENCE_PARALLEL_SIZE] +100.83.37.175: [--force-ds-sequence-parallel] +100.83.37.175: [--no-gradient-accumulation-fusion] +100.83.37.175: [--use-dataset-only USE_DATASET_ONLY] [--seed SEED] +100.83.37.175: [--data-parallel-random-init] +100.83.37.175: [--init-method-std INIT_METHOD_STD] +100.83.37.175: [--init-method-xavier-uniform] [--no-scaled-init] +100.83.37.175: [--lr LR] +100.83.37.175: [--lr-decay-style {constant,linear,cosine,inverse-square-root}] +100.83.37.175: [--lr-decay-iters LR_DECAY_ITERS] +100.83.37.175: [--lr-decay-samples LR_DECAY_SAMPLES] +100.83.37.175: [--lr-decay-tokens LR_DECAY_TOKENS] +100.83.37.175: [--lr-warmup-fraction LR_WARMUP_FRACTION] +100.83.37.175: [--lr-warmup-iters LR_WARMUP_ITERS] +100.83.37.175: [--lr-warmup-samples LR_WARMUP_SAMPLES] +100.83.37.175: [--lr-warmup-tokens LR_WARMUP_TOKENS] +100.83.37.175: [--warmup WARMUP] [--min-lr MIN_LR] +100.83.37.175: [--override-opt_param-scheduler] +100.83.37.175: [--use-checkpoint-opt_param-scheduler] [--save SAVE] +100.83.37.175: [--save-interval SAVE_INTERVAL] [--no-save-optim] +100.83.37.175: [--no-save-rng] [--load LOAD] [--no-load-optim] +100.83.37.175: [--no-load-rng] [--no-load-lr-state] [--finetune] +100.83.37.175: [--no-initialization] [--use-checkpoint-args] +100.83.37.175: [--exit-on-missing-checkpoint] +100.83.37.175: [--universal-checkpoint] [--verify-checkpoint] +100.83.37.175: [--verify-checkpoint-model-type {GPT,BLOOM,LLAMA}] +100.83.37.175: [--fp16] [--bf16] [--loss-scale LOSS_SCALE] +100.83.37.175: [--initial-loss-scale INITIAL_LOSS_SCALE] +100.83.37.175: [--min-loss-scale MIN_LOSS_SCALE] +100.83.37.175: [--loss-scale-window LOSS_SCALE_WINDOW] +100.83.37.175: [--hysteresis HYSTERESIS] +100.83.37.175: [--fp32-residual-connection] +100.83.37.175: [--no-query-key-layer-scaling] +100.83.37.175: [--attention-softmax-in-fp32] +100.83.37.175: [--accumulate-allreduce-grads-in-fp32] +100.83.37.175: [--fp16-lm-cross-entropy] +100.83.37.175: [--tensor-model-parallel-size TENSOR_MODEL_PARALLEL_SIZE] +100.83.37.175: [--enable-expert-tensor-parallelism] +100.83.37.175: [--pipeline-model-parallel-size PIPELINE_MODEL_PARALLEL_SIZE] +100.83.37.175: [--pipeline-model-parallel-split-rank PIPELINE_MODEL_PARALLEL_SPLIT_RANK] +100.83.37.175: [--moe-expert-parallel-size MOE_EXPERT_PARALLEL_SIZE] +100.83.37.175: [--model-parallel-size MODEL_PARALLEL_SIZE] +100.83.37.175: [--num-layers-per-virtual-pipeline-stage NUM_LAYERS_PER_VIRTUAL_PIPELINE_STAGE] +100.83.37.175: [--overlap-p2p-communication] +100.83.37.175: [--distributed-backend {nccl,gloo,ccl,hccl}] +100.83.37.175: [--distributed-timeout-minutes DISTRIBUTED_TIMEOUT_MINUTES] +100.83.37.175: [--DDP-impl {local,torch,FSDP}] +100.83.37.175: [--no-contiguous-buffers-in-local-ddp] +100.83.37.175: [--no-scatter-gather-tensors-in-pipeline] +100.83.37.175: [--use-ring-exchange-p2p] [--local-rank LOCAL_RANK] +100.83.37.175: [--lazy-mpu-init LAZY_MPU_INIT] +100.83.37.175: [--use-cpu-initialization] +100.83.37.175: [--empty-unused-memory-level {0,1,2}] +100.83.37.175: [--standalone-embedding-stage] +100.83.37.175: [--use-distributed-optimizer] +100.83.37.175: [--eval-iters EVAL_ITERS] +100.83.37.175: [--eval-interval EVAL_INTERVAL] [--skip-train] +100.83.37.175: [--eval-loss-exit-value EVAL_LOSS_EXIT_VALUE] +100.83.37.175: [--aml-data-download-path AML_DATA_DOWNLOAD_PATH] +100.83.37.175: [--data-path [DATA_PATH ...]] [--split SPLIT] +100.83.37.175: [--train-data-path 
[TRAIN_DATA_PATH ...]] +100.83.37.175: [--valid-data-path [VALID_DATA_PATH ...]] +100.83.37.175: [--test-data-path [TEST_DATA_PATH ...]] +100.83.37.175: [--data-cache-path DATA_CACHE_PATH] +100.83.37.175: [--vocab-size VOCAB_SIZE] [--vocab-file VOCAB_FILE] +100.83.37.175: [--merge-file MERGE_FILE] +100.83.37.175: [--vocab-extra-ids VOCAB_EXTRA_IDS] +100.83.37.175: [--seq-length SEQ_LENGTH] +100.83.37.175: [--encoder-seq-length ENCODER_SEQ_LENGTH] +100.83.37.175: [--decoder-seq-length DECODER_SEQ_LENGTH] +100.83.37.175: [--retriever-seq-length RETRIEVER_SEQ_LENGTH] +100.83.37.175: [--sample-rate SAMPLE_RATE] [--mask-prob MASK_PROB] +100.83.37.175: [--short-seq-prob SHORT_SEQ_PROB] [--mmap-warmup] +100.83.37.175: [--num-workers NUM_WORKERS] +100.83.37.175: [--tokenizer-type {BertWordPieceLowerCase,BertWordPieceCase,GPT2BPETokenizer,SentencePieceTokenizer,GPTSentencePieceTokenizer,NullTokenizer}] +100.83.37.175: [--tokenizer-model TOKENIZER_MODEL] +100.83.37.175: [--data-impl {mmap,infer}] [--reset-position-ids] +100.83.37.175: [--reset-attention-mask] [--eod-mask-loss] +100.83.37.175: [--train-data-exact-num-epochs TRAIN_DATA_EXACT_NUM_EPOCHS] +100.83.37.175: [--return-data-index] +100.83.37.175: [--data-efficiency-curriculum-learning] +100.83.37.175: [--train-idx-path TRAIN_IDX_PATH] +100.83.37.175: [--train-desc-path TRAIN_DESC_PATH] +100.83.37.175: [--train-doc-idx-path TRAIN_DOC_IDX_PATH] +100.83.37.175: [--train-sample-idx-path TRAIN_SAMPLE_IDX_PATH] +100.83.37.175: [--train-shuffle-idx-path TRAIN_SHUFFLE_IDX_PATH] +100.83.37.175: [--mask-tensor-adding] [--no-seq-len-plus-one-tokens] +100.83.37.175: [--adlr-autoresume] +100.83.37.175: [--adlr-autoresume-interval ADLR_AUTORESUME_INTERVAL] +100.83.37.175: [--ict-head-size ICT_HEAD_SIZE] +100.83.37.175: [--biencoder-projection-dim BIENCODER_PROJECTION_DIM] +100.83.37.175: [--biencoder-shared-query-context-model] +100.83.37.175: [--ict-load ICT_LOAD] [--bert-load BERT_LOAD] +100.83.37.175: [--titles-data-path TITLES_DATA_PATH] +100.83.37.175: [--query-in-block-prob QUERY_IN_BLOCK_PROB] +100.83.37.175: [--use-one-sent-docs] +100.83.37.175: [--evidence-data-path EVIDENCE_DATA_PATH] +100.83.37.175: [--retriever-report-topk-accuracies RETRIEVER_REPORT_TOPK_ACCURACIES [RETRIEVER_REPORT_TOPK_ACCURACIES ...]] +100.83.37.175: [--retriever-score-scaling] +100.83.37.175: [--block-data-path BLOCK_DATA_PATH] +100.83.37.175: [--embedding-path EMBEDDING_PATH] +100.83.37.175: [--indexer-batch-size INDEXER_BATCH_SIZE] +100.83.37.175: [--indexer-log-interval INDEXER_LOG_INTERVAL] +100.83.37.175: [--num-classes NUM_CLASSES] [--img-h IMG_H] +100.83.37.175: [--img-w IMG_W] [--num-channels NUM_CHANNELS] +100.83.37.175: [--patch-dim PATCH_DIM] +100.83.37.175: [--classes-fraction CLASSES_FRACTION] +100.83.37.175: [--data-per-class-fraction DATA_PER_CLASS_FRACTION] +100.83.37.175: [--no-data-sharding] [--head-lr-mult HEAD_LR_MULT] +100.83.37.175: [--vision-pretraining] +100.83.37.175: [--vision-pretraining-type {classify,inpaint,dino}] +100.83.37.175: [--vision-backbone-type {vit,mit,swin}] +100.83.37.175: [--swin-backbone-type {tiny,base,h3}] +100.83.37.175: [--mask-type {random,row}] +100.83.37.175: [--mask-factor MASK_FACTOR] +100.83.37.175: [--iter-per-epoch ITER_PER_EPOCH] +100.83.37.175: [--dino-local-img-size DINO_LOCAL_IMG_SIZE] +100.83.37.175: [--dino-local-crops-number DINO_LOCAL_CROPS_NUMBER] +100.83.37.175: [--dino-head-hidden-size DINO_HEAD_HIDDEN_SIZE] +100.83.37.175: [--dino-bottleneck-size DINO_BOTTLENECK_SIZE] +100.83.37.175: 
[--dino-freeze-last-layer DINO_FREEZE_LAST_LAYER] +100.83.37.175: [--dino-norm-last-layer] +100.83.37.175: [--dino-warmup-teacher-temp DINO_WARMUP_TEACHER_TEMP] +100.83.37.175: [--dino-teacher-temp DINO_TEACHER_TEMP] +100.83.37.175: [--dino-warmup-teacher-temp-epochs DINO_WARMUP_TEACHER_TEMP_EPOCHS] +100.83.37.175: [--log-params-norm] [--log-num-zeros-in-grad] +100.83.37.175: [--timing-log-level {0,1,2}] +100.83.37.175: [--no-barrier-with-level-1-timing] +100.83.37.175: [--timing-log-option {max,minmax,all}] +100.83.37.175: [--tensorboard-log-interval TENSORBOARD_LOG_INTERVAL] +100.83.37.175: [--tensorboard-queue-size TENSORBOARD_QUEUE_SIZE] +100.83.37.175: [--log-timers-to-tensorboard] +100.83.37.175: [--log-batch-size-to-tensorboard] +100.83.37.175: [--no-log-learnig-rate-to-tensorboard] +100.83.37.175: [--no-log-loss-scale-to-tensorboard] +100.83.37.175: [--log-validation-ppl-to-tensorboard] +100.83.37.175: [--log-optimizer-states-to-tensorboard] +100.83.37.175: [--log-memory-to-tensorboard] +100.83.37.175: [--log-world-size-to-tensorboard] +100.83.37.175: [--zero-stage ZERO_STAGE] [--zero-reduce-scatter] +100.83.37.175: [--zero-contigious-gradients] +100.83.37.175: [--zero-reduce-bucket-size ZERO_REDUCE_BUCKET_SIZE] +100.83.37.175: [--zero-allgather-bucket-size ZERO_ALLGATHER_BUCKET_SIZE] +100.83.37.175: [--remote-device {none,cpu,nvme}] [--use-pin-memory] +100.83.37.175: [--scattered-embeddings] [--split-transformers] +100.83.37.175: [--memory-centric-tiled-linear] +100.83.37.175: [--tile-factor TILE_FACTOR] +100.83.37.175: [--deepspeed-activation-checkpointing] +100.83.37.175: [--partition-activations] +100.83.37.175: [--contigious-checkpointing] [--checkpoint-in-cpu] +100.83.37.175: [--synchronize-each-layer] [--profile-backward] +100.83.37.175: [--num-layers-teacher NUM_LAYERS_TEACHER] +100.83.37.175: [--num-experts-teacher NUM_EXPERTS_TEACHER [NUM_EXPERTS_TEACHER ...]] +100.83.37.175: [--hidden-size-teacher HIDDEN_SIZE_TEACHER] +100.83.37.175: [--num-attention-heads-teacher NUM_ATTENTION_HEADS_TEACHER] +100.83.37.175: [--mos] [--kd] [--kd-alpha-ce KD_ALPHA_CE] +100.83.37.175: [--kd-beta-ce KD_BETA_CE] [--kd-temp KD_TEMP] +100.83.37.175: [--reset-iteration] [--load-teacher LOAD_TEACHER] +100.83.37.175: [--inference-batch-times-seqlen-threshold INFERENCE_BATCH_TIMES_SEQLEN_THRESHOLD] +100.83.37.175: [--max-tokens-to-oom MAX_TOKENS_TO_OOM] +100.83.37.175: [--output-bert-embeddings] +100.83.37.175: [--bert-embedder-type {megatron,huggingface}] +100.83.37.175: [--cache-fp8-weight] +100.83.37.175: [--cache-fp8-weight-fwd CACHE_FP8_WEIGHT_FWD] +100.83.37.175: [--fp8-e5m2] [--fp8-e4m3] [--fp8-hybrid] +100.83.37.175: [--no-fp8-wgrad] [--fp8-margin FP8_MARGIN] +100.83.37.175: [--fp8-interval FP8_INTERVAL] +100.83.37.175: [--transformer-impl {local,transformer_engine}] +100.83.37.175: [--fp8-amax-history-len FP8_AMAX_HISTORY_LEN] +100.83.37.175: [--fp8-amax-compute-algo {most_recent,max}] +100.83.37.175: [--retro-workdir RETRO_WORKDIR] +100.83.37.175: [--retro-add-retriever] +100.83.37.175: [--retro-cyclic-train-iters RETRO_CYCLIC_TRAIN_ITERS] +100.83.37.175: [--retro-encoder-layers RETRO_ENCODER_LAYERS] +100.83.37.175: [--retro-encoder-hidden-dropout RETRO_ENCODER_HIDDEN_DROPOUT] +100.83.37.175: [--retro-encoder-attention-dropout RETRO_ENCODER_ATTENTION_DROPOUT] +100.83.37.175: [--retro-num-neighbors RETRO_NUM_NEIGHBORS] +100.83.37.175: [--retro-num-retrieved-chunks RETRO_NUM_RETRIEVED_CHUNKS] +100.83.37.175: [--retro-return-doc-ids] +100.83.37.175: [--profile {pt,pt-full,hltv}] 
+100.83.37.175: [--profile-steps PROFILE_STEPS] [--deepspeed] +100.83.37.175: [--deepspeed_config DEEPSPEED_CONFIG] [--deepscale] +100.83.37.175: [--deepscale_config DEEPSCALE_CONFIG] +100.83.37.175: [--deepspeed_mpi] [--use_hpu] [--no_cuda] +100.83.37.175: pretrain_llama.py: error: unrecognized arguments: --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --use-torch-compile false --hpu-deterministic --hf-save /data/output/llama13b_multiling_800M/13-05-2024-11:50:01/hf_ckpt +100.83.37.175: usage: pretrain_llama.py [-h] [--num-layers NUM_LAYERS] +100.83.37.175: [--encoder-num-layers ENCODER_NUM_LAYERS] +100.83.37.175: [--decoder-num-layers DECODER_NUM_LAYERS] +100.83.37.175: [--num-experts NUM_EXPERTS [NUM_EXPERTS ...]] +100.83.37.175: [--mlp-type MLP_TYPE] [--topk TOPK] +100.83.37.175: [--expert-interval EXPERT_INTERVAL] +100.83.37.175: [--hidden-size HIDDEN_SIZE] +100.83.37.175: [--ffn-hidden-size FFN_HIDDEN_SIZE] +100.83.37.175: [--num-attention-heads NUM_ATTENTION_HEADS] +100.83.37.175: [--num-key-value-heads NUM_KEY_VALUE_HEADS] +100.83.37.175: [--kv-channels KV_CHANNELS] +100.83.37.175: [--max-position-embeddings MAX_POSITION_EMBEDDINGS] +100.83.37.175: [--use-rotary-position-embeddings] +100.83.37.175: [--rotary-percent ROTARY_PERCENT] +100.83.37.175: [--no-position-embedding] +100.83.37.175: [--make-vocab-size-divisible-by MAKE_VOCAB_SIZE_DIVISIBLE_BY] +100.83.37.175: [--normalization {layernorm,rmsnorm}] +100.83.37.175: [--use-fused-rmsnorm USE_FUSED_RMSNORM] +100.83.37.175: [--layernorm-epsilon LAYERNORM_EPSILON] +100.83.37.175: [--apply-layernorm-1p] [--disable-mem-efficient-ln] +100.83.37.175: [--apply-residual-connection-post-layernorm] +100.83.37.175: [--openai-gelu] [--squared-relu] [--swiglu] +100.83.37.175: [--onnx-safe ONNX_SAFE] [--bert-no-binary-head] +100.83.37.175: [--num-experts-switch NUM_EXPERTS_SWITCH] +100.83.37.175: [--untie-embeddings-and-output-weights] +100.83.37.175: [--embedding-weights-in-fp32] +100.83.37.175: [--fix-position-emb-redundant-alloc] +100.83.37.175: [--embed-layernorm] +100.83.37.175: [--kill-switch-path KILL_SWITCH_PATH] +100.83.37.175: [--attention-dropout ATTENTION_DROPOUT] +100.83.37.175: [--hidden-dropout HIDDEN_DROPOUT] +100.83.37.175: [--weight-decay WEIGHT_DECAY] +100.83.37.175: [--start-weight-decay START_WEIGHT_DECAY] +100.83.37.175: [--end-weight-decay END_WEIGHT_DECAY] +100.83.37.175: [--weight-decay-incr-style {constant,linear,cosine}] +100.83.37.175: [--clip-grad CLIP_GRAD] [--adam-beta1 ADAM_BETA1] +100.83.37.175: [--adam-beta2 ADAM_BETA2] [--adam-eps ADAM_EPS] +100.83.37.175: [--sgd-momentum SGD_MOMENTUM] +100.83.37.175: [--do-norm-bias-weight-decay] +100.83.37.175: [--micro-batch-size MICRO_BATCH_SIZE] +100.83.37.175: [--eval-micro-batch-size EVAL_MICRO_BATCH_SIZE] +100.83.37.175: [--batch-size BATCH_SIZE] +100.83.37.175: [--global-batch-size GLOBAL_BATCH_SIZE] +100.83.37.175: [--rampup-batch-size [RAMPUP_BATCH_SIZE ...]] +100.83.37.175: [--recompute-activations] +100.83.37.175: [--recompute-granularity {full,selective}] +100.83.37.175: [--distribute-saved-activations] +100.83.37.175: [--recompute-method {uniform,block}] +100.83.37.175: [--recompute-num-layers RECOMPUTE_NUM_LAYERS] +100.83.37.175: [--checkpoint-activations] +100.83.37.175: [--distribute-checkpointed-activations] +100.83.37.175: [--checkpoint-num-layers CHECKPOINT_NUM_LAYERS] +100.83.37.175: [--train-iters TRAIN_ITERS] +100.83.37.175: [--train-samples TRAIN_SAMPLES] +100.83.37.175: [--train-tokens TRAIN_TOKENS] 
[--random-ltd] +100.83.37.175: [--log-interval LOG_INTERVAL] +100.83.37.175: [--exit-interval EXIT_INTERVAL] +100.83.37.175: [--exit-duration-in-mins EXIT_DURATION_IN_MINS] +100.83.37.175: [--exit-signal-handler] +100.83.37.175: [--tensorboard-dir TENSORBOARD_DIR] +100.83.37.175: [--no-masked-softmax-fusion] [--no-bias-gelu-fusion] +100.83.37.175: [--no-bias-dropout-fusion] +100.83.37.175: [--disable-moe-token-dropping] +100.83.37.175: [--moe-train-capacity-factor MOE_TRAIN_CAPACITY_FACTOR] +100.83.37.175: [--moe-eval-capacity-factor MOE_EVAL_CAPACITY_FACTOR] +100.83.37.175: [--moe-min-capacity MOE_MIN_CAPACITY] +100.83.37.175: [--moe-loss-coeff MOE_LOSS_COEFF] +100.83.37.175: [--create-moe-param-group] [--use-flash-attn] +100.83.37.175: [--use-flash-attn-v2] [--use-flash-attn-triton] +100.83.37.175: [--use-fused-sdpa USE_FUSED_SDPA] +100.83.37.175: [--use-fused-sdpa-with-recompute USE_FUSED_SDPA_WITH_RECOMPUTE] +100.83.37.175: [--disable-bias-linear] +100.83.37.175: [--optimizer {adam,sgd,adamw,fusedadamw}] +100.83.37.175: [--dataloader-type {single,cyclic}] [--ds-inference] +100.83.37.175: [--cpu-optimizer] [--cpu_torch_adam] +100.83.37.175: [--no-pipeline-parallel] [--use-tutel] [--inference] +100.83.37.175: [--no-async-tensor-model-parallel-allreduce] +100.83.37.175: [--no-persist-layer-norm] [--sequence-parallel] +100.83.37.175: [--ds-sequence-parallel-size DS_SEQUENCE_PARALLEL_SIZE] +100.83.37.175: [--force-ds-sequence-parallel] +100.83.37.175: [--no-gradient-accumulation-fusion] +100.83.37.175: [--use-dataset-only USE_DATASET_ONLY] [--seed SEED] +100.83.37.175: [--data-parallel-random-init] +100.83.37.175: [--init-method-std INIT_METHOD_STD] +100.83.37.175: [--init-method-xavier-uniform] [--no-scaled-init] +100.83.37.175: [--lr LR] +100.83.37.175: [--lr-decay-style {constant,linear,cosine,inverse-square-root}] +100.83.37.175: [--lr-decay-iters LR_DECAY_ITERS] +100.83.37.175: [--lr-decay-samples LR_DECAY_SAMPLES] +100.83.37.175: [--lr-decay-tokens LR_DECAY_TOKENS] +100.83.37.175: [--lr-warmup-fraction LR_WARMUP_FRACTION] +100.83.37.175: [--lr-warmup-iters LR_WARMUP_ITERS] +100.83.37.175: [--lr-warmup-samples LR_WARMUP_SAMPLES] +100.83.37.175: [--lr-warmup-tokens LR_WARMUP_TOKENS] +100.83.37.175: [--warmup WARMUP] [--min-lr MIN_LR] +100.83.37.175: [--override-opt_param-scheduler] +100.83.37.175: [--use-checkpoint-opt_param-scheduler] [--save SAVE] +100.83.37.175: [--save-interval SAVE_INTERVAL] [--no-save-optim] +100.83.37.175: [--no-save-rng] [--load LOAD] [--no-load-optim] +100.83.37.175: [--no-load-rng] [--no-load-lr-state] [--finetune] +100.83.37.175: [--no-initialization] [--use-checkpoint-args] +100.83.37.175: [--exit-on-missing-checkpoint] +100.83.37.175: [--universal-checkpoint] [--verify-checkpoint] +100.83.37.175: [--verify-checkpoint-model-type {GPT,BLOOM,LLAMA}] +100.83.37.175: [--fp16] [--bf16] [--loss-scale LOSS_SCALE] +100.83.37.175: [--initial-loss-scale INITIAL_LOSS_SCALE] +100.83.37.175: [--min-loss-scale MIN_LOSS_SCALE] +100.83.37.175: [--loss-scale-window LOSS_SCALE_WINDOW] +100.83.37.175: [--hysteresis HYSTERESIS] +100.83.37.175: [--fp32-residual-connection] +100.83.37.175: [--no-query-key-layer-scaling] +100.83.37.175: [--attention-softmax-in-fp32] +100.83.37.175: [--accumulate-allreduce-grads-in-fp32] +100.83.37.175: [--fp16-lm-cross-entropy] +100.83.37.175: [--tensor-model-parallel-size TENSOR_MODEL_PARALLEL_SIZE] +100.83.37.175: [--enable-expert-tensor-parallelism] +100.83.37.175: [--pipeline-model-parallel-size PIPELINE_MODEL_PARALLEL_SIZE] +100.83.37.175: 
[--pipeline-model-parallel-split-rank PIPELINE_MODEL_PARALLEL_SPLIT_RANK] +100.83.37.175: [--moe-expert-parallel-size MOE_EXPERT_PARALLEL_SIZE] +100.83.37.175: [--model-parallel-size MODEL_PARALLEL_SIZE] +100.83.37.175: [--num-layers-per-virtual-pipeline-stage NUM_LAYERS_PER_VIRTUAL_PIPELINE_STAGE] +100.83.37.175: [--overlap-p2p-communication] +100.83.37.175: [--distributed-backend {nccl,gloo,ccl,hccl}] +100.83.37.175: [--distributed-timeout-minutes DISTRIBUTED_TIMEOUT_MINUTES] +100.83.37.175: [--DDP-impl {local,torch,FSDP}] +100.83.37.175: [--no-contiguous-buffers-in-local-ddp] +100.83.37.175: [--no-scatter-gather-tensors-in-pipeline] +100.83.37.175: [--use-ring-exchange-p2p] [--local-rank LOCAL_RANK] +100.83.37.175: [--lazy-mpu-init LAZY_MPU_INIT] +100.83.37.175: [--use-cpu-initialization] +100.83.37.175: [--empty-unused-memory-level {0,1,2}] +100.83.37.175: [--standalone-embedding-stage] +100.83.37.175: [--use-distributed-optimizer] +100.83.37.175: [--eval-iters EVAL_ITERS] +100.83.37.175: [--eval-interval EVAL_INTERVAL] [--skip-train] +100.83.37.175: [--eval-loss-exit-value EVAL_LOSS_EXIT_VALUE] +100.83.37.175: [--aml-data-download-path AML_DATA_DOWNLOAD_PATH] +100.83.37.175: [--data-path [DATA_PATH ...]] [--split SPLIT] +100.83.37.175: [--train-data-path [TRAIN_DATA_PATH ...]] +100.83.37.175: [--valid-data-path [VALID_DATA_PATH ...]] +100.83.37.175: [--test-data-path [TEST_DATA_PATH ...]] +100.83.37.175: [--data-cache-path DATA_CACHE_PATH] +100.83.37.175: [--vocab-size VOCAB_SIZE] [--vocab-file VOCAB_FILE] +100.83.37.175: [--merge-file MERGE_FILE] +100.83.37.175: [--vocab-extra-ids VOCAB_EXTRA_IDS] +100.83.37.175: [--seq-length SEQ_LENGTH] +100.83.37.175: [--encoder-seq-length ENCODER_SEQ_LENGTH] +100.83.37.175: [--decoder-seq-length DECODER_SEQ_LENGTH] +100.83.37.175: [--retriever-seq-length RETRIEVER_SEQ_LENGTH] +100.83.37.175: [--sample-rate SAMPLE_RATE] [--mask-prob MASK_PROB] +100.83.37.175: [--short-seq-prob SHORT_SEQ_PROB] [--mmap-warmup] +100.83.37.175: [--num-workers NUM_WORKERS] +100.83.37.175: [--tokenizer-type {BertWordPieceLowerCase,BertWordPieceCase,GPT2BPETokenizer,SentencePieceTokenizer,GPTSentencePieceTokenizer,NullTokenizer}] +100.83.37.175: [--tokenizer-model TOKENIZER_MODEL] +100.83.37.175: [--data-impl {mmap,infer}] [--reset-position-ids] +100.83.37.175: [--reset-attention-mask] [--eod-mask-loss] +100.83.37.175: [--train-data-exact-num-epochs TRAIN_DATA_EXACT_NUM_EPOCHS] +100.83.37.175: [--return-data-index] +100.83.37.175: [--data-efficiency-curriculum-learning] +100.83.37.175: [--train-idx-path TRAIN_IDX_PATH] +100.83.37.175: [--train-desc-path TRAIN_DESC_PATH] +100.83.37.175: [--train-doc-idx-path TRAIN_DOC_IDX_PATH] +100.83.37.175: [--train-sample-idx-path TRAIN_SAMPLE_IDX_PATH] +100.83.37.175: [--train-shuffle-idx-path TRAIN_SHUFFLE_IDX_PATH] +100.83.37.175: [--mask-tensor-adding] [--no-seq-len-plus-one-tokens] +100.83.37.175: [--adlr-autoresume] +100.83.37.175: [--adlr-autoresume-interval ADLR_AUTORESUME_INTERVAL] +100.83.37.175: [--ict-head-size ICT_HEAD_SIZE] +100.83.37.175: [--biencoder-projection-dim BIENCODER_PROJECTION_DIM] +100.83.37.175: [--biencoder-shared-query-context-model] +100.83.37.175: [--ict-load ICT_LOAD] [--bert-load BERT_LOAD] +100.83.37.175: [--titles-data-path TITLES_DATA_PATH] +100.83.37.175: [--query-in-block-prob QUERY_IN_BLOCK_PROB] +100.83.37.175: [--use-one-sent-docs] +100.83.37.175: [--evidence-data-path EVIDENCE_DATA_PATH] +100.83.37.175: [--retriever-report-topk-accuracies RETRIEVER_REPORT_TOPK_ACCURACIES 
[RETRIEVER_REPORT_TOPK_ACCURACIES ...]] +100.83.37.175: [--retriever-score-scaling] +100.83.37.175: [--block-data-path BLOCK_DATA_PATH] +100.83.37.175: [--embedding-path EMBEDDING_PATH] +100.83.37.175: [--indexer-batch-size INDEXER_BATCH_SIZE] +100.83.37.175: [--indexer-log-interval INDEXER_LOG_INTERVAL] +100.83.37.175: [--num-classes NUM_CLASSES] [--img-h IMG_H] +100.83.37.175: [--img-w IMG_W] [--num-channels NUM_CHANNELS] +100.83.37.175: [--patch-dim PATCH_DIM] +100.83.37.175: [--classes-fraction CLASSES_FRACTION] +100.83.37.175: [--data-per-class-fraction DATA_PER_CLASS_FRACTION] +100.83.37.175: [--no-data-sharding] [--head-lr-mult HEAD_LR_MULT] +100.83.37.175: [--vision-pretraining] +100.83.37.175: [--vision-pretraining-type {classify,inpaint,dino}] +100.83.37.175: [--vision-backbone-type {vit,mit,swin}] +100.83.37.175: [--swin-backbone-type {tiny,base,h3}] +100.83.37.175: [--mask-type {random,row}] +100.83.37.175: [--mask-factor MASK_FACTOR] +100.83.37.175: [--iter-per-epoch ITER_PER_EPOCH] +100.83.37.175: [--dino-local-img-size DINO_LOCAL_IMG_SIZE] +100.83.37.175: [--dino-local-crops-number DINO_LOCAL_CROPS_NUMBER] +100.83.37.175: [--dino-head-hidden-size DINO_HEAD_HIDDEN_SIZE] +100.83.37.175: [--dino-bottleneck-size DINO_BOTTLENECK_SIZE] +100.83.37.175: [--dino-freeze-last-layer DINO_FREEZE_LAST_LAYER] +100.83.37.175: [--dino-norm-last-layer] +100.83.37.175: [--dino-warmup-teacher-temp DINO_WARMUP_TEACHER_TEMP] +100.83.37.175: [--dino-teacher-temp DINO_TEACHER_TEMP] +100.83.37.175: [--dino-warmup-teacher-temp-epochs DINO_WARMUP_TEACHER_TEMP_EPOCHS] +100.83.37.175: [--log-params-norm] [--log-num-zeros-in-grad] +100.83.37.175: [--timing-log-level {0,1,2}] +100.83.37.175: [--no-barrier-with-level-1-timing] +100.83.37.175: [--timing-log-option {max,minmax,all}] +100.83.37.175: [--tensorboard-log-interval TENSORBOARD_LOG_INTERVAL] +100.83.37.175: [--tensorboard-queue-size TENSORBOARD_QUEUE_SIZE] +100.83.37.175: [--log-timers-to-tensorboard] +100.83.37.175: [--log-batch-size-to-tensorboard] +100.83.37.175: [--no-log-learnig-rate-to-tensorboard] +100.83.37.175: [--no-log-loss-scale-to-tensorboard] +100.83.37.175: [--log-validation-ppl-to-tensorboard] +100.83.37.175: [--log-optimizer-states-to-tensorboard] +100.83.37.175: [--log-memory-to-tensorboard] +100.83.37.175: [--log-world-size-to-tensorboard] +100.83.37.175: [--zero-stage ZERO_STAGE] [--zero-reduce-scatter] +100.83.37.175: [--zero-contigious-gradients] +100.83.37.175: [--zero-reduce-bucket-size ZERO_REDUCE_BUCKET_SIZE] +100.83.37.175: [--zero-allgather-bucket-size ZERO_ALLGATHER_BUCKET_SIZE] +100.83.37.175: [--remote-device {none,cpu,nvme}] [--use-pin-memory] +100.83.37.175: [--scattered-embeddings] [--split-transformers] +100.83.37.175: [--memory-centric-tiled-linear] +100.83.37.175: [--tile-factor TILE_FACTOR] +100.83.37.175: [--deepspeed-activation-checkpointing] +100.83.37.175: [--partition-activations] +100.83.37.175: [--contigious-checkpointing] [--checkpoint-in-cpu] +100.83.37.175: [--synchronize-each-layer] [--profile-backward] +100.83.37.175: [--num-layers-teacher NUM_LAYERS_TEACHER] +100.83.37.175: [--num-experts-teacher NUM_EXPERTS_TEACHER [NUM_EXPERTS_TEACHER ...]] +100.83.37.175: [--hidden-size-teacher HIDDEN_SIZE_TEACHER] +100.83.37.175: [--num-attention-heads-teacher NUM_ATTENTION_HEADS_TEACHER] +100.83.37.175: [--mos] [--kd] [--kd-alpha-ce KD_ALPHA_CE] +100.83.37.175: [--kd-beta-ce KD_BETA_CE] [--kd-temp KD_TEMP] +100.83.37.175: [--reset-iteration] [--load-teacher LOAD_TEACHER] +100.83.37.175: 
[--inference-batch-times-seqlen-threshold INFERENCE_BATCH_TIMES_SEQLEN_THRESHOLD] +100.83.37.175: [--max-tokens-to-oom MAX_TOKENS_TO_OOM] +100.83.37.175: [--output-bert-embeddings] +100.83.37.175: [--bert-embedder-type {megatron,huggingface}] +100.83.37.175: [--cache-fp8-weight] +100.83.37.175: [--cache-fp8-weight-fwd CACHE_FP8_WEIGHT_FWD] +100.83.37.175: [--fp8-e5m2] [--fp8-e4m3] [--fp8-hybrid] +100.83.37.175: [--no-fp8-wgrad] [--fp8-margin FP8_MARGIN] +100.83.37.175: [--fp8-interval FP8_INTERVAL] +100.83.37.175: [--transformer-impl {local,transformer_engine}] +100.83.37.175: [--fp8-amax-history-len FP8_AMAX_HISTORY_LEN] +100.83.37.175: [--fp8-amax-compute-algo {most_recent,max}] +100.83.37.175: [--retro-workdir RETRO_WORKDIR] +100.83.37.175: [--retro-add-retriever] +100.83.37.175: [--retro-cyclic-train-iters RETRO_CYCLIC_TRAIN_ITERS] +100.83.37.175: [--retro-encoder-layers RETRO_ENCODER_LAYERS] +100.83.37.175: [--retro-encoder-hidden-dropout RETRO_ENCODER_HIDDEN_DROPOUT] +100.83.37.175: [--retro-encoder-attention-dropout RETRO_ENCODER_ATTENTION_DROPOUT] +100.83.37.175: [--retro-num-neighbors RETRO_NUM_NEIGHBORS] +100.83.37.175: [--retro-num-retrieved-chunks RETRO_NUM_RETRIEVED_CHUNKS] +100.83.37.175: [--retro-return-doc-ids] +100.83.37.175: [--profile {pt,pt-full,hltv}] +100.83.37.175: [--profile-steps PROFILE_STEPS] [--deepspeed] +100.83.37.175: [--deepspeed_config DEEPSPEED_CONFIG] [--deepscale] +100.83.37.175: [--deepscale_config DEEPSCALE_CONFIG] +100.83.37.175: [--deepspeed_mpi] [--use_hpu] [--no_cuda] +100.83.37.175: pretrain_llama.py: error: unrecognized arguments: --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --use-torch-compile false --hpu-deterministic --hf-save /data/output/llama13b_multiling_800M/13-05-2024-11:50:01/hf_ckpt +100.83.37.175: -------------------------------------------------- +100.83.37.175: DeepSpeed C++/CUDA extension op report +100.83.37.175: -------------------------------------------------- +100.83.37.175: NOTE: Ops not installed will be just-in-time (JIT) compiled at +100.83.37.175: runtime if needed. Op compatibility means that your system +100.83.37.175: meet the required dependencies to JIT install the op. +100.83.37.175: -------------------------------------------------- +100.83.37.175: JIT compiled ops requires ninja +100.83.37.175: ninja .................. [OKAY] +100.83.37.175: -------------------------------------------------- +100.83.37.175: op name ................ installed .. compatible +100.83.37.175: -------------------------------------------------- +100.83.37.175: cpu_adam ............... [NO] ....... [OKAY] +100.83.37.175: fused_adam ............. [NO] ....... [OKAY] +100.83.37.175: deepspeed_not_implemented [NO] ....... [OKAY] +100.83.37.175: transformer_inference .. [NO] ....... [OKAY] +100.83.37.175: -------------------------------------------------- +100.83.37.175: DeepSpeed general environment info: +100.83.37.175: torch install path ............... ['/usr/local/lib/python3.10/dist-packages/torch'] +100.83.37.175: torch version .................... 2.1.1a0+gitb51c9f6 +100.83.37.175: deepspeed install path ........... ['/usr/local/lib/python3.10/dist-packages/deepspeed'] +100.83.37.175: deepspeed info ................... 0.12.4+hpu.synapse.v1.14.0, fad45b2, 1.14.0 +100.83.37.175: deepspeed wheel compiled w. ...... torch 2.1 +100.83.37.175: shared memory (/dev/shm) size .... 
503.75 GB +100.83.37.175: fatal: detected dubious ownership in repository at '/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed' +100.83.37.175: To add an exception for this directory, call: +100.83.37.175: +100.83.37.175: git config --global --add safe.directory /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed +100.83.37.175: **** Git info for Megatron: git_hash=unknown git_branch=unknown **** +100.83.37.175: usage: pretrain_llama.py [-h] [--num-layers NUM_LAYERS] +100.83.37.175: [--encoder-num-layers ENCODER_NUM_LAYERS] +100.83.37.175: [--decoder-num-layers DECODER_NUM_LAYERS] +100.83.37.175: [--num-experts NUM_EXPERTS [NUM_EXPERTS ...]] +100.83.37.175: [--mlp-type MLP_TYPE] [--topk TOPK] +100.83.37.175: [--expert-interval EXPERT_INTERVAL] +100.83.37.175: [--hidden-size HIDDEN_SIZE] +100.83.37.175: [--ffn-hidden-size FFN_HIDDEN_SIZE] +100.83.37.175: [--num-attention-heads NUM_ATTENTION_HEADS] +100.83.37.175: [--num-key-value-heads NUM_KEY_VALUE_HEADS] +100.83.37.175: [--kv-channels KV_CHANNELS] +100.83.37.175: [--max-position-embeddings MAX_POSITION_EMBEDDINGS] +100.83.37.175: [--use-rotary-position-embeddings] +100.83.37.175: [--rotary-percent ROTARY_PERCENT] +100.83.37.175: [--no-position-embedding] +100.83.37.175: [--make-vocab-size-divisible-by MAKE_VOCAB_SIZE_DIVISIBLE_BY] +100.83.37.175: [--normalization {layernorm,rmsnorm}] +100.83.37.175: [--use-fused-rmsnorm USE_FUSED_RMSNORM] +100.83.37.175: [--layernorm-epsilon LAYERNORM_EPSILON] +100.83.37.175: [--apply-layernorm-1p] [--disable-mem-efficient-ln] +100.83.37.175: [--apply-residual-connection-post-layernorm] +100.83.37.175: [--openai-gelu] [--squared-relu] [--swiglu] +100.83.37.175: [--onnx-safe ONNX_SAFE] [--bert-no-binary-head] +100.83.37.175: [--num-experts-switch NUM_EXPERTS_SWITCH] +100.83.37.175: [--untie-embeddings-and-output-weights] +100.83.37.175: [--embedding-weights-in-fp32] +100.83.37.175: [--fix-position-emb-redundant-alloc] +100.83.37.175: [--embed-layernorm] +100.83.37.175: [--kill-switch-path KILL_SWITCH_PATH] +100.83.37.175: [--attention-dropout ATTENTION_DROPOUT] +100.83.37.175: [--hidden-dropout HIDDEN_DROPOUT] +100.83.37.175: [--weight-decay WEIGHT_DECAY] +100.83.37.175: [--start-weight-decay START_WEIGHT_DECAY] +100.83.37.175: [--end-weight-decay END_WEIGHT_DECAY] +100.83.37.175: [--weight-decay-incr-style {constant,linear,cosine}] +100.83.37.175: [--clip-grad CLIP_GRAD] [--adam-beta1 ADAM_BETA1] +100.83.37.175: [--adam-beta2 ADAM_BETA2] [--adam-eps ADAM_EPS] +100.83.37.175: [--sgd-momentum SGD_MOMENTUM] +100.83.37.175: [--do-norm-bias-weight-decay] +100.83.37.175: [--micro-batch-size MICRO_BATCH_SIZE] +100.83.37.175: [--eval-micro-batch-size EVAL_MICRO_BATCH_SIZE] +100.83.37.175: [--batch-size BATCH_SIZE] +100.83.37.175: [--global-batch-size GLOBAL_BATCH_SIZE] +100.83.37.175: [--rampup-batch-size [RAMPUP_BATCH_SIZE ...]] +100.83.37.175: [--recompute-activations] +100.83.37.175: [--recompute-granularity {full,selective}] +100.83.37.175: [--distribute-saved-activations] +100.83.37.175: [--recompute-method {uniform,block}] +100.83.37.175: [--recompute-num-layers RECOMPUTE_NUM_LAYERS] +100.83.37.175: [--checkpoint-activations] +100.83.37.175: [--distribute-checkpointed-activations] +100.83.37.175: [--checkpoint-num-layers CHECKPOINT_NUM_LAYERS] +100.83.37.175: [--train-iters TRAIN_ITERS] +100.83.37.175: [--train-samples TRAIN_SAMPLES] +100.83.37.175: [--train-tokens TRAIN_TOKENS] [--random-ltd] +100.83.37.175: [--log-interval LOG_INTERVAL] +100.83.37.175: [--exit-interval 
EXIT_INTERVAL] +100.83.37.175: [... the remainder of this second, identical pretrain_llama.py usage dump is omitted; it repeats the usage text shown above verbatim ...] +100.83.37.175: [--max-tokens-to-oom
MAX_TOKENS_TO_OOM] +100.83.37.175: [--output-bert-embeddings] +100.83.37.175: [--bert-embedder-type {megatron,huggingface}] +100.83.37.175: [--cache-fp8-weight] +100.83.37.175: [--cache-fp8-weight-fwd CACHE_FP8_WEIGHT_FWD] +100.83.37.175: [--fp8-e5m2] [--fp8-e4m3] [--fp8-hybrid] +100.83.37.175: [--no-fp8-wgrad] [--fp8-margin FP8_MARGIN] +100.83.37.175: [--fp8-interval FP8_INTERVAL] +100.83.37.175: [--transformer-impl {local,transformer_engine}] +100.83.37.175: [--fp8-amax-history-len FP8_AMAX_HISTORY_LEN] +100.83.37.175: [--fp8-amax-compute-algo {most_recent,max}] +100.83.37.175: [--retro-workdir RETRO_WORKDIR] +100.83.37.175: [--retro-add-retriever] +100.83.37.175: [--retro-cyclic-train-iters RETRO_CYCLIC_TRAIN_ITERS] +100.83.37.175: [--retro-encoder-layers RETRO_ENCODER_LAYERS] +100.83.37.175: [--retro-encoder-hidden-dropout RETRO_ENCODER_HIDDEN_DROPOUT] +100.83.37.175: [--retro-encoder-attention-dropout RETRO_ENCODER_ATTENTION_DROPOUT] +100.83.37.175: [--retro-num-neighbors RETRO_NUM_NEIGHBORS] +100.83.37.175: [--retro-num-retrieved-chunks RETRO_NUM_RETRIEVED_CHUNKS] +100.83.37.175: [--retro-return-doc-ids] +100.83.37.175: [--profile {pt,pt-full,hltv}] +100.83.37.175: [--profile-steps PROFILE_STEPS] [--deepspeed] +100.83.37.175: [--deepspeed_config DEEPSPEED_CONFIG] [--deepscale] +100.83.37.175: [--deepscale_config DEEPSCALE_CONFIG] +100.83.37.175: [--deepspeed_mpi] [--use_hpu] [--no_cuda] +100.83.37.175: pretrain_llama.py: error: unrecognized arguments: --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --use-torch-compile false --hpu-deterministic --hf-save /data/output/llama13b_multiling_800M/13-05-2024-11:50:01/hf_ckpt +100.83.37.188: [2024-05-13 11:50:12,220] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 127781 +100.83.37.188: [2024-05-13 11:50:12,222] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 127782 +100.83.37.188: [2024-05-13 11:50:12,249] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 127783 +100.83.37.188: [2024-05-13 11:50:12,249] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 127784 +100.83.37.188: [2024-05-13 11:50:12,249] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 127785 +100.83.37.188: [2024-05-13 11:50:12,250] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 127786 +100.83.37.188: [2024-05-13 11:50:12,250] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 127787 +100.83.37.188: [2024-05-13 11:50:12,250] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 127788 +100.83.37.188: [2024-05-13 11:50:12,250] [ERROR] [launch.py:322:sigkill_handler] ['/usr/bin/bash', '-c', ' cd /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed && python3 -u ./pretrain_llama.py --deepspeed --tensor-model-parallel-size 1 --pipeline-model-parallel-size 1 --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --layernorm-epsilon 1e-6 --num-layers 24 --hidden-size 2048 --ffn-hidden-size 4096 --num-attention-heads 32 --seq-length 2048 --micro-batch-size 1 --global-batch-size 256 --train-iters 10000 --log-interval 10 --eval-iters 10 --eval-interval 500 --data-path /data/hineng/tokenizer//_raw_content_document --vocab-file /data/hineng/tokenizer//gpt2-vocab.json --merge-file /data/hineng/tokenizer//gpt2-merges.txt --optimizer adamw --adam-beta1 0.9 --adam-beta2 0.95 --adam-eps 1e-6 --lr 3e-4 --lr-decay-style cosine --lr-warmup-iters 2000 --clip-grad 1.0 --weight-decay 0.1 --attention-dropout 0.1 --hidden-dropout 
0.1 --tensorboard-dir /data/output/llama13b_multiling_800M/13-05-2024-11:50:01/tensorboard --log-validation-ppl-to-tensorboard --log-batch-size-to-tensorboard --log-timers-to-tensorboard --load /data/output/llama13b_multiling_800M/13-05-2024-11:50:01/checkpoints_zero_stage_2 --deepspeed_config=/data/output/llama13b_multiling_800M/13-05-2024-11:50:01/ds_config.json --zero-stage=0 --exit-interval 0 --no-masked-softmax-fusion --no-bias-gelu-fusion --no-bias-dropout-fusion --use-torch-compile false --use-fused-sdpa true --use-fused-sdpa-with-recompute false --bf16 --use_hpu --distributed-backend=hccl --hpu-deterministic --sequence-parallel --save /data/output/llama13b_multiling_800M/13-05-2024-11:50:01/checkpoints_zero_stage_2 --hf-save /data/output/llama13b_multiling_800M/13-05-2024-11:50:01/hf_ckpt --save-interval 500 --verify-checkpoint --verify-checkpoint-model-type LLAMA'] exits with return code = 2 +100.83.37.175: [2024-05-13 11:50:12,451] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 33636 +100.83.37.175: [2024-05-13 11:50:12,453] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 33637 +100.83.37.175: [2024-05-13 11:50:12,453] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 33638 +100.83.37.175: [2024-05-13 11:50:12,453] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 33639 +100.83.37.175: [2024-05-13 11:50:12,454] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 33641 +100.83.37.175: [2024-05-13 11:50:12,481] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 33644 +100.83.37.175: [2024-05-13 11:50:12,507] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 33647 +100.83.37.175: [2024-05-13 11:50:12,508] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 33649 +100.83.37.175: [2024-05-13 11:50:12,509] [ERROR] [launch.py:322:sigkill_handler] ['/usr/bin/bash', '-c', ' cd /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed && python3 -u ./pretrain_llama.py --deepspeed --tensor-model-parallel-size 1 --pipeline-model-parallel-size 1 --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --layernorm-epsilon 1e-6 --num-layers 24 --hidden-size 2048 --ffn-hidden-size 4096 --num-attention-heads 32 --seq-length 2048 --micro-batch-size 1 --global-batch-size 256 --train-iters 10000 --log-interval 10 --eval-iters 10 --eval-interval 500 --data-path /data/hineng/tokenizer//_raw_content_document --vocab-file /data/hineng/tokenizer//gpt2-vocab.json --merge-file /data/hineng/tokenizer//gpt2-merges.txt --optimizer adamw --adam-beta1 0.9 --adam-beta2 0.95 --adam-eps 1e-6 --lr 3e-4 --lr-decay-style cosine --lr-warmup-iters 2000 --clip-grad 1.0 --weight-decay 0.1 --attention-dropout 0.1 --hidden-dropout 0.1 --tensorboard-dir /data/output/llama13b_multiling_800M/13-05-2024-11:50:01/tensorboard --log-validation-ppl-to-tensorboard --log-batch-size-to-tensorboard --log-timers-to-tensorboard --load /data/output/llama13b_multiling_800M/13-05-2024-11:50:01/checkpoints_zero_stage_2 --deepspeed_config=/data/output/llama13b_multiling_800M/13-05-2024-11:50:01/ds_config.json --zero-stage=0 --exit-interval 0 --no-masked-softmax-fusion --no-bias-gelu-fusion --no-bias-dropout-fusion --use-torch-compile false --use-fused-sdpa true --use-fused-sdpa-with-recompute false --bf16 --use_hpu --distributed-backend=hccl --hpu-deterministic --sequence-parallel --save /data/output/llama13b_multiling_800M/13-05-2024-11:50:01/checkpoints_zero_stage_2 --hf-save 
/data/output/llama13b_multiling_800M/13-05-2024-11:50:01/hf_ckpt --save-interval 500 --verify-checkpoint --verify-checkpoint-model-type LLAMA'] exits with return code = 2 +pdsh@vizzhy-150-3: 100.83.37.188: ssh exited with exit code 2 diff --git a/llama13b_multiling_800M/13-05-2024-11:50:01/mds_to_hf_llama_custom.json b/llama13b_multiling_800M/13-05-2024-11:50:01/mds_to_hf_llama_custom.json new file mode 100644 index 0000000000000000000000000000000000000000..984df1b8e167dc47c40209bd7e305c2e697f43a2 --- /dev/null +++ b/llama13b_multiling_800M/13-05-2024-11:50:01/mds_to_hf_llama_custom.json @@ -0,0 +1,40 @@ +{ + "MODEL": { + "num_hidden_layers": 24, + "hidden_size": 2048, + "num_attention_heads": 32, + "intermediate_size": 4096, + "vocab_size":VOCAB_SIZE + }, + "LAYER_MAPPINGS" : { + "word_embeddings": 1, + "transformer": [3, 26], + "final_layernorm": 28, + "final_word_embeddings": 29 + }, + "FULL_NAME_MAPPINGS": { + }, + "PARTIAL_NAME_MAPPINGS": { + "final_word_embeddings": { + "vocab_parallel_projection": "lm_head" + }, + "final_layernorm": { + "final_rmsnorm": "model.norm" + }, + "word_embeddings": { + "word_embeddings": "model.embed_tokens" + }, + "transformer": { + "dense_h_to_4h": "mlp.gate_proj", + "dense_4h_to_h": "mlp.down_proj", + "dense_h_to_4h_swiglu": "mlp.up_proj", + "post_attention_layernorm": "post_attention_layernorm", + "input_layernorm": "input_layernorm", + "dense": "self_attn.o_proj", + "query_key_value": {"query": "self_attn.q_proj", "key": "self_attn.k_proj", "value": "self_attn.v_proj"} + } + }, + "SPECIAL": { + "query_key_value": "attention_qkv" + } +} diff --git a/llama13b_multiling_800M/13-05-2024-11:52:31/ds_config.json b/llama13b_multiling_800M/13-05-2024-11:52:31/ds_config.json new file mode 100644 index 0000000000000000000000000000000000000000..d0ee47e9cda5f537a1be4430e2806d723acd2356 --- /dev/null +++ b/llama13b_multiling_800M/13-05-2024-11:52:31/ds_config.json @@ -0,0 +1,19 @@ +{ + "train_batch_size" : 256, + "train_micro_batch_size_per_gpu": 1, + "steps_per_print": 10, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": 0 + }, + "bf16": { + "enabled": true, + "accumulate_grads_via_hooks": true + }, + "fp16": {"enabled": false}, + "wall_clock_breakdown": false, + "pipeline": { + "pipe_partitioned": false, + "grad_partitioned": false + } +} diff --git a/llama13b_multiling_800M/13-05-2024-11:52:31/log.txt b/llama13b_multiling_800M/13-05-2024-11:52:31/log.txt new file mode 100644 index 0000000000000000000000000000000000000000..9f826457c4ebef49ef2814f516907f7391aefacd --- /dev/null +++ b/llama13b_multiling_800M/13-05-2024-11:52:31/log.txt @@ -0,0 +1,144 @@ +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. 
+ warnings.warn( +[2024-05-13 11:52:33,447] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +[2024-05-13 11:52:34,769] [INFO] [runner.py:574:main] deepspeed_env file = /root/.deepspeed_env +[2024-05-13 11:52:34,769] [INFO] [multinode_runner.py:72:get_cmd] Running on the following workers: 100.83.37.175,100.83.37.188 +[2024-05-13 11:52:34,769] [INFO] [runner.py:585:main] cmd = pdsh -S -f 1024 -w 100.83.37.175,100.83.37.188 export PYTHONPATH=/Model-References/PyTorch/common:/usr/lib/habanalabs/:$PYTHONPATH; export PYTHON=/usr/bin/python3; export PT_HPU_LAZY_ACC_PAR_MODE=0; export PT_HPU_ENABLE_REFINE_DYNAMIC_SHAPES=0; export PT_HPU_ENABLE_WEIGHT_CPU_PERMUTE=0; export MODEL_REFERENCES_ROOT=/Model-References; export ENABLE_CONSOLE=false; export LOG_LEVEL_ALL=4; cd /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed; /usr/bin/python3 -u -m deepspeed.launcher.launch --world_info=eyIxMDAuODMuMzcuMTc1IjogWzAsIDEsIDIsIDMsIDQsIDUsIDYsIDddLCAiMTAwLjgzLjM3LjE4OCI6IFswLCAxLCAyLCAzLCA0LCA1LCA2LCA3XX0= --node_rank=%n --master_addr=100.83.37.175 --master_port=29500 --no_python --no_local_rank /usr/bin/bash -c ' cd /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed && python3 -u ./pretrain_llama.py --deepspeed --tensor-model-parallel-size 1 --pipeline-model-parallel-size 1 --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --layernorm-epsilon 1e-6 --num-layers 24 --hidden-size 2048 --ffn-hidden-size 4096 --num-attention-heads 32 --seq-length 2048 --micro-batch-size 1 --global-batch-size 256 --train-iters 10000 --log-interval 10 --eval-iters 10 --eval-interval 500 --data-path /data/hineng/tokenizer//_raw_content_document --vocab-file /data/hineng/tokenizer//gpt2-vocab.json --merge-file /data/hineng/tokenizer//gpt2-merges.txt --optimizer adamw --adam-beta1 0.9 --adam-beta2 0.95 --adam-eps 1e-6 --lr 3e-4 --lr-decay-style cosine --lr-warmup-iters 2000 --clip-grad 1.0 --weight-decay 0.1 --attention-dropout 0.1 --hidden-dropout 0.1 --tensorboard-dir /data/output/llama13b_multiling_800M/13-05-2024-11:52:31/tensorboard --log-validation-ppl-to-tensorboard --log-batch-size-to-tensorboard --log-timers-to-tensorboard --load /data/output/llama13b_multiling_800M/13-05-2024-11:52:31/checkpoints_zero_stage_2 --deepspeed_config=/data/output/llama13b_multiling_800M/13-05-2024-11:52:31/ds_config.json --zero-stage=0 --exit-interval 0 --no-masked-softmax-fusion --no-bias-gelu-fusion --no-bias-dropout-fusion --use-torch-compile false --use-fused-sdpa true --use-fused-sdpa-with-recompute false --bf16 --use_hpu --distributed-backend=hccl --hpu-deterministic --sequence-parallel --save /data/output/llama13b_multiling_800M/13-05-2024-11:52:31/checkpoints_zero_stage_2 --hf-save /data/output/llama13b_multiling_800M/13-05-2024-11:52:31/hf_ckpt --save-interval 500 --verify-checkpoint --verify-checkpoint-model-type LLAMA' +100.83.37.175: Warning: Permanently added '[100.83.37.175]:3122' (ED25519) to the list of known hosts. +100.83.37.188: Warning: Permanently added '[100.83.37.188]:3122' (ED25519) to the list of known hosts. +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: ...done. +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: ...done. +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. 
Please use torch.use_deterministic_algorithms instead. +100.83.37.175: warnings.warn( +100.83.37.175: [2024-05-13 11:52:36,438] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.188: warnings.warn( +100.83.37.188: [2024-05-13 11:52:36,472] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.188: [2024-05-13 11:52:37,597] [INFO] [launch.py:146:main] WORLD INFO DICT: {'100.83.37.175': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.37.188': [0, 1, 2, 3, 4, 5, 6, 7]} +100.83.37.188: [2024-05-13 11:52:37,597] [INFO] [launch.py:152:main] nnodes=2, num_local_procs=8, node_rank=1 +100.83.37.188: [2024-05-13 11:52:37,597] [INFO] [launch.py:163:main] global_rank_mapping=defaultdict(, {'100.83.37.175': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.37.188': [8, 9, 10, 11, 12, 13, 14, 15]}) +100.83.37.188: [2024-05-13 11:52:37,597] [INFO] [launch.py:164:main] dist_world_size=16 +100.83.37.188: [2024-05-13 11:52:37,597] [INFO] [launch.py:166:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: ...done. +100.83.37.188: ...done. +100.83.37.188: ...done. +100.83.37.188: ...done. +100.83.37.188: ...done. +100.83.37.188: ...done. +100.83.37.188: ...done. +100.83.37.188: ...done. 
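Before the tracebacks that follow, a note on the earlier failure: the 13-05-2024-11:50:01 run above aborted with "pretrain_llama.py: error: unrecognized arguments: ..." and return code 2. That exit code is standard argparse behavior rather than a crash: when the parser encounters flags it does not define, it prints the full usage text (hence the giant usage dumps above) and exits with status 2. A minimal sketch of the mechanism, using a hypothetical one-flag parser rather than the real Megatron-DeepSpeed argument table (which is typically assembled in megatron/arguments.py):

    # Minimal sketch of the exit-code-2 failure mode seen above
    # (hypothetical flags, not the real pretrain_llama.py parser).
    import argparse

    parser = argparse.ArgumentParser(prog="pretrain_llama.py")
    parser.add_argument("--num-layers", type=int)

    # An unknown flag such as --position-embedding-type makes argparse print
    # the usage text to stderr and raise SystemExit(2):
    try:
        parser.parse_args(["--num-layers", "24", "--position-embedding-type", "rotary"])
    except SystemExit as e:
        print(e.code)  # -> 2, matching "exits with return code = 2" above

The practical reading is a version mismatch: the runner script passes flags (--position-embedding-type, --no-bias, --layernorm-type, --activation-func-type, --use-torch-compile, --hpu-deterministic, --hf-save) that this checkout of pretrain_llama.py does not define, so the script and the Megatron-DeepSpeed revision need to be pinned to matching versions.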
+100.83.37.188: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 119 +100.83.37.188: data_post_process=data_post_process) +100.83.37.188: ^ +100.83.37.188: SyntaxError: positional argument follows keyword argument +100.83.37.188: [... the same traceback is printed by each of the 8 local ranks on 100.83.37.188; the concurrent copies arrived interleaved in the raw console output and are collapsed here ...] +100.83.37.175: [2024-05-13 11:52:37,832] [INFO] [launch.py:146:main] WORLD INFO DICT: {'100.83.37.175': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.37.188': [0, 1, 2, 3, 4, 5, 6, 7]} +100.83.37.175: [2024-05-13 11:52:37,832] [INFO] [launch.py:152:main] nnodes=2, num_local_procs=8, node_rank=0 +100.83.37.175: [2024-05-13 11:52:37,832] [INFO] [launch.py:163:main] global_rank_mapping=defaultdict(, {'100.83.37.175': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.37.188': [8, 9, 10, 11, 12, 13, 14, 15]}) +100.83.37.175: [2024-05-13 11:52:37,832] [INFO] [launch.py:164:main] dist_world_size=16 +100.83.37.175: [2024-05-13 11:52:37,832] [INFO] [launch.py:166:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: ...done. +100.83.37.175: ...done. +100.83.37.175: ...done. +100.83.37.175: ...done. +100.83.37.175: ...done. +100.83.37.175: ...done. +100.83.37.175: ...done. +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: ...done.
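The tracebacks above from 100.83.37.188 (repeated identically by 100.83.37.175 below) are a different failure from the 11:50 run: every rank dies while Python compiles pretrain_llama.py, at line 119, with "SyntaxError: positional argument follows keyword argument". Because the error happens before any training code runs, the launcher reports return code 1 rather than argparse's 2. A sketch of the error class, using a hypothetical call site rather than the actual line 119:

    # Reproduces the compile-time error class seen on every rank (hypothetical
    # call; the real offender is line 119 of pretrain_llama.py).
    broken = "pretrain(forward_step, data_post_process=data_post_process, args)"
    try:
        compile(broken, "pretrain_llama.py", "eval")
    except SyntaxError as err:
        print(err.msg)  # -> positional argument follows keyword argument

    # The fix is purely syntactic: all positional arguments must precede
    # keyword arguments in a call.
    fixed = "pretrain(forward_step, args, data_post_process=data_post_process)"
    compile(fixed, "pretrain_llama.py", "eval")  # compiles cleanly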
+100.83.37.175: File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 119 +100.83.37.175: data_post_process=data_post_process) +100.83.37.175: ^ +100.83.37.175: SyntaxError: positional argument follows keyword argument +100.83.37.175: [... the same traceback is printed by each of the 8 local ranks on 100.83.37.175; the concurrent copies arrived interleaved in the raw console output and are collapsed here ...] +100.83.37.188: [2024-05-13 11:52:38,601] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 129388 +100.83.37.188: [2024-05-13 11:52:38,602] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 129389 +100.83.37.188: [2024-05-13 11:52:38,602] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 129390 +100.83.37.188: [2024-05-13 11:52:38,603] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 129391 +100.83.37.188: [2024-05-13 11:52:38,603] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 129392 +100.83.37.188: [2024-05-13 11:52:38,603] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 129393 +100.83.37.188: [2024-05-13 11:52:38,603] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 129394 +100.83.37.188: [2024-05-13 11:52:38,604] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 129395 +100.83.37.188: [2024-05-13 11:52:38,604] [ERROR] [launch.py:322:sigkill_handler] ['/usr/bin/bash', '-c', ' cd /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed && python3 -u ./pretrain_llama.py --deepspeed --tensor-model-parallel-size 1 --pipeline-model-parallel-size 1 --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --layernorm-epsilon 1e-6 --num-layers 24 --hidden-size 2048 --ffn-hidden-size 4096 --num-attention-heads 32 --seq-length 2048 --micro-batch-size 1 --global-batch-size 256 --train-iters 10000
--log-interval 10 --eval-iters 10 --eval-interval 500 --data-path /data/hineng/tokenizer//_raw_content_document --vocab-file /data/hineng/tokenizer//gpt2-vocab.json --merge-file /data/hineng/tokenizer//gpt2-merges.txt --optimizer adamw --adam-beta1 0.9 --adam-beta2 0.95 --adam-eps 1e-6 --lr 3e-4 --lr-decay-style cosine --lr-warmup-iters 2000 --clip-grad 1.0 --weight-decay 0.1 --attention-dropout 0.1 --hidden-dropout 0.1 --tensorboard-dir /data/output/llama13b_multiling_800M/13-05-2024-11:52:31/tensorboard --log-validation-ppl-to-tensorboard --log-batch-size-to-tensorboard --log-timers-to-tensorboard --load /data/output/llama13b_multiling_800M/13-05-2024-11:52:31/checkpoints_zero_stage_2 --deepspeed_config=/data/output/llama13b_multiling_800M/13-05-2024-11:52:31/ds_config.json --zero-stage=0 --exit-interval 0 --no-masked-softmax-fusion --no-bias-gelu-fusion --no-bias-dropout-fusion --use-torch-compile false --use-fused-sdpa true --use-fused-sdpa-with-recompute false --bf16 --use_hpu --distributed-backend=hccl --hpu-deterministic --sequence-parallel --save /data/output/llama13b_multiling_800M/13-05-2024-11:52:31/checkpoints_zero_stage_2 --hf-save /data/output/llama13b_multiling_800M/13-05-2024-11:52:31/hf_ckpt --save-interval 500 --verify-checkpoint --verify-checkpoint-model-type LLAMA'] exits with return code = 1 +100.83.37.175: [2024-05-13 11:52:38,836] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 35482 +100.83.37.175: [2024-05-13 11:52:38,838] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 35483 +100.83.37.175: [2024-05-13 11:52:38,838] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 35484 +100.83.37.175: [2024-05-13 11:52:38,839] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 35485 +100.83.37.175: [2024-05-13 11:52:38,839] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 35488 +100.83.37.175: [2024-05-13 11:52:38,840] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 35489 +100.83.37.175: [2024-05-13 11:52:38,840] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 35492 +100.83.37.175: [2024-05-13 11:52:38,840] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 35496 +100.83.37.175: [2024-05-13 11:52:38,841] [ERROR] [launch.py:322:sigkill_handler] ['/usr/bin/bash', '-c', ' cd /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed && python3 -u ./pretrain_llama.py --deepspeed --tensor-model-parallel-size 1 --pipeline-model-parallel-size 1 --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --layernorm-epsilon 1e-6 --num-layers 24 --hidden-size 2048 --ffn-hidden-size 4096 --num-attention-heads 32 --seq-length 2048 --micro-batch-size 1 --global-batch-size 256 --train-iters 10000 --log-interval 10 --eval-iters 10 --eval-interval 500 --data-path /data/hineng/tokenizer//_raw_content_document --vocab-file /data/hineng/tokenizer//gpt2-vocab.json --merge-file /data/hineng/tokenizer//gpt2-merges.txt --optimizer adamw --adam-beta1 0.9 --adam-beta2 0.95 --adam-eps 1e-6 --lr 3e-4 --lr-decay-style cosine --lr-warmup-iters 2000 --clip-grad 1.0 --weight-decay 0.1 --attention-dropout 0.1 --hidden-dropout 0.1 --tensorboard-dir /data/output/llama13b_multiling_800M/13-05-2024-11:52:31/tensorboard --log-validation-ppl-to-tensorboard --log-batch-size-to-tensorboard --log-timers-to-tensorboard --load /data/output/llama13b_multiling_800M/13-05-2024-11:52:31/checkpoints_zero_stage_2 --deepspeed_config=/data/output/llama13b_multiling_800M/13-05-2024-11:52:31/ds_config.json 
--zero-stage=0 --exit-interval 0 --no-masked-softmax-fusion --no-bias-gelu-fusion --no-bias-dropout-fusion --use-torch-compile false --use-fused-sdpa true --use-fused-sdpa-with-recompute false --bf16 --use_hpu --distributed-backend=hccl --hpu-deterministic --sequence-parallel --save /data/output/llama13b_multiling_800M/13-05-2024-11:52:31/checkpoints_zero_stage_2 --hf-save /data/output/llama13b_multiling_800M/13-05-2024-11:52:31/hf_ckpt --save-interval 500 --verify-checkpoint --verify-checkpoint-model-type LLAMA'] exits with return code = 1 +pdsh@vizzhy-150-3: 100.83.37.188: ssh exited with exit code 1 +pdsh@vizzhy-150-3: 100.83.37.175: ssh exited with exit code 1 diff --git a/llama13b_multiling_800M/13-05-2024-11:52:31/mds_to_hf_llama_custom.json b/llama13b_multiling_800M/13-05-2024-11:52:31/mds_to_hf_llama_custom.json new file mode 100644 index 0000000000000000000000000000000000000000..984df1b8e167dc47c40209bd7e305c2e697f43a2 --- /dev/null +++ b/llama13b_multiling_800M/13-05-2024-11:52:31/mds_to_hf_llama_custom.json @@ -0,0 +1,40 @@ +{ + "MODEL": { + "num_hidden_layers": 24, + "hidden_size": 2048, + "num_attention_heads": 32, + "intermediate_size": 4096, + "vocab_size":VOCAB_SIZE + }, + "LAYER_MAPPINGS" : { + "word_embeddings": 1, + "transformer": [3, 26], + "final_layernorm": 28, + "final_word_embeddings": 29 + }, + "FULL_NAME_MAPPINGS": { + }, + "PARTIAL_NAME_MAPPINGS": { + "final_word_embeddings": { + "vocab_parallel_projection": "lm_head" + }, + "final_layernorm": { + "final_rmsnorm": "model.norm" + }, + "word_embeddings": { + "word_embeddings": "model.embed_tokens" + }, + "transformer": { + "dense_h_to_4h": "mlp.gate_proj", + "dense_4h_to_h": "mlp.down_proj", + "dense_h_to_4h_swiglu": "mlp.up_proj", + "post_attention_layernorm": "post_attention_layernorm", + "input_layernorm": "input_layernorm", + "dense": "self_attn.o_proj", + "query_key_value": {"query": "self_attn.q_proj", "key": "self_attn.k_proj", "value": "self_attn.v_proj"} + } + }, + "SPECIAL": { + "query_key_value": "attention_qkv" + } +} diff --git a/llama13b_multiling_800M/13-05-2024-11:55:44/ds_config.json b/llama13b_multiling_800M/13-05-2024-11:55:44/ds_config.json new file mode 100644 index 0000000000000000000000000000000000000000..d0ee47e9cda5f537a1be4430e2806d723acd2356 --- /dev/null +++ b/llama13b_multiling_800M/13-05-2024-11:55:44/ds_config.json @@ -0,0 +1,19 @@ +{ + "train_batch_size" : 256, + "train_micro_batch_size_per_gpu": 1, + "steps_per_print": 10, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": 0 + }, + "bf16": { + "enabled": true, + "accumulate_grads_via_hooks": true + }, + "fp16": {"enabled": false}, + "wall_clock_breakdown": false, + "pipeline": { + "pipe_partitioned": false, + "grad_partitioned": false + } +} diff --git a/llama13b_multiling_800M/13-05-2024-11:55:44/log.txt b/llama13b_multiling_800M/13-05-2024-11:55:44/log.txt new file mode 100644 index 0000000000000000000000000000000000000000..1abd8adf78e85f4e7da2a816cea1c4bf7be1266c --- /dev/null +++ b/llama13b_multiling_800M/13-05-2024-11:55:44/log.txt @@ -0,0 +1,4800 @@ +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. 
+ warnings.warn( +[2024-05-13 11:55:46,493] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +[2024-05-13 11:55:47,808] [INFO] [runner.py:574:main] deepspeed_env file = /root/.deepspeed_env +[2024-05-13 11:55:47,808] [INFO] [multinode_runner.py:72:get_cmd] Running on the following workers: 100.83.37.175,100.83.37.188 +[2024-05-13 11:55:47,808] [INFO] [runner.py:585:main] cmd = pdsh -S -f 1024 -w 100.83.37.175,100.83.37.188 export PYTHONPATH=/Model-References/PyTorch/common:/usr/lib/habanalabs/:$PYTHONPATH; export PYTHON=/usr/bin/python3; export PT_HPU_LAZY_ACC_PAR_MODE=0; export PT_HPU_ENABLE_REFINE_DYNAMIC_SHAPES=0; export PT_HPU_ENABLE_WEIGHT_CPU_PERMUTE=0; export MODEL_REFERENCES_ROOT=/Model-References; export ENABLE_CONSOLE=false; export LOG_LEVEL_ALL=4; cd /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed; /usr/bin/python3 -u -m deepspeed.launcher.launch --world_info=eyIxMDAuODMuMzcuMTc1IjogWzAsIDEsIDIsIDMsIDQsIDUsIDYsIDddLCAiMTAwLjgzLjM3LjE4OCI6IFswLCAxLCAyLCAzLCA0LCA1LCA2LCA3XX0= --node_rank=%n --master_addr=100.83.37.175 --master_port=29500 --no_python --no_local_rank /usr/bin/bash -c ' cd /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed && python3 -u ./pretrain_llama.py --deepspeed --tensor-model-parallel-size 1 --pipeline-model-parallel-size 1 --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --layernorm-epsilon 1e-6 --num-layers 24 --hidden-size 2048 --ffn-hidden-size 4096 --num-attention-heads 32 --seq-length 2048 --micro-batch-size 1 --global-batch-size 256 --train-iters 10000 --log-interval 10 --eval-iters 10 --eval-interval 500 --data-path /data/hineng/tokenizer//_raw_content_document --vocab-file /data/hineng/tokenizer//gpt2-vocab.json --merge-file /data/hineng/tokenizer//gpt2-merges.txt --optimizer adamw --adam-beta1 0.9 --adam-beta2 0.95 --adam-eps 1e-6 --lr 3e-4 --lr-decay-style cosine --lr-warmup-iters 2000 --clip-grad 1.0 --weight-decay 0.1 --attention-dropout 0.1 --hidden-dropout 0.1 --tensorboard-dir /data/output/llama13b_multiling_800M/13-05-2024-11:55:44/tensorboard --log-validation-ppl-to-tensorboard --log-batch-size-to-tensorboard --log-timers-to-tensorboard --load /data/output/llama13b_multiling_800M/13-05-2024-11:55:44/checkpoints_zero_stage_2 --deepspeed_config=/data/output/llama13b_multiling_800M/13-05-2024-11:55:44/ds_config.json --zero-stage=0 --exit-interval 0 --no-masked-softmax-fusion --no-bias-gelu-fusion --no-bias-dropout-fusion --use-torch-compile false --use-fused-sdpa true --use-fused-sdpa-with-recompute false --bf16 --use_hpu --distributed-backend=hccl --hpu-deterministic --sequence-parallel --save /data/output/llama13b_multiling_800M/13-05-2024-11:55:44/checkpoints_zero_stage_2 --hf-save /data/output/llama13b_multiling_800M/13-05-2024-11:55:44/hf_ckpt --save-interval 500 --verify-checkpoint --verify-checkpoint-model-type LLAMA' +100.83.37.175: Warning: Permanently added '[100.83.37.175]:3122' (ED25519) to the list of known hosts. +100.83.37.188: Warning: Permanently added '[100.83.37.188]:3122' (ED25519) to the list of known hosts. +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: ...done. +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: ...done. +100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. 
Please use torch.use_deterministic_algorithms instead. +100.83.37.175: warnings.warn( +100.83.37.175: [2024-05-13 11:55:49,461] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. +100.83.37.188: warnings.warn( +100.83.37.188: [2024-05-13 11:55:49,582] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +100.83.37.188: [2024-05-13 11:55:50,801] [INFO] [launch.py:146:main] WORLD INFO DICT: {'100.83.37.175': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.37.188': [0, 1, 2, 3, 4, 5, 6, 7]} +100.83.37.188: [2024-05-13 11:55:50,801] [INFO] [launch.py:152:main] nnodes=2, num_local_procs=8, node_rank=1 +100.83.37.188: [2024-05-13 11:55:50,801] [INFO] [launch.py:163:main] global_rank_mapping=defaultdict(, {'100.83.37.175': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.37.188': [8, 9, 10, 11, 12, 13, 14, 15]}) +100.83.37.188: [2024-05-13 11:55:50,801] [INFO] [launch.py:164:main] dist_world_size=16 +100.83.37.188: [2024-05-13 11:55:50,801] [INFO] [launch.py:166:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +100.83.37.175: [2024-05-13 11:55:50,839] [INFO] [launch.py:146:main] WORLD INFO DICT: {'100.83.37.175': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.37.188': [0, 1, 2, 3, 4, 5, 6, 7]} +100.83.37.175: [2024-05-13 11:55:50,839] [INFO] [launch.py:152:main] nnodes=2, num_local_procs=8, node_rank=0 +100.83.37.175: [2024-05-13 11:55:50,839] [INFO] [launch.py:163:main] global_rank_mapping=defaultdict(, {'100.83.37.175': [0, 1, 2, 3, 4, 5, 6, 7], '100.83.37.188': [8, 9, 10, 11, 12, 13, 14, 15]}) +100.83.37.175: [2024-05-13 11:55:50,839] [INFO] [launch.py:164:main] dist_world_size=16 +100.83.37.175: [2024-05-13 11:55:50,839] [INFO] [launch.py:166:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: ...done. +100.83.37.175: ...done. +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: ...done. +100.83.37.175: ...done. +100.83.37.175: ...done. +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: ...done. +100.83.37.175: * Starting OpenBSD Secure Shell server sshd +100.83.37.175: ...done. +100.83.37.175: ...done. +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: ...done. +100.83.37.188: ...done. +100.83.37.188: ...done. +100.83.37.188: * Starting OpenBSD Secure Shell server sshd +100.83.37.188: ...done. +100.83.37.188: ...done. +100.83.37.188: ...done. +100.83.37.188: ...done. +100.83.37.188: ...done. 
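Before the per-rank output below, it is worth sanity-checking the batch geometry this 11:55:44 launch implies. ds_config.json pins train_batch_size to 256 with train_micro_batch_size_per_gpu 1, and the launcher reports dist_world_size=16 (2 nodes x 8 HPUs); with --tensor-model-parallel-size 1 and --pipeline-model-parallel-size 1 the data-parallel group spans all workers, and DeepSpeed enforces train_batch_size = micro_batch x gradient_accumulation_steps x data_parallel_size at initialization. A quick check under those assumptions:

    # Batch-geometry sanity check for this launch (assumes TP = PP = 1, so
    # the data-parallel size equals dist_world_size).
    train_batch_size = 256        # ds_config.json: "train_batch_size"
    micro_batch_per_gpu = 1       # ds_config.json: "train_micro_batch_size_per_gpu"
    data_parallel_size = 16       # launcher: dist_world_size=16

    grad_accum_steps = train_batch_size // (micro_batch_per_gpu * data_parallel_size)
    assert micro_batch_per_gpu * grad_accum_steps * data_parallel_size == train_batch_size
    print(grad_accum_steps)  # -> 16 gradient-accumulation steps per optimizer step

DeepSpeed raises a configuration error when this identity does not hold, so the JSON and the launch flags have to be kept in sync whenever the node count or parallelism layout changes.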
+100.83.37.175: [2024-05-13 11:55:52,560] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect)
+100.83.37.175: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+100.83.37.175: warnings.warn(
+100.83.37.188: [2024-05-13 11:55:52,579] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect)
+100.83.37.188: /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+100.83.37.188: warnings.warn(
+100.83.37.188: warnings.warn(
+100.83.37.188: [2024-05-13 11:55:53,083] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect)
+100.83.37.188: --------------------------------------------------
+100.83.37.188: DeepSpeed C++/CUDA extension op report
+100.83.37.188: --------------------------------------------------
+100.83.37.188: NOTE: Ops not installed will be just-in-time (JIT) compiled at
+100.83.37.188: runtime if needed. Op compatibility means that your system
+100.83.37.188: meet the required dependencies to JIT install the op.
+100.83.37.188: --------------------------------------------------
+100.83.37.188: JIT compiled ops requires ninja
+100.83.37.188: ninja .................. [OKAY]
+100.83.37.188: --------------------------------------------------
+100.83.37.188: op name ................ installed .. compatible
+100.83.37.188: --------------------------------------------------
+100.83.37.188: cpu_adam ............... [NO] ....... [OKAY]
+100.83.37.188: fused_adam ............. [NO] ....... [OKAY]
+100.83.37.188: deepspeed_not_implemented [NO] ....... [OKAY]
+100.83.37.188: transformer_inference .. [NO] ....... [OKAY]
+100.83.37.188: --------------------------------------------------
+100.83.37.188: DeepSpeed general environment info:
+100.83.37.188: torch install path ............... ['/usr/local/lib/python3.10/dist-packages/torch']
+100.83.37.188: torch version .................... 2.1.1a0+gitb51c9f6
+100.83.37.188: deepspeed install path ........... ['/usr/local/lib/python3.10/dist-packages/deepspeed']
+100.83.37.188: deepspeed info ................... 0.12.4+hpu.synapse.v1.14.0, fad45b2, 1.14.0
+100.83.37.188: deepspeed wheel compiled w. ...... torch 2.1
+100.83.37.188: shared memory (/dev/shm) size .... 503.75 GB
+100.83.37.188: fatal: detected dubious ownership in repository at '/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed'
+100.83.37.188: To add an exception for this directory, call:
+100.83.37.188:
+100.83.37.188: 	git config --global --add safe.directory /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed
+100.83.37.188: **** Git info for Megatron: git_hash=unknown git_branch=unknown ****
+100.83.37.188: usage: pretrain_llama.py [-h] [--num-layers NUM_LAYERS]
+100.83.37.188: [--encoder-num-layers ENCODER_NUM_LAYERS]
+100.83.37.188: [--decoder-num-layers DECODER_NUM_LAYERS]
+100.83.37.188: [--num-experts NUM_EXPERTS [NUM_EXPERTS ...]]
+100.83.37.188: [--mlp-type MLP_TYPE] [--topk TOPK]
+100.83.37.188: [--expert-interval EXPERT_INTERVAL]
+100.83.37.188: [--hidden-size HIDDEN_SIZE]
+100.83.37.188: [--ffn-hidden-size FFN_HIDDEN_SIZE]
+100.83.37.188: [--num-attention-heads NUM_ATTENTION_HEADS]
+100.83.37.188: [--num-key-value-heads NUM_KEY_VALUE_HEADS]
+100.83.37.188: [--kv-channels KV_CHANNELS]
+100.83.37.188: [--max-position-embeddings MAX_POSITION_EMBEDDINGS]
+100.83.37.188: [--use-rotary-position-embeddings]
+100.83.37.188:
[--rotary-percent ROTARY_PERCENT] +100.83.37.188: [--no-position-embedding] +100.83.37.188: [--make-vocab-size-divisible-by MAKE_VOCAB_SIZE_DIVISIBLE_BY] +100.83.37.188: [--normalization {layernorm,rmsnorm}] +100.83.37.188: [--use-fused-rmsnorm USE_FUSED_RMSNORM] +100.83.37.188: [--layernorm-epsilon LAYERNORM_EPSILON] +100.83.37.188: [--apply-layernorm-1p] [--disable-mem-efficient-ln] +100.83.37.188: [--apply-residual-connection-post-layernorm] +100.83.37.188: [--openai-gelu] [--squared-relu] [--swiglu] +100.83.37.188: [--onnx-safe ONNX_SAFE] [--bert-no-binary-head] +100.83.37.188: [--num-experts-switch NUM_EXPERTS_SWITCH] +100.83.37.188: [--untie-embeddings-and-output-weights] +100.83.37.188: [--embedding-weights-in-fp32] +100.83.37.188: [--fix-position-emb-redundant-alloc] +100.83.37.188: [--embed-layernorm] +100.83.37.188: [--kill-switch-path KILL_SWITCH_PATH] +100.83.37.188: [--attention-dropout ATTENTION_DROPOUT] +100.83.37.188: [--hidden-dropout HIDDEN_DROPOUT] +100.83.37.188: [--weight-decay WEIGHT_DECAY] +100.83.37.188: [--start-weight-decay START_WEIGHT_DECAY] +100.83.37.188: [--end-weight-decay END_WEIGHT_DECAY] +100.83.37.188: [--weight-decay-incr-style {constant,linear,cosine}] +100.83.37.188: [--clip-grad CLIP_GRAD] [--adam-beta1 ADAM_BETA1] +100.83.37.188: [--adam-beta2 ADAM_BETA2] [--adam-eps ADAM_EPS] +100.83.37.188: [--sgd-momentum SGD_MOMENTUM] +100.83.37.188: [--do-norm-bias-weight-decay] +100.83.37.188: [--micro-batch-size MICRO_BATCH_SIZE] +100.83.37.188: [--eval-micro-batch-size EVAL_MICRO_BATCH_SIZE] +100.83.37.188: [--batch-size BATCH_SIZE] +100.83.37.188: [--global-batch-size GLOBAL_BATCH_SIZE] +100.83.37.188: [--rampup-batch-size [RAMPUP_BATCH_SIZE ...]] +100.83.37.188: [--recompute-activations] +100.83.37.188: [--recompute-granularity {full,selective}] +100.83.37.188: [--distribute-saved-activations] +100.83.37.188: [--recompute-method {uniform,block}] +100.83.37.188: [--recompute-num-layers RECOMPUTE_NUM_LAYERS] +100.83.37.188: [--checkpoint-activations] +100.83.37.188: [--distribute-checkpointed-activations] +100.83.37.188: [--checkpoint-num-layers CHECKPOINT_NUM_LAYERS] +100.83.37.188: [--train-iters TRAIN_ITERS] +100.83.37.188: [--train-samples TRAIN_SAMPLES] +100.83.37.188: [--train-tokens TRAIN_TOKENS] [--random-ltd] +100.83.37.188: [--log-interval LOG_INTERVAL] +100.83.37.188: [--exit-interval EXIT_INTERVAL] +100.83.37.188: [--exit-duration-in-mins EXIT_DURATION_IN_MINS] +100.83.37.188: [--exit-signal-handler] +100.83.37.188: [--tensorboard-dir TENSORBOARD_DIR] +100.83.37.188: [--no-masked-softmax-fusion] [--no-bias-gelu-fusion] +100.83.37.188: [--no-bias-dropout-fusion] +100.83.37.188: [--disable-moe-token-dropping] +100.83.37.188: [--moe-train-capacity-factor MOE_TRAIN_CAPACITY_FACTOR] +100.83.37.188: [--moe-eval-capacity-factor MOE_EVAL_CAPACITY_FACTOR] +100.83.37.188: [--moe-min-capacity MOE_MIN_CAPACITY] +100.83.37.188: [--moe-loss-coeff MOE_LOSS_COEFF] +100.83.37.188: [--create-moe-param-group] [--use-flash-attn] +100.83.37.188: [--use-flash-attn-v2] [--use-flash-attn-triton] +100.83.37.188: [--use-fused-sdpa USE_FUSED_SDPA] +100.83.37.188: [--use-fused-sdpa-with-recompute USE_FUSED_SDPA_WITH_RECOMPUTE] +100.83.37.188: [--disable-bias-linear] +100.83.37.188: [--optimizer {adam,sgd,adamw,fusedadamw}] +100.83.37.188: [--dataloader-type {single,cyclic}] [--ds-inference] +100.83.37.188: [--cpu-optimizer] [--cpu_torch_adam] +100.83.37.188: [--no-pipeline-parallel] [--use-tutel] [--inference] +100.83.37.188: [--no-async-tensor-model-parallel-allreduce] 
+100.83.37.188: [--no-persist-layer-norm] [--sequence-parallel] +100.83.37.188: [--ds-sequence-parallel-size DS_SEQUENCE_PARALLEL_SIZE] +100.83.37.188: [--force-ds-sequence-parallel] +100.83.37.188: [--no-gradient-accumulation-fusion] +100.83.37.188: [--use-dataset-only USE_DATASET_ONLY] [--seed SEED] +100.83.37.188: [--data-parallel-random-init] +100.83.37.188: [--init-method-std INIT_METHOD_STD] +100.83.37.188: [--init-method-xavier-uniform] [--no-scaled-init] +100.83.37.188: [--lr LR] +100.83.37.188: [--lr-decay-style {constant,linear,cosine,inverse-square-root}] +100.83.37.188: [--lr-decay-iters LR_DECAY_ITERS] +100.83.37.188: [--lr-decay-samples LR_DECAY_SAMPLES] +100.83.37.188: [--lr-decay-tokens LR_DECAY_TOKENS] +100.83.37.188: [--lr-warmup-fraction LR_WARMUP_FRACTION] +100.83.37.188: [--lr-warmup-iters LR_WARMUP_ITERS] +100.83.37.188: [--lr-warmup-samples LR_WARMUP_SAMPLES] +100.83.37.188: [--lr-warmup-tokens LR_WARMUP_TOKENS] +100.83.37.188: [--warmup WARMUP] [--min-lr MIN_LR] +100.83.37.188: [--override-opt_param-scheduler] +100.83.37.188: [--use-checkpoint-opt_param-scheduler] [--save SAVE] +100.83.37.188: [--save-interval SAVE_INTERVAL] [--no-save-optim] +100.83.37.188: [--no-save-rng] [--load LOAD] [--no-load-optim] +100.83.37.188: [--no-load-rng] [--no-load-lr-state] [--finetune] +100.83.37.188: [--no-initialization] [--use-checkpoint-args] +100.83.37.188: [--exit-on-missing-checkpoint] +100.83.37.188: [--universal-checkpoint] [--verify-checkpoint] +100.83.37.188: [--verify-checkpoint-model-type {GPT,BLOOM,LLAMA}] +100.83.37.188: [--fp16] [--bf16] [--loss-scale LOSS_SCALE] +100.83.37.188: [--initial-loss-scale INITIAL_LOSS_SCALE] +100.83.37.188: [--min-loss-scale MIN_LOSS_SCALE] +100.83.37.188: [--loss-scale-window LOSS_SCALE_WINDOW] +100.83.37.188: [--hysteresis HYSTERESIS] +100.83.37.188: [--fp32-residual-connection] +100.83.37.188: [--no-query-key-layer-scaling] +100.83.37.188: [--attention-softmax-in-fp32] +100.83.37.188: [--accumulate-allreduce-grads-in-fp32] +100.83.37.188: [--fp16-lm-cross-entropy] +100.83.37.188: [--tensor-model-parallel-size TENSOR_MODEL_PARALLEL_SIZE] +100.83.37.188: [--enable-expert-tensor-parallelism] +100.83.37.188: [--pipeline-model-parallel-size PIPELINE_MODEL_PARALLEL_SIZE] +100.83.37.188: [--pipeline-model-parallel-split-rank PIPELINE_MODEL_PARALLEL_SPLIT_RANK] +100.83.37.188: [--moe-expert-parallel-size MOE_EXPERT_PARALLEL_SIZE] +100.83.37.188: [--model-parallel-size MODEL_PARALLEL_SIZE] +100.83.37.188: [--num-layers-per-virtual-pipeline-stage NUM_LAYERS_PER_VIRTUAL_PIPELINE_STAGE] +100.83.37.188: [--overlap-p2p-communication] +100.83.37.188: [--distributed-backend {nccl,gloo,ccl,hccl}] +100.83.37.188: [--distributed-timeout-minutes DISTRIBUTED_TIMEOUT_MINUTES] +100.83.37.188: [--DDP-impl {local,torch,FSDP}] +100.83.37.188: [--no-contiguous-buffers-in-local-ddp] +100.83.37.188: [--no-scatter-gather-tensors-in-pipeline] +100.83.37.188: [--use-ring-exchange-p2p] [--local-rank LOCAL_RANK] +100.83.37.188: [--lazy-mpu-init LAZY_MPU_INIT] +100.83.37.188: [--use-cpu-initialization] +100.83.37.188: [--empty-unused-memory-level {0,1,2}] +100.83.37.188: [--standalone-embedding-stage] +100.83.37.188: [--use-distributed-optimizer] +100.83.37.188: [--eval-iters EVAL_ITERS] +100.83.37.188: [--eval-interval EVAL_INTERVAL] [--skip-train] +100.83.37.188: [--eval-loss-exit-value EVAL_LOSS_EXIT_VALUE] +100.83.37.188: [--aml-data-download-path AML_DATA_DOWNLOAD_PATH] +100.83.37.188: [--data-path [DATA_PATH ...]] [--split SPLIT] +100.83.37.188: 
[--train-data-path [TRAIN_DATA_PATH ...]] +100.83.37.188: [--valid-data-path [VALID_DATA_PATH ...]] +100.83.37.188: [--test-data-path [TEST_DATA_PATH ...]] +100.83.37.188: [--data-cache-path DATA_CACHE_PATH] +100.83.37.188: [--vocab-size VOCAB_SIZE] [--vocab-file VOCAB_FILE] +100.83.37.188: [--merge-file MERGE_FILE] +100.83.37.188: [--vocab-extra-ids VOCAB_EXTRA_IDS] +100.83.37.188: [--seq-length SEQ_LENGTH] +100.83.37.188: [--encoder-seq-length ENCODER_SEQ_LENGTH] +100.83.37.188: [--decoder-seq-length DECODER_SEQ_LENGTH] +100.83.37.188: [--retriever-seq-length RETRIEVER_SEQ_LENGTH] +100.83.37.188: [--sample-rate SAMPLE_RATE] [--mask-prob MASK_PROB] +100.83.37.188: [--short-seq-prob SHORT_SEQ_PROB] [--mmap-warmup] +100.83.37.188: [--num-workers NUM_WORKERS] +100.83.37.188: [--tokenizer-type {BertWordPieceLowerCase,BertWordPieceCase,GPT2BPETokenizer,SentencePieceTokenizer,GPTSentencePieceTokenizer,NullTokenizer}] +100.83.37.188: [--tokenizer-model TOKENIZER_MODEL] +100.83.37.188: [--data-impl {mmap,infer}] [--reset-position-ids] +100.83.37.188: [--reset-attention-mask] [--eod-mask-loss] +100.83.37.188: [--train-data-exact-num-epochs TRAIN_DATA_EXACT_NUM_EPOCHS] +100.83.37.188: [--return-data-index] +100.83.37.188: [--data-efficiency-curriculum-learning] +100.83.37.188: [--train-idx-path TRAIN_IDX_PATH] +100.83.37.188: [--train-desc-path TRAIN_DESC_PATH] +100.83.37.188: [--train-doc-idx-path TRAIN_DOC_IDX_PATH] +100.83.37.188: [--train-sample-idx-path TRAIN_SAMPLE_IDX_PATH] +100.83.37.188: [--train-shuffle-idx-path TRAIN_SHUFFLE_IDX_PATH] +100.83.37.188: [--mask-tensor-adding] [--no-seq-len-plus-one-tokens] +100.83.37.188: [--adlr-autoresume] +100.83.37.188: [--adlr-autoresume-interval ADLR_AUTORESUME_INTERVAL] +100.83.37.188: [--ict-head-size ICT_HEAD_SIZE] +100.83.37.188: [--biencoder-projection-dim BIENCODER_PROJECTION_DIM] +100.83.37.188: [--biencoder-shared-query-context-model] +100.83.37.188: [--ict-load ICT_LOAD] [--bert-load BERT_LOAD] +100.83.37.188: [--titles-data-path TITLES_DATA_PATH] +100.83.37.188: [--query-in-block-prob QUERY_IN_BLOCK_PROB] +100.83.37.188: [--use-one-sent-docs] +100.83.37.188: [--evidence-data-path EVIDENCE_DATA_PATH] +100.83.37.188: [--retriever-report-topk-accuracies RETRIEVER_REPORT_TOPK_ACCURACIES [RETRIEVER_REPORT_TOPK_ACCURACIES ...]] +100.83.37.188: [--retriever-score-scaling] +100.83.37.188: [--block-data-path BLOCK_DATA_PATH] +100.83.37.188: [--embedding-path EMBEDDING_PATH] +100.83.37.188: [--indexer-batch-size INDEXER_BATCH_SIZE] +100.83.37.188: [--indexer-log-interval INDEXER_LOG_INTERVAL] +100.83.37.188: [--num-classes NUM_CLASSES] [--img-h IMG_H] +100.83.37.188: [--img-w IMG_W] [--num-channels NUM_CHANNELS] +100.83.37.188: [--patch-dim PATCH_DIM] +100.83.37.188: [--classes-fraction CLASSES_FRACTION] +100.83.37.188: [--data-per-class-fraction DATA_PER_CLASS_FRACTION] +100.83.37.188: [--no-data-sharding] [--head-lr-mult HEAD_LR_MULT] +100.83.37.188: [--vision-pretraining] +100.83.37.188: [--vision-pretraining-type {classify,inpaint,dino}] +100.83.37.188: [--vision-backbone-type {vit,mit,swin}] +100.83.37.188: [--swin-backbone-type {tiny,base,h3}] +100.83.37.188: [--mask-type {random,row}] +100.83.37.188: [--mask-factor MASK_FACTOR] +100.83.37.188: [--iter-per-epoch ITER_PER_EPOCH] +100.83.37.188: [--dino-local-img-size DINO_LOCAL_IMG_SIZE] +100.83.37.188: [--dino-local-crops-number DINO_LOCAL_CROPS_NUMBER] +100.83.37.188: [--dino-head-hidden-size DINO_HEAD_HIDDEN_SIZE] +100.83.37.188: [--dino-bottleneck-size DINO_BOTTLENECK_SIZE] +100.83.37.188: 
[--dino-freeze-last-layer DINO_FREEZE_LAST_LAYER] +100.83.37.188: [--dino-norm-last-layer] +100.83.37.188: [--dino-warmup-teacher-temp DINO_WARMUP_TEACHER_TEMP] +100.83.37.188: [--dino-teacher-temp DINO_TEACHER_TEMP] +100.83.37.188: [--dino-warmup-teacher-temp-epochs DINO_WARMUP_TEACHER_TEMP_EPOCHS] +100.83.37.188: [--log-params-norm] [--log-num-zeros-in-grad] +100.83.37.188: [--timing-log-level {0,1,2}] +100.83.37.188: [--no-barrier-with-level-1-timing] +100.83.37.188: [--timing-log-option {max,minmax,all}] +100.83.37.188: [--tensorboard-log-interval TENSORBOARD_LOG_INTERVAL] +100.83.37.188: [--tensorboard-queue-size TENSORBOARD_QUEUE_SIZE] +100.83.37.188: [--log-timers-to-tensorboard] +100.83.37.188: [--log-batch-size-to-tensorboard] +100.83.37.188: [--no-log-learnig-rate-to-tensorboard] +100.83.37.188: [--no-log-loss-scale-to-tensorboard] +100.83.37.188: [--log-validation-ppl-to-tensorboard] +100.83.37.188: [--log-optimizer-states-to-tensorboard] +100.83.37.188: [--log-memory-to-tensorboard] +100.83.37.188: [--log-world-size-to-tensorboard] +100.83.37.188: [--zero-stage ZERO_STAGE] [--zero-reduce-scatter] +100.83.37.188: [--zero-contigious-gradients] +100.83.37.188: [--zero-reduce-bucket-size ZERO_REDUCE_BUCKET_SIZE] +100.83.37.188: [--zero-allgather-bucket-size ZERO_ALLGATHER_BUCKET_SIZE] +100.83.37.188: [--remote-device {none,cpu,nvme}] [--use-pin-memory] +100.83.37.188: [--scattered-embeddings] [--split-transformers] +100.83.37.188: [--memory-centric-tiled-linear] +100.83.37.188: [--tile-factor TILE_FACTOR] +100.83.37.188: [--deepspeed-activation-checkpointing] +100.83.37.188: [--partition-activations] +100.83.37.188: [--contigious-checkpointing] [--checkpoint-in-cpu] +100.83.37.188: [--synchronize-each-layer] [--profile-backward] +100.83.37.188: [--num-layers-teacher NUM_LAYERS_TEACHER] +100.83.37.188: [--num-experts-teacher NUM_EXPERTS_TEACHER [NUM_EXPERTS_TEACHER ...]] +100.83.37.188: [--hidden-size-teacher HIDDEN_SIZE_TEACHER] +100.83.37.188: [--num-attention-heads-teacher NUM_ATTENTION_HEADS_TEACHER] +100.83.37.188: [--mos] [--kd] [--kd-alpha-ce KD_ALPHA_CE] +100.83.37.188: [--kd-beta-ce KD_BETA_CE] [--kd-temp KD_TEMP] +100.83.37.188: [--reset-iteration] [--load-teacher LOAD_TEACHER] +100.83.37.188: [--inference-batch-times-seqlen-threshold INFERENCE_BATCH_TIMES_SEQLEN_THRESHOLD] +100.83.37.188: [--max-tokens-to-oom MAX_TOKENS_TO_OOM] +100.83.37.188: [--output-bert-embeddings] +100.83.37.188: [--bert-embedder-type {megatron,huggingface}] +100.83.37.188: [--cache-fp8-weight] +100.83.37.188: [--cache-fp8-weight-fwd CACHE_FP8_WEIGHT_FWD] +100.83.37.188: [--fp8-e5m2] [--fp8-e4m3] [--fp8-hybrid] +100.83.37.188: [--no-fp8-wgrad] [--fp8-margin FP8_MARGIN] +100.83.37.188: [--fp8-interval FP8_INTERVAL] +100.83.37.188: [--transformer-impl {local,transformer_engine}] +100.83.37.188: [--fp8-amax-history-len FP8_AMAX_HISTORY_LEN] +100.83.37.188: [--fp8-amax-compute-algo {most_recent,max}] +100.83.37.188: [--retro-workdir RETRO_WORKDIR] +100.83.37.188: [--retro-add-retriever] +100.83.37.188: [--retro-cyclic-train-iters RETRO_CYCLIC_TRAIN_ITERS] +100.83.37.188: [--retro-encoder-layers RETRO_ENCODER_LAYERS] +100.83.37.188: [--retro-encoder-hidden-dropout RETRO_ENCODER_HIDDEN_DROPOUT] +100.83.37.188: [--retro-encoder-attention-dropout RETRO_ENCODER_ATTENTION_DROPOUT] +100.83.37.188: [--retro-num-neighbors RETRO_NUM_NEIGHBORS] +100.83.37.188: [--retro-num-retrieved-chunks RETRO_NUM_RETRIEVED_CHUNKS] +100.83.37.188: [--retro-return-doc-ids] +100.83.37.188: [--profile {pt,pt-full,hltv}] 
+100.83.37.188: [--profile-steps PROFILE_STEPS] [--deepspeed]
+100.83.37.188: [--deepspeed_config DEEPSPEED_CONFIG] [--deepscale]
+100.83.37.188: [--deepscale_config DEEPSCALE_CONFIG]
+100.83.37.188: [--deepspeed_mpi] [--use_hpu] [--no_cuda]
+100.83.37.188: pretrain_llama.py: error: unrecognized arguments: --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --use-torch-compile false --hpu-deterministic --hf-save /data/output/llama13b_multiling_800M/13-05-2024-11:55:44/hf_ckpt
+100.83.37.188: usage: pretrain_llama.py [-h] [--num-layers NUM_LAYERS]
+100.83.37.188: [--encoder-num-layers ENCODER_NUM_LAYERS]
+100.83.37.188: [--decoder-num-layers DECODER_NUM_LAYERS]
+100.83.37.188: [--num-experts NUM_EXPERTS [NUM_EXPERTS ...]]
+100.83.37.188: [--mlp-type MLP_TYPE] [--topk TOPK]
+100.83.37.188: [--expert-interval EXPERT_INTERVAL]
+100.83.37.188: [--hidden-size HIDDEN_SIZE]
+100.83.37.188: [--ffn-hidden-size FFN_HIDDEN_SIZE]
+100.83.37.188: [--num-attention-heads NUM_ATTENTION_HEADS]
+100.83.37.188: [--num-key-value-heads NUM_KEY_VALUE_HEADS]
+100.83.37.188: [--kv-channels KV_CHANNELS]
+100.83.37.188: [--max-position-embeddings MAX_POSITION_EMBEDDINGS]
+100.83.37.188: [--use-rotary-position-embeddings]
+100.83.37.188: [--rotary-percent ROTARY_PERCENT]
+100.83.37.188: [--no-position-embedding]
+100.83.37.188: [--make-vocab-size-divisible-by MAKE_VOCAB_SIZE_DIVISIBLE_BY]
+100.83.37.188: [--normalization {layernorm,rmsnorm}]
+100.83.37.188: [--use-fused-rmsnorm USE_FUSED_RMSNORM]
+100.83.37.188: [--layernorm-epsilon LAYERNORM_EPSILON]
+100.83.37.188: [--apply-layernorm-1p] [--disable-mem-efficient-ln]
+100.83.37.188: [--apply-residual-connection-post-layernorm]
+100.83.37.188: [--openai-gelu] [--squared-relu] [--swiglu]
+100.83.37.188: [--onnx-safe ONNX_SAFE] [--bert-no-binary-head]
+100.83.37.188: [--num-experts-switch NUM_EXPERTS_SWITCH]
+100.83.37.188: [--untie-embeddings-and-output-weights]
+100.83.37.188: [--embedding-weights-in-fp32]
+100.83.37.188: [--fix-position-emb-redundant-alloc]
+100.83.37.188: [--embed-layernorm]
+100.83.37.188: [--kill-switch-path KILL_SWITCH_PATH]
+100.83.37.188: [--attention-dropout ATTENTION_DROPOUT]
+100.83.37.188: [--hidden-dropout HIDDEN_DROPOUT]
+100.83.37.188: [--weight-decay WEIGHT_DECAY]
+100.83.37.188: [--start-weight-decay START_WEIGHT_DECAY]
+100.83.37.188: [--end-weight-decay END_WEIGHT_DECAY]
+100.83.37.188:
[--weight-decay-incr-style {constant,linear,cosine}] +100.83.37.188: [--clip-grad CLIP_GRAD] [--adam-beta1 ADAM_BETA1] +100.83.37.188: [--adam-beta2 ADAM_BETA2] [--adam-eps ADAM_EPS] +100.83.37.188: [--sgd-momentum SGD_MOMENTUM] +100.83.37.188: [--do-norm-bias-weight-decay] +100.83.37.188: [--micro-batch-size MICRO_BATCH_SIZE] +100.83.37.188: [--eval-micro-batch-size EVAL_MICRO_BATCH_SIZE] +100.83.37.188: [--batch-size BATCH_SIZE] +100.83.37.188: [--global-batch-size GLOBAL_BATCH_SIZE] +100.83.37.188: [--rampup-batch-size [RAMPUP_BATCH_SIZE ...]] +100.83.37.188: [--recompute-activations] +100.83.37.188: [--recompute-granularity {full,selective}] +100.83.37.188: [--distribute-saved-activations] +100.83.37.188: [--recompute-method {uniform,block}] +100.83.37.188: [--recompute-num-layers RECOMPUTE_NUM_LAYERS] +100.83.37.188: [--checkpoint-activations] +100.83.37.188: [--distribute-checkpointed-activations] +100.83.37.188: [--checkpoint-num-layers CHECKPOINT_NUM_LAYERS] +100.83.37.188: [--train-iters TRAIN_ITERS] +100.83.37.188: [--train-samples TRAIN_SAMPLES] +100.83.37.188: [--train-tokens TRAIN_TOKENS] [--random-ltd] +100.83.37.188: [--log-interval LOG_INTERVAL] +100.83.37.188: [--exit-interval EXIT_INTERVAL] +100.83.37.188: [--exit-duration-in-mins EXIT_DURATION_IN_MINS] +100.83.37.188: [--exit-signal-handler] +100.83.37.188: [--tensorboard-dir TENSORBOARD_DIR] +100.83.37.188: [--no-masked-softmax-fusion] [--no-bias-gelu-fusion] +100.83.37.188: [--no-bias-dropout-fusion] +100.83.37.188: [--disable-moe-token-dropping] +100.83.37.188: [--moe-train-capacity-factor MOE_TRAIN_CAPACITY_FACTOR] +100.83.37.188: [--moe-eval-capacity-factor MOE_EVAL_CAPACITY_FACTOR] +100.83.37.188: [--moe-min-capacity MOE_MIN_CAPACITY] +100.83.37.188: [--moe-loss-coeff MOE_LOSS_COEFF] +100.83.37.188: [--create-moe-param-group] [--use-flash-attn] +100.83.37.188: [--use-flash-attn-v2] [--use-flash-attn-triton] +100.83.37.188: [--use-fused-sdpa USE_FUSED_SDPA] +100.83.37.188: [--use-fused-sdpa-with-recompute USE_FUSED_SDPA_WITH_RECOMPUTE] +100.83.37.188: [--disable-bias-linear] +100.83.37.188: [--optimizer {adam,sgd,adamw,fusedadamw}] +100.83.37.188: [--dataloader-type {single,cyclic}] [--ds-inference] +100.83.37.188: [--cpu-optimizer] [--cpu_torch_adam] +100.83.37.188: [--no-pipeline-parallel] [--use-tutel] [--inference] +100.83.37.188: [--no-async-tensor-model-parallel-allreduce] +100.83.37.188: [--no-persist-layer-norm] [--sequence-parallel] +100.83.37.188: [--ds-sequence-parallel-size DS_SEQUENCE_PARALLEL_SIZE] +100.83.37.188: [--force-ds-sequence-parallel] +100.83.37.188: [--no-gradient-accumulation-fusion] +100.83.37.188: [--use-dataset-only USE_DATASET_ONLY] [--seed SEED] +100.83.37.188: [--data-parallel-random-init] +100.83.37.188: [--init-method-std INIT_METHOD_STD] +100.83.37.188: [--init-method-xavier-uniform] [--no-scaled-init] +100.83.37.188: [--lr LR] +100.83.37.188: [--lr-decay-style {constant,linear,cosine,inverse-square-root}] +100.83.37.188: [--lr-decay-iters LR_DECAY_ITERS] +100.83.37.188: [--lr-decay-samples LR_DECAY_SAMPLES] +100.83.37.188: [--lr-decay-tokens LR_DECAY_TOKENS] +100.83.37.188: [--lr-warmup-fraction LR_WARMUP_FRACTION] +100.83.37.188: [--lr-warmup-iters LR_WARMUP_ITERS] +100.83.37.188: [--lr-warmup-samples LR_WARMUP_SAMPLES] +100.83.37.188: [--lr-warmup-tokens LR_WARMUP_TOKENS] +100.83.37.188: [--warmup WARMUP] [--min-lr MIN_LR] +100.83.37.188: [--override-opt_param-scheduler] +100.83.37.188: [--use-checkpoint-opt_param-scheduler] [--save SAVE] +100.83.37.188: [--save-interval 
SAVE_INTERVAL] [--no-save-optim] +100.83.37.188: [--no-save-rng] [--load LOAD] [--no-load-optim] +100.83.37.188: [--no-load-rng] [--no-load-lr-state] [--finetune] +100.83.37.188: [--no-initialization] [--use-checkpoint-args] +100.83.37.188: [--exit-on-missing-checkpoint] +100.83.37.188: [--universal-checkpoint] [--verify-checkpoint] +100.83.37.188: [--verify-checkpoint-model-type {GPT,BLOOM,LLAMA}] +100.83.37.188: [--fp16] [--bf16] [--loss-scale LOSS_SCALE] +100.83.37.188: [--initial-loss-scale INITIAL_LOSS_SCALE] +100.83.37.188: [--min-loss-scale MIN_LOSS_SCALE] +100.83.37.188: [--loss-scale-window LOSS_SCALE_WINDOW] +100.83.37.188: [--hysteresis HYSTERESIS] +100.83.37.188: [--fp32-residual-connection] +100.83.37.188: [--no-query-key-layer-scaling] +100.83.37.188: [--attention-softmax-in-fp32] +100.83.37.188: [--accumulate-allreduce-grads-in-fp32] +100.83.37.188: [--fp16-lm-cross-entropy] +100.83.37.188: [--tensor-model-parallel-size TENSOR_MODEL_PARALLEL_SIZE] +100.83.37.188: [--enable-expert-tensor-parallelism] +100.83.37.188: [--pipeline-model-parallel-size PIPELINE_MODEL_PARALLEL_SIZE] +100.83.37.188: [--pipeline-model-parallel-split-rank PIPELINE_MODEL_PARALLEL_SPLIT_RANK] +100.83.37.188: [--moe-expert-parallel-size MOE_EXPERT_PARALLEL_SIZE] +100.83.37.188: [--model-parallel-size MODEL_PARALLEL_SIZE] +100.83.37.188: [--num-layers-per-virtual-pipeline-stage NUM_LAYERS_PER_VIRTUAL_PIPELINE_STAGE] +100.83.37.188: [--overlap-p2p-communication] +100.83.37.188: [--distributed-backend {nccl,gloo,ccl,hccl}] +100.83.37.188: [--distributed-timeout-minutes DISTRIBUTED_TIMEOUT_MINUTES] +100.83.37.188: [--DDP-impl {local,torch,FSDP}] +100.83.37.188: [--no-contiguous-buffers-in-local-ddp] +100.83.37.188: [--no-scatter-gather-tensors-in-pipeline] +100.83.37.188: [--use-ring-exchange-p2p] [--local-rank LOCAL_RANK] +100.83.37.188: [--lazy-mpu-init LAZY_MPU_INIT] +100.83.37.188: [--use-cpu-initialization] +100.83.37.188: [--empty-unused-memory-level {0,1,2}] +100.83.37.188: [--standalone-embedding-stage] +100.83.37.188: [--use-distributed-optimizer] +100.83.37.188: [--eval-iters EVAL_ITERS] +100.83.37.188: [--eval-interval EVAL_INTERVAL] [--skip-train] +100.83.37.188: [--eval-loss-exit-value EVAL_LOSS_EXIT_VALUE] +100.83.37.188: [--aml-data-download-path AML_DATA_DOWNLOAD_PATH] +100.83.37.188: [--data-path [DATA_PATH ...]] [--split SPLIT] +100.83.37.188: [--train-data-path [TRAIN_DATA_PATH ...]] +100.83.37.188: [--valid-data-path [VALID_DATA_PATH ...]] +100.83.37.188: [--test-data-path [TEST_DATA_PATH ...]] +100.83.37.188: [--data-cache-path DATA_CACHE_PATH] +100.83.37.188: [--vocab-size VOCAB_SIZE] [--vocab-file VOCAB_FILE] +100.83.37.188: [--merge-file MERGE_FILE] +100.83.37.188: [--vocab-extra-ids VOCAB_EXTRA_IDS] +100.83.37.188: [--seq-length SEQ_LENGTH] +100.83.37.188: [--encoder-seq-length ENCODER_SEQ_LENGTH] +100.83.37.188: [--decoder-seq-length DECODER_SEQ_LENGTH] +100.83.37.188: [--retriever-seq-length RETRIEVER_SEQ_LENGTH] +100.83.37.188: [--sample-rate SAMPLE_RATE] [--mask-prob MASK_PROB] +100.83.37.188: [--short-seq-prob SHORT_SEQ_PROB] [--mmap-warmup] +100.83.37.188: [--num-workers NUM_WORKERS] +100.83.37.188: [--tokenizer-type {BertWordPieceLowerCase,BertWordPieceCase,GPT2BPETokenizer,SentencePieceTokenizer,GPTSentencePieceTokenizer,NullTokenizer}] +100.83.37.188: [--tokenizer-model TOKENIZER_MODEL] +100.83.37.188: [--data-impl {mmap,infer}] [--reset-position-ids] +100.83.37.188: [--reset-attention-mask] [--eod-mask-loss] +100.83.37.188: [--train-data-exact-num-epochs 
TRAIN_DATA_EXACT_NUM_EPOCHS] +100.83.37.188: [--return-data-index] +100.83.37.188: [--data-efficiency-curriculum-learning] +100.83.37.188: [--train-idx-path TRAIN_IDX_PATH] +100.83.37.188: [--train-desc-path TRAIN_DESC_PATH] +100.83.37.188: [--train-doc-idx-path TRAIN_DOC_IDX_PATH] +100.83.37.188: [--train-sample-idx-path TRAIN_SAMPLE_IDX_PATH] +100.83.37.188: [--train-shuffle-idx-path TRAIN_SHUFFLE_IDX_PATH] +100.83.37.188: [--mask-tensor-adding] [--no-seq-len-plus-one-tokens] +100.83.37.188: [--adlr-autoresume] +100.83.37.188: [--adlr-autoresume-interval ADLR_AUTORESUME_INTERVAL] +100.83.37.188: [--ict-head-size ICT_HEAD_SIZE] +100.83.37.188: [--biencoder-projection-dim BIENCODER_PROJECTION_DIM] +100.83.37.188: [--biencoder-shared-query-context-model] +100.83.37.188: [--ict-load ICT_LOAD] [--bert-load BERT_LOAD] +100.83.37.188: [--titles-data-path TITLES_DATA_PATH] +100.83.37.188: [--query-in-block-prob QUERY_IN_BLOCK_PROB] +100.83.37.188: [--use-one-sent-docs] +100.83.37.188: [--evidence-data-path EVIDENCE_DATA_PATH] +100.83.37.188: [--retriever-report-topk-accuracies RETRIEVER_REPORT_TOPK_ACCURACIES [RETRIEVER_REPORT_TOPK_ACCURACIES ...]] +100.83.37.188: [--retriever-score-scaling] +100.83.37.188: [--block-data-path BLOCK_DATA_PATH] +100.83.37.188: [--embedding-path EMBEDDING_PATH] +100.83.37.188: [--indexer-batch-size INDEXER_BATCH_SIZE] +100.83.37.188: [--indexer-log-interval INDEXER_LOG_INTERVAL] +100.83.37.188: [--num-classes NUM_CLASSES] [--img-h IMG_H] +100.83.37.188: [--img-w IMG_W] [--num-channels NUM_CHANNELS] +100.83.37.188: [--patch-dim PATCH_DIM] +100.83.37.188: [--classes-fraction CLASSES_FRACTION] +100.83.37.188: [--data-per-class-fraction DATA_PER_CLASS_FRACTION] +100.83.37.188: [--no-data-sharding] [--head-lr-mult HEAD_LR_MULT] +100.83.37.188: [--vision-pretraining] +100.83.37.188: [--vision-pretraining-type {classify,inpaint,dino}] +100.83.37.188: [--vision-backbone-type {vit,mit,swin}] +100.83.37.188: [--swin-backbone-type {tiny,base,h3}] +100.83.37.188: [--mask-type {random,row}] +100.83.37.188: [--mask-factor MASK_FACTOR] +100.83.37.188: [--iter-per-epoch ITER_PER_EPOCH] +100.83.37.188: [--dino-local-img-size DINO_LOCAL_IMG_SIZE] +100.83.37.188: [--dino-local-crops-number DINO_LOCAL_CROPS_NUMBER] +100.83.37.188: [--dino-head-hidden-size DINO_HEAD_HIDDEN_SIZE] +100.83.37.188: [--dino-bottleneck-size DINO_BOTTLENECK_SIZE] +100.83.37.188: [--dino-freeze-last-layer DINO_FREEZE_LAST_LAYER] +100.83.37.188: [--dino-norm-last-layer] +100.83.37.188: [--dino-warmup-teacher-temp DINO_WARMUP_TEACHER_TEMP] +100.83.37.188: [--dino-teacher-temp DINO_TEACHER_TEMP] +100.83.37.188: [--dino-warmup-teacher-temp-epochs DINO_WARMUP_TEACHER_TEMP_EPOCHS] +100.83.37.188: [--log-params-norm] [--log-num-zeros-in-grad] +100.83.37.188: [--timing-log-level {0,1,2}] +100.83.37.188: [--no-barrier-with-level-1-timing] +100.83.37.188: [--timing-log-option {max,minmax,all}] +100.83.37.188: [--tensorboard-log-interval TENSORBOARD_LOG_INTERVAL] +100.83.37.188: [--tensorboard-queue-size TENSORBOARD_QUEUE_SIZE] +100.83.37.188: [--log-timers-to-tensorboard] +100.83.37.188: [--log-batch-size-to-tensorboard] +100.83.37.188: [--no-log-learnig-rate-to-tensorboard] +100.83.37.188: [--no-log-loss-scale-to-tensorboard] +100.83.37.188: [--log-validation-ppl-to-tensorboard] +100.83.37.188: [--log-optimizer-states-to-tensorboard] +100.83.37.188: [--log-memory-to-tensorboard] +100.83.37.188: [--log-world-size-to-tensorboard] +100.83.37.188: [--zero-stage ZERO_STAGE] [--zero-reduce-scatter] +100.83.37.188: 
[--zero-contigious-gradients] +100.83.37.188: [--zero-reduce-bucket-size ZERO_REDUCE_BUCKET_SIZE] +100.83.37.188: [--zero-allgather-bucket-size ZERO_ALLGATHER_BUCKET_SIZE] +100.83.37.188: [--remote-device {none,cpu,nvme}] [--use-pin-memory] +100.83.37.188: [--scattered-embeddings] [--split-transformers] +100.83.37.188: [--memory-centric-tiled-linear] +100.83.37.188: [--tile-factor TILE_FACTOR] +100.83.37.188: [--deepspeed-activation-checkpointing] +100.83.37.188: [--partition-activations] +100.83.37.188: [--contigious-checkpointing] [--checkpoint-in-cpu] +100.83.37.188: [--synchronize-each-layer] [--profile-backward] +100.83.37.188: [--num-layers-teacher NUM_LAYERS_TEACHER] +100.83.37.188: [--num-experts-teacher NUM_EXPERTS_TEACHER [NUM_EXPERTS_TEACHER ...]] +100.83.37.188: [--hidden-size-teacher HIDDEN_SIZE_TEACHER] +100.83.37.188: [--num-attention-heads-teacher NUM_ATTENTION_HEADS_TEACHER] +100.83.37.188: [--mos] [--kd] [--kd-alpha-ce KD_ALPHA_CE] +100.83.37.188: [--kd-beta-ce KD_BETA_CE] [--kd-temp KD_TEMP] +100.83.37.188: [--reset-iteration] [--load-teacher LOAD_TEACHER] +100.83.37.188: [--inference-batch-times-seqlen-threshold INFERENCE_BATCH_TIMES_SEQLEN_THRESHOLD] +100.83.37.188: [--max-tokens-to-oom MAX_TOKENS_TO_OOM] +100.83.37.188: [--output-bert-embeddings] +100.83.37.188: [--bert-embedder-type {megatron,huggingface}] +100.83.37.188: [--cache-fp8-weight] +100.83.37.188: [--cache-fp8-weight-fwd CACHE_FP8_WEIGHT_FWD] +100.83.37.188: [--fp8-e5m2] [--fp8-e4m3] [--fp8-hybrid] +100.83.37.188: [--no-fp8-wgrad] [--fp8-margin FP8_MARGIN] +100.83.37.188: [--fp8-interval FP8_INTERVAL] +100.83.37.188: [--transformer-impl {local,transformer_engine}] +100.83.37.188: [--fp8-amax-history-len FP8_AMAX_HISTORY_LEN] +100.83.37.188: [--fp8-amax-compute-algo {most_recent,max}] +100.83.37.188: [--retro-workdir RETRO_WORKDIR] +100.83.37.188: [--retro-add-retriever] +100.83.37.188: [--retro-cyclic-train-iters RETRO_CYCLIC_TRAIN_ITERS] +100.83.37.188: [--retro-encoder-layers RETRO_ENCODER_LAYERS] +100.83.37.188: [--retro-encoder-hidden-dropout RETRO_ENCODER_HIDDEN_DROPOUT] +100.83.37.188: [--retro-encoder-attention-dropout RETRO_ENCODER_ATTENTION_DROPOUT] +100.83.37.188: [--retro-num-neighbors RETRO_NUM_NEIGHBORS] +100.83.37.188: [--retro-num-retrieved-chunks RETRO_NUM_RETRIEVED_CHUNKS] +100.83.37.188: [--retro-return-doc-ids] +100.83.37.188: [--profile {pt,pt-full,hltv}] +100.83.37.188: [--profile-steps PROFILE_STEPS] [--deepspeed] +100.83.37.188: [--deepspeed_config DEEPSPEED_CONFIG] [--deepscale] +100.83.37.188: [--deepscale_config DEEPSCALE_CONFIG] +100.83.37.188: [--deepspeed_mpi] [--use_hpu] [--no_cuda] +100.83.37.188: pretrain_llama.py: error: unrecognized arguments: --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --use-torch-compile false --hpu-deterministic --hf-save /data/output/llama13b_multiling_800M/13-05-2024-11:55:44/hf_ckpt +100.83.37.175: ---------------------------------------------------------------------------------------------------- +100.83.37.175: +100.83.37.175: DeepSpeed C++/CUDA extension op reportDeepSpeed C++/CUDA extension op report +100.83.37.175: +100.83.37.175: ---------------------------------------------------------------------------------------------------- +100.83.37.175: +100.83.37.175: NOTE: Ops not installed will be just-in-time (JIT) compiled at +100.83.37.175: runtime if needed. 
+100.83.37.175: --------------------------------------------------
+100.83.37.175: DeepSpeed C++/CUDA extension op report
+100.83.37.175: --------------------------------------------------
+100.83.37.175: NOTE: Ops not installed will be just-in-time (JIT) compiled at
+100.83.37.175: runtime if needed. Op compatibility means that your system
+100.83.37.175: meet the required dependencies to JIT install the op.
+100.83.37.175: --------------------------------------------------
+100.83.37.175: JIT compiled ops requires ninja
+100.83.37.175: ninja .................. [OKAY]
+100.83.37.175: --------------------------------------------------
+100.83.37.175: op name ................ installed .. compatible
+100.83.37.175: --------------------------------------------------
+100.83.37.175: cpu_adam ............... [NO] ....... [OKAY]
+100.83.37.175: fused_adam ............. [NO] ....... [OKAY]
+100.83.37.175: deepspeed_not_implemented [NO] ....... [OKAY]
+100.83.37.175: transformer_inference .. [NO] ....... [OKAY]
+100.83.37.175: --------------------------------------------------
+100.83.37.175: DeepSpeed general environment info:
+100.83.37.175: torch install path ............... ['/usr/local/lib/python3.10/dist-packages/torch']
+100.83.37.175: torch version .................... 2.1.1a0+gitb51c9f6
+100.83.37.175: deepspeed install path ........... ['/usr/local/lib/python3.10/dist-packages/deepspeed']
+100.83.37.175: deepspeed info ................... 0.12.4+hpu.synapse.v1.14.0, fad45b2, 1.14.0
+100.83.37.175: deepspeed wheel compiled w. ...... torch 2.1
+100.83.37.175: shared memory (/dev/shm) size .... 503.75 GB
+100.83.37.175: fatal: detected dubious ownership in repository at '/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed'
+100.83.37.175: To add an exception for this directory, call:
+100.83.37.175:
+100.83.37.175: git config --global --add safe.directory /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed
+100.83.37.175: **** Git info for Megatron: git_hash=unknown git_branch=unknown ****
+100.83.37.175: pretrain_llama.py: error: unrecognized arguments: --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --use-torch-compile false --hpu-deterministic --hf-save /data/output/llama13b_multiling_800M/13-05-2024-11:55:44/hf_ckpt
+100.83.37.175: --------------------------------------------------
+100.83.37.175: DeepSpeed C++/CUDA extension op report
+100.83.37.175: --------------------------------------------------
+100.83.37.175: NOTE: Ops not installed will be just-in-time (JIT) compiled at
+100.83.37.175: runtime if needed. Op compatibility means that your system
+100.83.37.175: meet the required dependencies to JIT install the op.
+100.83.37.175: --------------------------------------------------
+100.83.37.175: JIT compiled ops requires ninja
+100.83.37.175: ninja .................. [OKAY]
+100.83.37.175: --------------------------------------------------
+100.83.37.175: op name ................ installed .. compatible
+100.83.37.175: --------------------------------------------------
+100.83.37.175: cpu_adam ............... [NO] ....... [OKAY]
+100.83.37.175: fused_adam ............. [NO] ....... [OKAY]
+100.83.37.175: deepspeed_not_implemented [NO] ....... [OKAY]
+100.83.37.175: transformer_inference .. [NO] ....... [OKAY]
+100.83.37.175: --------------------------------------------------
+100.83.37.175: DeepSpeed general environment info:
+100.83.37.175: torch install path ............... ['/usr/local/lib/python3.10/dist-packages/torch']
+100.83.37.175: torch version .................... 2.1.1a0+gitb51c9f6
+100.83.37.175: deepspeed install path ........... ['/usr/local/lib/python3.10/dist-packages/deepspeed']
+100.83.37.175: deepspeed info ................... 0.12.4+hpu.synapse.v1.14.0, fad45b2, 1.14.0
+100.83.37.175: deepspeed wheel compiled w. ...... torch 2.1
+100.83.37.175: shared memory (/dev/shm) size ....
503.75 GB +100.83.37.175: fatal: detected dubious ownership in repository at '/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed' +100.83.37.175: To add an exception for this directory, call: +100.83.37.175: +100.83.37.175: git config --global --add safe.directory /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed +100.83.37.175: **** Git info for Megatron: git_hash=unknown git_branch=unknown **** +100.83.37.175: usage: pretrain_llama.py [-h] [--num-layers NUM_LAYERS] +100.83.37.175: [--encoder-num-layers ENCODER_NUM_LAYERS] +100.83.37.175: [--decoder-num-layers DECODER_NUM_LAYERS] +100.83.37.175: [--num-experts NUM_EXPERTS [NUM_EXPERTS ...]] +100.83.37.175: [--mlp-type MLP_TYPE] [--topk TOPK] +100.83.37.175: [--expert-interval EXPERT_INTERVAL] +100.83.37.175: [--hidden-size HIDDEN_SIZE] +100.83.37.175: [--ffn-hidden-size FFN_HIDDEN_SIZE] +100.83.37.175: [--num-attention-heads NUM_ATTENTION_HEADS] +100.83.37.175: [--num-key-value-heads NUM_KEY_VALUE_HEADS] +100.83.37.175: [--kv-channels KV_CHANNELS] +100.83.37.175: [--max-position-embeddings MAX_POSITION_EMBEDDINGS] +100.83.37.175: [--use-rotary-position-embeddings] +100.83.37.175: [--rotary-percent ROTARY_PERCENT] +100.83.37.175: [--no-position-embedding] +100.83.37.175: [--make-vocab-size-divisible-by MAKE_VOCAB_SIZE_DIVISIBLE_BY] +100.83.37.175: [--normalization {layernorm,rmsnorm}] +100.83.37.175: [--use-fused-rmsnorm USE_FUSED_RMSNORM] +100.83.37.175: [--layernorm-epsilon LAYERNORM_EPSILON] +100.83.37.175: [--apply-layernorm-1p] [--disable-mem-efficient-ln] +100.83.37.175: [--apply-residual-connection-post-layernorm] +100.83.37.175: [--openai-gelu] [--squared-relu] [--swiglu] +100.83.37.175: [--onnx-safe ONNX_SAFE] [--bert-no-binary-head] +100.83.37.175: [--num-experts-switch NUM_EXPERTS_SWITCH] +100.83.37.175: [--untie-embeddings-and-output-weights] +100.83.37.175: [--embedding-weights-in-fp32] +100.83.37.175: [--fix-position-emb-redundant-alloc] +100.83.37.175: [--embed-layernorm] +100.83.37.175: [--kill-switch-path KILL_SWITCH_PATH] +100.83.37.175: [--attention-dropout ATTENTION_DROPOUT] +100.83.37.175: [--hidden-dropout HIDDEN_DROPOUT] +100.83.37.175: [--weight-decay WEIGHT_DECAY] +100.83.37.175: [--start-weight-decay START_WEIGHT_DECAY] +100.83.37.175: [--end-weight-decay END_WEIGHT_DECAY] +100.83.37.175: [--weight-decay-incr-style {constant,linear,cosine}] +100.83.37.175: [--clip-grad CLIP_GRAD] [--adam-beta1 ADAM_BETA1] +100.83.37.175: [--adam-beta2 ADAM_BETA2] [--adam-eps ADAM_EPS] +100.83.37.175: [--sgd-momentum SGD_MOMENTUM] +100.83.37.175: [--do-norm-bias-weight-decay] +100.83.37.175: [--micro-batch-size MICRO_BATCH_SIZE] +100.83.37.175: [--eval-micro-batch-size EVAL_MICRO_BATCH_SIZE] +100.83.37.175: [--batch-size BATCH_SIZE] +100.83.37.175: [--global-batch-size GLOBAL_BATCH_SIZE] +100.83.37.175: [--rampup-batch-size [RAMPUP_BATCH_SIZE ...]] +100.83.37.175: [--recompute-activations] +100.83.37.175: [--recompute-granularity {full,selective}] +100.83.37.175: [--distribute-saved-activations] +100.83.37.175: [--recompute-method {uniform,block}] +100.83.37.175: [--recompute-num-layers RECOMPUTE_NUM_LAYERS] +100.83.37.175: [--checkpoint-activations] +100.83.37.175: [--distribute-checkpointed-activations] +100.83.37.175: [--checkpoint-num-layers CHECKPOINT_NUM_LAYERS] +100.83.37.175: [--train-iters TRAIN_ITERS] +100.83.37.175: [--train-samples TRAIN_SAMPLES] +100.83.37.175: [--train-tokens TRAIN_TOKENS] [--random-ltd] +100.83.37.175: [--log-interval LOG_INTERVAL] +100.83.37.175: [--exit-interval 
EXIT_INTERVAL] +100.83.37.175: [--exit-duration-in-mins EXIT_DURATION_IN_MINS] +100.83.37.175: [--exit-signal-handler] +100.83.37.175: [--tensorboard-dir TENSORBOARD_DIR] +100.83.37.175: [--no-masked-softmax-fusion] [--no-bias-gelu-fusion] +100.83.37.175: [--no-bias-dropout-fusion] +100.83.37.175: [--disable-moe-token-dropping] +100.83.37.175: [--moe-train-capacity-factor MOE_TRAIN_CAPACITY_FACTOR] +100.83.37.175: [--moe-eval-capacity-factor MOE_EVAL_CAPACITY_FACTOR] +100.83.37.175: [--moe-min-capacity MOE_MIN_CAPACITY] +100.83.37.175: [--moe-loss-coeff MOE_LOSS_COEFF] +100.83.37.175: [--create-moe-param-group] [--use-flash-attn] +100.83.37.175: [--use-flash-attn-v2] [--use-flash-attn-triton] +100.83.37.175: [--use-fused-sdpa USE_FUSED_SDPA] +100.83.37.175: [--use-fused-sdpa-with-recompute USE_FUSED_SDPA_WITH_RECOMPUTE] +100.83.37.175: [--disable-bias-linear] +100.83.37.175: [--optimizer {adam,sgd,adamw,fusedadamw}] +100.83.37.175: [--dataloader-type {single,cyclic}] [--ds-inference] +100.83.37.175: [--cpu-optimizer] [--cpu_torch_adam] +100.83.37.175: [--no-pipeline-parallel] [--use-tutel] [--inference] +100.83.37.175: [--no-async-tensor-model-parallel-allreduce] +100.83.37.175: [--no-persist-layer-norm] [--sequence-parallel] +100.83.37.175: [--ds-sequence-parallel-size DS_SEQUENCE_PARALLEL_SIZE] +100.83.37.175: [--force-ds-sequence-parallel] +100.83.37.175: [--no-gradient-accumulation-fusion] +100.83.37.175: [--use-dataset-only USE_DATASET_ONLY] [--seed SEED] +100.83.37.175: [--data-parallel-random-init] +100.83.37.175: [--init-method-std INIT_METHOD_STD] +100.83.37.175: [--init-method-xavier-uniform] [--no-scaled-init] +100.83.37.175: [--lr LR] +100.83.37.175: [--lr-decay-style {constant,linear,cosine,inverse-square-root}] +100.83.37.175: [--lr-decay-iters LR_DECAY_ITERS] +100.83.37.175: [--lr-decay-samples LR_DECAY_SAMPLES] +100.83.37.175: [--lr-decay-tokens LR_DECAY_TOKENS] +100.83.37.175: [--lr-warmup-fraction LR_WARMUP_FRACTION] +100.83.37.175: [--lr-warmup-iters LR_WARMUP_ITERS] +100.83.37.175: [--lr-warmup-samples LR_WARMUP_SAMPLES] +100.83.37.175: [--lr-warmup-tokens LR_WARMUP_TOKENS] +100.83.37.175: [--warmup WARMUP] [--min-lr MIN_LR] +100.83.37.175: [--override-opt_param-scheduler] +100.83.37.175: [--use-checkpoint-opt_param-scheduler] [--save SAVE] +100.83.37.175: [--save-interval SAVE_INTERVAL] [--no-save-optim] +100.83.37.175: [--no-save-rng] [--load LOAD] [--no-load-optim] +100.83.37.175: [--no-load-rng] [--no-load-lr-state] [--finetune] +100.83.37.175: [--no-initialization] [--use-checkpoint-args] +100.83.37.175: [--exit-on-missing-checkpoint] +100.83.37.175: [--universal-checkpoint] [--verify-checkpoint] +100.83.37.175: [--verify-checkpoint-model-type {GPT,BLOOM,LLAMA}] +100.83.37.175: [--fp16] [--bf16] [--loss-scale LOSS_SCALE] +100.83.37.175: [--initial-loss-scale INITIAL_LOSS_SCALE] +100.83.37.175: [--min-loss-scale MIN_LOSS_SCALE] +100.83.37.175: [--loss-scale-window LOSS_SCALE_WINDOW] +100.83.37.175: [--hysteresis HYSTERESIS] +100.83.37.175: [--fp32-residual-connection] +100.83.37.175: [--no-query-key-layer-scaling] +100.83.37.175: [--attention-softmax-in-fp32] +100.83.37.175: [--accumulate-allreduce-grads-in-fp32] +100.83.37.175: [--fp16-lm-cross-entropy] +100.83.37.175: [--tensor-model-parallel-size TENSOR_MODEL_PARALLEL_SIZE] +100.83.37.175: [--enable-expert-tensor-parallelism] +100.83.37.175: [--pipeline-model-parallel-size PIPELINE_MODEL_PARALLEL_SIZE] +100.83.37.175: [--pipeline-model-parallel-split-rank PIPELINE_MODEL_PARALLEL_SPLIT_RANK] +100.83.37.175: 
[--moe-expert-parallel-size MOE_EXPERT_PARALLEL_SIZE] +100.83.37.175: [--model-parallel-size MODEL_PARALLEL_SIZE] +100.83.37.175: [--num-layers-per-virtual-pipeline-stage NUM_LAYERS_PER_VIRTUAL_PIPELINE_STAGE] +100.83.37.175: [--overlap-p2p-communication] +100.83.37.175: [--distributed-backend {nccl,gloo,ccl,hccl}] +100.83.37.175: [--distributed-timeout-minutes DISTRIBUTED_TIMEOUT_MINUTES] +100.83.37.175: [--DDP-impl {local,torch,FSDP}] +100.83.37.175: [--no-contiguous-buffers-in-local-ddp] +100.83.37.175: [--no-scatter-gather-tensors-in-pipeline] +100.83.37.175: [--use-ring-exchange-p2p] [--local-rank LOCAL_RANK] +100.83.37.175: [--lazy-mpu-init LAZY_MPU_INIT] +100.83.37.175: [--use-cpu-initialization] +100.83.37.175: [--empty-unused-memory-level {0,1,2}] +100.83.37.175: [--standalone-embedding-stage] +100.83.37.175: [--use-distributed-optimizer] +100.83.37.175: [--eval-iters EVAL_ITERS] +100.83.37.175: [--eval-interval EVAL_INTERVAL] [--skip-train] +100.83.37.175: [--eval-loss-exit-value EVAL_LOSS_EXIT_VALUE] +100.83.37.175: [--aml-data-download-path AML_DATA_DOWNLOAD_PATH] +100.83.37.175: [--data-path [DATA_PATH ...]] [--split SPLIT] +100.83.37.175: [--train-data-path [TRAIN_DATA_PATH ...]] +100.83.37.175: [--valid-data-path [VALID_DATA_PATH ...]] +100.83.37.175: [--test-data-path [TEST_DATA_PATH ...]] +100.83.37.175: [--data-cache-path DATA_CACHE_PATH] +100.83.37.175: [--vocab-size VOCAB_SIZE] [--vocab-file VOCAB_FILE] +100.83.37.175: [--merge-file MERGE_FILE] +100.83.37.175: [--vocab-extra-ids VOCAB_EXTRA_IDS] +100.83.37.175: [--seq-length SEQ_LENGTH] +100.83.37.175: [--encoder-seq-length ENCODER_SEQ_LENGTH] +100.83.37.175: [--decoder-seq-length DECODER_SEQ_LENGTH] +100.83.37.175: [--retriever-seq-length RETRIEVER_SEQ_LENGTH] +100.83.37.175: [--sample-rate SAMPLE_RATE] [--mask-prob MASK_PROB] +100.83.37.175: [--short-seq-prob SHORT_SEQ_PROB] [--mmap-warmup] +100.83.37.175: [--num-workers NUM_WORKERS] +100.83.37.175: [--tokenizer-type {BertWordPieceLowerCase,BertWordPieceCase,GPT2BPETokenizer,SentencePieceTokenizer,GPTSentencePieceTokenizer,NullTokenizer}] +100.83.37.175: [--tokenizer-model TOKENIZER_MODEL] +100.83.37.175: [--data-impl {mmap,infer}] [--reset-position-ids] +100.83.37.175: [--reset-attention-mask] [--eod-mask-loss] +100.83.37.175: [--train-data-exact-num-epochs TRAIN_DATA_EXACT_NUM_EPOCHS] +100.83.37.175: [--return-data-index] +100.83.37.175: [--data-efficiency-curriculum-learning] +100.83.37.175: [--train-idx-path TRAIN_IDX_PATH] +100.83.37.175: [--train-desc-path TRAIN_DESC_PATH] +100.83.37.175: [--train-doc-idx-path TRAIN_DOC_IDX_PATH] +100.83.37.175: [--train-sample-idx-path TRAIN_SAMPLE_IDX_PATH] +100.83.37.175: [--train-shuffle-idx-path TRAIN_SHUFFLE_IDX_PATH] +100.83.37.175: [--mask-tensor-adding] [--no-seq-len-plus-one-tokens] +100.83.37.175: [--adlr-autoresume] +100.83.37.175: [--adlr-autoresume-interval ADLR_AUTORESUME_INTERVAL] +100.83.37.175: [--ict-head-size ICT_HEAD_SIZE] +100.83.37.175: [--biencoder-projection-dim BIENCODER_PROJECTION_DIM] +100.83.37.175: [--biencoder-shared-query-context-model] +100.83.37.175: [--ict-load ICT_LOAD] [--bert-load BERT_LOAD] +100.83.37.175: [--titles-data-path TITLES_DATA_PATH] +100.83.37.175: [--query-in-block-prob QUERY_IN_BLOCK_PROB] +100.83.37.175: [--use-one-sent-docs] +100.83.37.175: [--evidence-data-path EVIDENCE_DATA_PATH] +100.83.37.175: [--retriever-report-topk-accuracies RETRIEVER_REPORT_TOPK_ACCURACIES [RETRIEVER_REPORT_TOPK_ACCURACIES ...]] +100.83.37.175: [--retriever-score-scaling] +100.83.37.175: 
[--block-data-path BLOCK_DATA_PATH] +100.83.37.175: [--embedding-path EMBEDDING_PATH] +100.83.37.175: [--indexer-batch-size INDEXER_BATCH_SIZE] +100.83.37.175: [--indexer-log-interval INDEXER_LOG_INTERVAL] +100.83.37.175: [--num-classes NUM_CLASSES] [--img-h IMG_H] +100.83.37.175: [--img-w IMG_W] [--num-channels NUM_CHANNELS] +100.83.37.175: [--patch-dim PATCH_DIM] +100.83.37.175: [--classes-fraction CLASSES_FRACTION] +100.83.37.175: [--data-per-class-fraction DATA_PER_CLASS_FRACTION] +100.83.37.175: [--no-data-sharding] [--head-lr-mult HEAD_LR_MULT] +100.83.37.175: [--vision-pretraining] +100.83.37.175: [--vision-pretraining-type {classify,inpaint,dino}] +100.83.37.175: [--vision-backbone-type {vit,mit,swin}] +100.83.37.175: [--swin-backbone-type {tiny,base,h3}] +100.83.37.175: [--mask-type {random,row}] +100.83.37.175: [--mask-factor MASK_FACTOR] +100.83.37.175: [--iter-per-epoch ITER_PER_EPOCH] +100.83.37.175: [--dino-local-img-size DINO_LOCAL_IMG_SIZE] +100.83.37.175: [--dino-local-crops-number DINO_LOCAL_CROPS_NUMBER] +100.83.37.175: [--dino-head-hidden-size DINO_HEAD_HIDDEN_SIZE] +100.83.37.175: [--dino-bottleneck-size DINO_BOTTLENECK_SIZE] +100.83.37.175: [--dino-freeze-last-layer DINO_FREEZE_LAST_LAYER] +100.83.37.175: [--dino-norm-last-layer] +100.83.37.175: [--dino-warmup-teacher-temp DINO_WARMUP_TEACHER_TEMP] +100.83.37.175: [--dino-teacher-temp DINO_TEACHER_TEMP] +100.83.37.175: [--dino-warmup-teacher-temp-epochs DINO_WARMUP_TEACHER_TEMP_EPOCHS] +100.83.37.175: [--log-params-norm] [--log-num-zeros-in-grad] +100.83.37.175: [--timing-log-level {0,1,2}] +100.83.37.175: [--no-barrier-with-level-1-timing] +100.83.37.175: [--timing-log-option {max,minmax,all}] +100.83.37.175: [--tensorboard-log-interval TENSORBOARD_LOG_INTERVAL] +100.83.37.175: [--tensorboard-queue-size TENSORBOARD_QUEUE_SIZE] +100.83.37.175: [--log-timers-to-tensorboard] +100.83.37.175: [--log-batch-size-to-tensorboard] +100.83.37.175: [--no-log-learnig-rate-to-tensorboard] +100.83.37.175: [--no-log-loss-scale-to-tensorboard] +100.83.37.175: [--log-validation-ppl-to-tensorboard] +100.83.37.175: [--log-optimizer-states-to-tensorboard] +100.83.37.175: [--log-memory-to-tensorboard] +100.83.37.175: [--log-world-size-to-tensorboard] +100.83.37.175: [--zero-stage ZERO_STAGE] [--zero-reduce-scatter] +100.83.37.175: [--zero-contigious-gradients] +100.83.37.175: [--zero-reduce-bucket-size ZERO_REDUCE_BUCKET_SIZE] +100.83.37.175: [--zero-allgather-bucket-size ZERO_ALLGATHER_BUCKET_SIZE] +100.83.37.175: [--remote-device {none,cpu,nvme}] [--use-pin-memory] +100.83.37.175: [--scattered-embeddings] [--split-transformers] +100.83.37.175: [--memory-centric-tiled-linear] +100.83.37.175: [--tile-factor TILE_FACTOR] +100.83.37.175: [--deepspeed-activation-checkpointing] +100.83.37.175: [--partition-activations] +100.83.37.175: [--contigious-checkpointing] [--checkpoint-in-cpu] +100.83.37.175: [--synchronize-each-layer] [--profile-backward] +100.83.37.175: [--num-layers-teacher NUM_LAYERS_TEACHER] +100.83.37.175: [--num-experts-teacher NUM_EXPERTS_TEACHER [NUM_EXPERTS_TEACHER ...]] +100.83.37.175: [--hidden-size-teacher HIDDEN_SIZE_TEACHER] +100.83.37.175: [--num-attention-heads-teacher NUM_ATTENTION_HEADS_TEACHER] +100.83.37.175: [--mos] [--kd] [--kd-alpha-ce KD_ALPHA_CE] +100.83.37.175: [--kd-beta-ce KD_BETA_CE] [--kd-temp KD_TEMP] +100.83.37.175: [--reset-iteration] [--load-teacher LOAD_TEACHER] +100.83.37.175: [--inference-batch-times-seqlen-threshold INFERENCE_BATCH_TIMES_SEQLEN_THRESHOLD] +100.83.37.175: [--max-tokens-to-oom 
MAX_TOKENS_TO_OOM] +100.83.37.175: [--output-bert-embeddings] +100.83.37.175: [--bert-embedder-type {megatron,huggingface}] +100.83.37.175: [--cache-fp8-weight] +100.83.37.175: [--cache-fp8-weight-fwd CACHE_FP8_WEIGHT_FWD] +100.83.37.175: [--fp8-e5m2] [--fp8-e4m3] [--fp8-hybrid] +100.83.37.175: [--no-fp8-wgrad] [--fp8-margin FP8_MARGIN] +100.83.37.175: [--fp8-interval FP8_INTERVAL] +100.83.37.175: [--transformer-impl {local,transformer_engine}] +100.83.37.175: [--fp8-amax-history-len FP8_AMAX_HISTORY_LEN] +100.83.37.175: [--fp8-amax-compute-algo {most_recent,max}] +100.83.37.175: [--retro-workdir RETRO_WORKDIR] +100.83.37.175: [--retro-add-retriever] +100.83.37.175: [--retro-cyclic-train-iters RETRO_CYCLIC_TRAIN_ITERS] +100.83.37.175: [--retro-encoder-layers RETRO_ENCODER_LAYERS] +100.83.37.175: [--retro-encoder-hidden-dropout RETRO_ENCODER_HIDDEN_DROPOUT] +100.83.37.175: [--retro-encoder-attention-dropout RETRO_ENCODER_ATTENTION_DROPOUT] +100.83.37.175: [--retro-num-neighbors RETRO_NUM_NEIGHBORS] +100.83.37.175: [--retro-num-retrieved-chunks RETRO_NUM_RETRIEVED_CHUNKS] +100.83.37.175: [--retro-return-doc-ids] +100.83.37.175: [--profile {pt,pt-full,hltv}] +100.83.37.175: [--profile-steps PROFILE_STEPS] [--deepspeed] +100.83.37.175: [--deepspeed_config DEEPSPEED_CONFIG] [--deepscale] +100.83.37.175: [--deepscale_config DEEPSCALE_CONFIG] +100.83.37.175: [--deepspeed_mpi] [--use_hpu] [--no_cuda] +100.83.37.175: pretrain_llama.py: error: unrecognized arguments: --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --use-torch-compile false --hpu-deterministic --hf-save /data/output/llama13b_multiling_800M/13-05-2024-11:55:44/hf_ckpt +100.83.37.188: usage: pretrain_llama.py [-h] [--num-layers NUM_LAYERS] +100.83.37.188: [--encoder-num-layers ENCODER_NUM_LAYERS] +100.83.37.188: [--decoder-num-layers DECODER_NUM_LAYERS] +100.83.37.188: [--num-experts NUM_EXPERTS [NUM_EXPERTS ...]] +100.83.37.188: [--mlp-type MLP_TYPE] [--topk TOPK] +100.83.37.188: [--expert-interval EXPERT_INTERVAL] +100.83.37.188: [--hidden-size HIDDEN_SIZE] +100.83.37.188: [--ffn-hidden-size FFN_HIDDEN_SIZE] +100.83.37.188: [--num-attention-heads NUM_ATTENTION_HEADS] +100.83.37.188: [--num-key-value-heads NUM_KEY_VALUE_HEADS] +100.83.37.188: [--kv-channels KV_CHANNELS] +100.83.37.188: [--max-position-embeddings MAX_POSITION_EMBEDDINGS] +100.83.37.188: [--use-rotary-position-embeddings] +100.83.37.188: [--rotary-percent ROTARY_PERCENT] +100.83.37.188: [--no-position-embedding] +100.83.37.188: [--make-vocab-size-divisible-by MAKE_VOCAB_SIZE_DIVISIBLE_BY] +100.83.37.188: [--normalization {layernorm,rmsnorm}] +100.83.37.188: [--use-fused-rmsnorm USE_FUSED_RMSNORM] +100.83.37.188: [--layernorm-epsilon LAYERNORM_EPSILON] +100.83.37.188: [--apply-layernorm-1p] [--disable-mem-efficient-ln] +100.83.37.188: [--apply-residual-connection-post-layernorm] +100.83.37.188: [--openai-gelu] [--squared-relu] [--swiglu] +100.83.37.188: [--onnx-safe ONNX_SAFE] [--bert-no-binary-head] +100.83.37.188: [--num-experts-switch NUM_EXPERTS_SWITCH] +100.83.37.188: [--untie-embeddings-and-output-weights] +100.83.37.188: [--embedding-weights-in-fp32] +100.83.37.188: [--fix-position-emb-redundant-alloc] +100.83.37.188: [--embed-layernorm] +100.83.37.188: [--kill-switch-path KILL_SWITCH_PATH] +100.83.37.188: [--attention-dropout ATTENTION_DROPOUT] +100.83.37.188: [--hidden-dropout HIDDEN_DROPOUT] +100.83.37.188: [--weight-decay WEIGHT_DECAY] +100.83.37.188: [--start-weight-decay START_WEIGHT_DECAY] +100.83.37.188: 
[--end-weight-decay END_WEIGHT_DECAY] +100.83.37.188: [--weight-decay-incr-style {constant,linear,cosine}] +100.83.37.188: [--clip-grad CLIP_GRAD] [--adam-beta1 ADAM_BETA1] +100.83.37.188: [--adam-beta2 ADAM_BETA2] [--adam-eps ADAM_EPS] +100.83.37.188: [--sgd-momentum SGD_MOMENTUM] +100.83.37.188: [--do-norm-bias-weight-decay] +100.83.37.188: [--micro-batch-size MICRO_BATCH_SIZE] +100.83.37.188: [--eval-micro-batch-size EVAL_MICRO_BATCH_SIZE] +100.83.37.188: [--batch-size BATCH_SIZE] +100.83.37.188: [--global-batch-size GLOBAL_BATCH_SIZE] +100.83.37.188: [--rampup-batch-size [RAMPUP_BATCH_SIZE ...]] +100.83.37.188: [--recompute-activations] +100.83.37.188: [--recompute-granularity {full,selective}] +100.83.37.188: [--distribute-saved-activations] +100.83.37.188: [--recompute-method {uniform,block}] +100.83.37.188: [--recompute-num-layers RECOMPUTE_NUM_LAYERS] +100.83.37.188: [--checkpoint-activations] +100.83.37.188: [--distribute-checkpointed-activations] +100.83.37.188: [--checkpoint-num-layers CHECKPOINT_NUM_LAYERS] +100.83.37.188: [--train-iters TRAIN_ITERS] +100.83.37.188: [--train-samples TRAIN_SAMPLES] +100.83.37.188: [--train-tokens TRAIN_TOKENS] [--random-ltd] +100.83.37.188: [--log-interval LOG_INTERVAL] +100.83.37.188: [--exit-interval EXIT_INTERVAL] +100.83.37.188: [--exit-duration-in-mins EXIT_DURATION_IN_MINS] +100.83.37.188: [--exit-signal-handler] +100.83.37.188: [--tensorboard-dir TENSORBOARD_DIR] +100.83.37.188: [--no-masked-softmax-fusion] [--no-bias-gelu-fusion] +100.83.37.188: [--no-bias-dropout-fusion] +100.83.37.188: [--disable-moe-token-dropping] +100.83.37.188: [--moe-train-capacity-factor MOE_TRAIN_CAPACITY_FACTOR] +100.83.37.188: [--moe-eval-capacity-factor MOE_EVAL_CAPACITY_FACTOR] +100.83.37.188: [--moe-min-capacity MOE_MIN_CAPACITY] +100.83.37.188: [--moe-loss-coeff MOE_LOSS_COEFF] +100.83.37.188: [--create-moe-param-group] [--use-flash-attn] +100.83.37.188: [--use-flash-attn-v2] [--use-flash-attn-triton] +100.83.37.188: [--use-fused-sdpa USE_FUSED_SDPA] +100.83.37.188: [--use-fused-sdpa-with-recompute USE_FUSED_SDPA_WITH_RECOMPUTE] +100.83.37.188: [--disable-bias-linear] +100.83.37.188: [--optimizer {adam,sgd,adamw,fusedadamw}] +100.83.37.188: [--dataloader-type {single,cyclic}] [--ds-inference] +100.83.37.188: [--cpu-optimizer] [--cpu_torch_adam] +100.83.37.188: [--no-pipeline-parallel] [--use-tutel] [--inference] +100.83.37.188: [--no-async-tensor-model-parallel-allreduce] +100.83.37.188: [--no-persist-layer-norm] [--sequence-parallel] +100.83.37.188: [--ds-sequence-parallel-size DS_SEQUENCE_PARALLEL_SIZE] +100.83.37.188: [--force-ds-sequence-parallel] +100.83.37.188: [--no-gradient-accumulation-fusion] +100.83.37.188: [--use-dataset-only USE_DATASET_ONLY] [--seed SEED] +100.83.37.188: [--data-parallel-random-init] +100.83.37.188: [--init-method-std INIT_METHOD_STD] +100.83.37.188: [--init-method-xavier-uniform] [--no-scaled-init] +100.83.37.188: [--lr LR] +100.83.37.188: [--lr-decay-style {constant,linear,cosine,inverse-square-root}] +100.83.37.188: [--lr-decay-iters LR_DECAY_ITERS] +100.83.37.188: [--lr-decay-samples LR_DECAY_SAMPLES] +100.83.37.188: [--lr-decay-tokens LR_DECAY_TOKENS] +100.83.37.188: [--lr-warmup-fraction LR_WARMUP_FRACTION] +100.83.37.188: [--lr-warmup-iters LR_WARMUP_ITERS] +100.83.37.188: [--lr-warmup-samples LR_WARMUP_SAMPLES] +100.83.37.188: [--lr-warmup-tokens LR_WARMUP_TOKENS] +100.83.37.188: [--warmup WARMUP] [--min-lr MIN_LR] +100.83.37.188: [--override-opt_param-scheduler] +100.83.37.188: [--use-checkpoint-opt_param-scheduler] 
[--save SAVE] +100.83.37.188: [--save-interval SAVE_INTERVAL] [--no-save-optim] +100.83.37.188: [--no-save-rng] [--load LOAD] [--no-load-optim] +100.83.37.188: [--no-load-rng] [--no-load-lr-state] [--finetune] +100.83.37.188: [--no-initialization] [--use-checkpoint-args] +100.83.37.188: [--exit-on-missing-checkpoint] +100.83.37.188: [--universal-checkpoint] [--verify-checkpoint] +100.83.37.188: [--verify-checkpoint-model-type {GPT,BLOOM,LLAMA}] +100.83.37.188: [--fp16] [--bf16] [--loss-scale LOSS_SCALE] +100.83.37.188: [--initial-loss-scale INITIAL_LOSS_SCALE] +100.83.37.188: [--min-loss-scale MIN_LOSS_SCALE] +100.83.37.188: [--loss-scale-window LOSS_SCALE_WINDOW] +100.83.37.188: [--hysteresis HYSTERESIS] +100.83.37.188: [--fp32-residual-connection] +100.83.37.188: [--no-query-key-layer-scaling] +100.83.37.188: [--attention-softmax-in-fp32] +100.83.37.188: [--accumulate-allreduce-grads-in-fp32] +100.83.37.188: [--fp16-lm-cross-entropy] +100.83.37.188: [--tensor-model-parallel-size TENSOR_MODEL_PARALLEL_SIZE] +100.83.37.188: [--enable-expert-tensor-parallelism] +100.83.37.188: [--pipeline-model-parallel-size PIPELINE_MODEL_PARALLEL_SIZE] +100.83.37.188: [--pipeline-model-parallel-split-rank PIPELINE_MODEL_PARALLEL_SPLIT_RANK] +100.83.37.188: [--moe-expert-parallel-size MOE_EXPERT_PARALLEL_SIZE] +100.83.37.188: [--model-parallel-size MODEL_PARALLEL_SIZE] +100.83.37.188: [--num-layers-per-virtual-pipeline-stage NUM_LAYERS_PER_VIRTUAL_PIPELINE_STAGE] +100.83.37.188: [--overlap-p2p-communication] +100.83.37.188: [--distributed-backend {nccl,gloo,ccl,hccl}] +100.83.37.188: [--distributed-timeout-minutes DISTRIBUTED_TIMEOUT_MINUTES] +100.83.37.188: [--DDP-impl {local,torch,FSDP}] +100.83.37.188: [--no-contiguous-buffers-in-local-ddp] +100.83.37.188: [--no-scatter-gather-tensors-in-pipeline] +100.83.37.188: [--use-ring-exchange-p2p] [--local-rank LOCAL_RANK] +100.83.37.188: [--lazy-mpu-init LAZY_MPU_INIT] +100.83.37.188: [--use-cpu-initialization] +100.83.37.188: [--empty-unused-memory-level {0,1,2}] +100.83.37.188: [--standalone-embedding-stage] +100.83.37.188: [--use-distributed-optimizer] +100.83.37.188: [--eval-iters EVAL_ITERS] +100.83.37.188: [--eval-interval EVAL_INTERVAL] [--skip-train] +100.83.37.188: [--eval-loss-exit-value EVAL_LOSS_EXIT_VALUE] +100.83.37.188: [--aml-data-download-path AML_DATA_DOWNLOAD_PATH] +100.83.37.188: [--data-path [DATA_PATH ...]] [--split SPLIT] +100.83.37.188: [--train-data-path [TRAIN_DATA_PATH ...]] +100.83.37.188: [--valid-data-path [VALID_DATA_PATH ...]] +100.83.37.188: [--test-data-path [TEST_DATA_PATH ...]] +100.83.37.188: [--data-cache-path DATA_CACHE_PATH] +100.83.37.188: [--vocab-size VOCAB_SIZE] [--vocab-file VOCAB_FILE] +100.83.37.188: [--merge-file MERGE_FILE] +100.83.37.188: [--vocab-extra-ids VOCAB_EXTRA_IDS] +100.83.37.188: [--seq-length SEQ_LENGTH] +100.83.37.188: [--encoder-seq-length ENCODER_SEQ_LENGTH] +100.83.37.188: [--decoder-seq-length DECODER_SEQ_LENGTH] +100.83.37.188: [--retriever-seq-length RETRIEVER_SEQ_LENGTH] +100.83.37.188: [--sample-rate SAMPLE_RATE] [--mask-prob MASK_PROB] +100.83.37.188: [--short-seq-prob SHORT_SEQ_PROB] [--mmap-warmup] +100.83.37.188: [--num-workers NUM_WORKERS] +100.83.37.188: [--tokenizer-type {BertWordPieceLowerCase,BertWordPieceCase,GPT2BPETokenizer,SentencePieceTokenizer,GPTSentencePieceTokenizer,NullTokenizer}] +100.83.37.188: [--tokenizer-model TOKENIZER_MODEL] +100.83.37.188: [--data-impl {mmap,infer}] [--reset-position-ids] +100.83.37.188: [--reset-attention-mask] [--eod-mask-loss] +100.83.37.188: 
[--train-data-exact-num-epochs TRAIN_DATA_EXACT_NUM_EPOCHS] +100.83.37.188: [--return-data-index] +100.83.37.188: [--data-efficiency-curriculum-learning] +100.83.37.188: [--train-idx-path TRAIN_IDX_PATH] +100.83.37.188: [--train-desc-path TRAIN_DESC_PATH] +100.83.37.188: [--train-doc-idx-path TRAIN_DOC_IDX_PATH] +100.83.37.188: [--train-sample-idx-path TRAIN_SAMPLE_IDX_PATH] +100.83.37.188: [--train-shuffle-idx-path TRAIN_SHUFFLE_IDX_PATH] +100.83.37.188: [--mask-tensor-adding] [--no-seq-len-plus-one-tokens] +100.83.37.188: [--adlr-autoresume] +100.83.37.188: [--adlr-autoresume-interval ADLR_AUTORESUME_INTERVAL] +100.83.37.188: [--ict-head-size ICT_HEAD_SIZE] +100.83.37.188: [--biencoder-projection-dim BIENCODER_PROJECTION_DIM] +100.83.37.188: [--biencoder-shared-query-context-model] +100.83.37.188: [--ict-load ICT_LOAD] [--bert-load BERT_LOAD] +100.83.37.188: [--titles-data-path TITLES_DATA_PATH] +100.83.37.188: [--query-in-block-prob QUERY_IN_BLOCK_PROB] +100.83.37.188: [--use-one-sent-docs] +100.83.37.188: [--evidence-data-path EVIDENCE_DATA_PATH] +100.83.37.188: [--retriever-report-topk-accuracies RETRIEVER_REPORT_TOPK_ACCURACIES [RETRIEVER_REPORT_TOPK_ACCURACIES ...]] +100.83.37.188: [--retriever-score-scaling] +100.83.37.188: [--block-data-path BLOCK_DATA_PATH] +100.83.37.188: [--embedding-path EMBEDDING_PATH] +100.83.37.188: [--indexer-batch-size INDEXER_BATCH_SIZE] +100.83.37.188: [--indexer-log-interval INDEXER_LOG_INTERVAL] +100.83.37.188: [--num-classes NUM_CLASSES] [--img-h IMG_H] +100.83.37.188: [--img-w IMG_W] [--num-channels NUM_CHANNELS] +100.83.37.188: [--patch-dim PATCH_DIM] +100.83.37.188: [--classes-fraction CLASSES_FRACTION] +100.83.37.188: [--data-per-class-fraction DATA_PER_CLASS_FRACTION] +100.83.37.188: [--no-data-sharding] [--head-lr-mult HEAD_LR_MULT] +100.83.37.188: [--vision-pretraining] +100.83.37.188: [--vision-pretraining-type {classify,inpaint,dino}] +100.83.37.188: [--vision-backbone-type {vit,mit,swin}] +100.83.37.188: [--swin-backbone-type {tiny,base,h3}] +100.83.37.188: [--mask-type {random,row}] +100.83.37.188: [--mask-factor MASK_FACTOR] +100.83.37.188: [--iter-per-epoch ITER_PER_EPOCH] +100.83.37.188: [--dino-local-img-size DINO_LOCAL_IMG_SIZE] +100.83.37.188: [--dino-local-crops-number DINO_LOCAL_CROPS_NUMBER] +100.83.37.188: [--dino-head-hidden-size DINO_HEAD_HIDDEN_SIZE] +100.83.37.188: [--dino-bottleneck-size DINO_BOTTLENECK_SIZE] +100.83.37.188: [--dino-freeze-last-layer DINO_FREEZE_LAST_LAYER] +100.83.37.188: [--dino-norm-last-layer] +100.83.37.188: [--dino-warmup-teacher-temp DINO_WARMUP_TEACHER_TEMP] +100.83.37.188: [--dino-teacher-temp DINO_TEACHER_TEMP] +100.83.37.188: [--dino-warmup-teacher-temp-epochs DINO_WARMUP_TEACHER_TEMP_EPOCHS] +100.83.37.188: [--log-params-norm] [--log-num-zeros-in-grad] +100.83.37.188: [--timing-log-level {0,1,2}] +100.83.37.188: [--no-barrier-with-level-1-timing] +100.83.37.188: [--timing-log-option {max,minmax,all}] +100.83.37.188: [--tensorboard-log-interval TENSORBOARD_LOG_INTERVAL] +100.83.37.188: [--tensorboard-queue-size TENSORBOARD_QUEUE_SIZE] +100.83.37.188: [--log-timers-to-tensorboard] +100.83.37.188: [--log-batch-size-to-tensorboard] +100.83.37.188: [--no-log-learnig-rate-to-tensorboard] +100.83.37.188: [--no-log-loss-scale-to-tensorboard] +100.83.37.188: [--log-validation-ppl-to-tensorboard] +100.83.37.188: [--log-optimizer-states-to-tensorboard] +100.83.37.188: [--log-memory-to-tensorboard] +100.83.37.188: [--log-world-size-to-tensorboard] +100.83.37.188: [--zero-stage ZERO_STAGE] 
[--zero-reduce-scatter] +100.83.37.188: [--zero-contigious-gradients] +100.83.37.188: [--zero-reduce-bucket-size ZERO_REDUCE_BUCKET_SIZE] +100.83.37.188: [--zero-allgather-bucket-size ZERO_ALLGATHER_BUCKET_SIZE] +100.83.37.188: [--remote-device {none,cpu,nvme}] [--use-pin-memory] +100.83.37.188: [--scattered-embeddings] [--split-transformers] +100.83.37.188: [--memory-centric-tiled-linear] +100.83.37.188: [--tile-factor TILE_FACTOR] +100.83.37.188: [--deepspeed-activation-checkpointing] +100.83.37.188: [--partition-activations] +100.83.37.188: [--contigious-checkpointing] [--checkpoint-in-cpu] +100.83.37.188: [--synchronize-each-layer] [--profile-backward] +100.83.37.188: [--num-layers-teacher NUM_LAYERS_TEACHER] +100.83.37.188: [--num-experts-teacher NUM_EXPERTS_TEACHER [NUM_EXPERTS_TEACHER ...]] +100.83.37.188: [--hidden-size-teacher HIDDEN_SIZE_TEACHER] +100.83.37.188: [--num-attention-heads-teacher NUM_ATTENTION_HEADS_TEACHER] +100.83.37.188: [--mos] [--kd] [--kd-alpha-ce KD_ALPHA_CE] +100.83.37.188: [--kd-beta-ce KD_BETA_CE] [--kd-temp KD_TEMP] +100.83.37.188: [--reset-iteration] [--load-teacher LOAD_TEACHER] +100.83.37.188: [--inference-batch-times-seqlen-threshold INFERENCE_BATCH_TIMES_SEQLEN_THRESHOLD] +100.83.37.188: [--max-tokens-to-oom MAX_TOKENS_TO_OOM] +100.83.37.188: [--output-bert-embeddings] +100.83.37.188: [--bert-embedder-type {megatron,huggingface}] +100.83.37.188: [--cache-fp8-weight] +100.83.37.188: [--cache-fp8-weight-fwd CACHE_FP8_WEIGHT_FWD] +100.83.37.188: [--fp8-e5m2] [--fp8-e4m3] [--fp8-hybrid] +100.83.37.188: [--no-fp8-wgrad] [--fp8-margin FP8_MARGIN] +100.83.37.188: [--fp8-interval FP8_INTERVAL] +100.83.37.188: [--transformer-impl {local,transformer_engine}] +100.83.37.188: [--fp8-amax-history-len FP8_AMAX_HISTORY_LEN] +100.83.37.188: [--fp8-amax-compute-algo {most_recent,max}] +100.83.37.188: [--retro-workdir RETRO_WORKDIR] +100.83.37.188: [--retro-add-retriever] +100.83.37.188: [--retro-cyclic-train-iters RETRO_CYCLIC_TRAIN_ITERS] +100.83.37.188: [--retro-encoder-layers RETRO_ENCODER_LAYERS] +100.83.37.188: [--retro-encoder-hidden-dropout RETRO_ENCODER_HIDDEN_DROPOUT] +100.83.37.188: [--retro-encoder-attention-dropout RETRO_ENCODER_ATTENTION_DROPOUT] +100.83.37.188: [--retro-num-neighbors RETRO_NUM_NEIGHBORS] +100.83.37.188: [--retro-num-retrieved-chunks RETRO_NUM_RETRIEVED_CHUNKS] +100.83.37.188: [--retro-return-doc-ids] +100.83.37.188: [--profile {pt,pt-full,hltv}] +100.83.37.188: [--profile-steps PROFILE_STEPS] [--deepspeed] +100.83.37.188: [--deepspeed_config DEEPSPEED_CONFIG] [--deepscale] +100.83.37.188: [--deepscale_config DEEPSCALE_CONFIG] +100.83.37.188: [--deepspeed_mpi] [--use_hpu] [--no_cuda] +100.83.37.188: pretrain_llama.py: error: unrecognized arguments: --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --use-torch-compile false --hpu-deterministic --hf-save /data/output/llama13b_multiling_800M/13-05-2024-11:55:44/hf_ckpt +100.83.37.188: usage: pretrain_llama.py [-h] [--num-layers NUM_LAYERS] +100.83.37.188: [--encoder-num-layers ENCODER_NUM_LAYERS] +100.83.37.188: [--decoder-num-layers DECODER_NUM_LAYERS] +100.83.37.188: [--num-experts NUM_EXPERTS [NUM_EXPERTS ...]] +100.83.37.188: [--mlp-type MLP_TYPE] [--topk TOPK] +100.83.37.188: [--expert-interval EXPERT_INTERVAL] +100.83.37.188: [--hidden-size HIDDEN_SIZE] +100.83.37.188: [--ffn-hidden-size FFN_HIDDEN_SIZE] +100.83.37.188: [--num-attention-heads NUM_ATTENTION_HEADS] +100.83.37.188: [--num-key-value-heads NUM_KEY_VALUE_HEADS] +100.83.37.188: 
[--kv-channels KV_CHANNELS] +100.83.37.188: [--max-position-embeddings MAX_POSITION_EMBEDDINGS] +100.83.37.188: [--use-rotary-position-embeddings] +100.83.37.188: [--rotary-percent ROTARY_PERCENT] +100.83.37.188: [--no-position-embedding] +100.83.37.188: [--make-vocab-size-divisible-by MAKE_VOCAB_SIZE_DIVISIBLE_BY] +100.83.37.188: [--normalization {layernorm,rmsnorm}] +100.83.37.188: [--use-fused-rmsnorm USE_FUSED_RMSNORM] +100.83.37.188: [--layernorm-epsilon LAYERNORM_EPSILON] +100.83.37.188: [--apply-layernorm-1p] [--disable-mem-efficient-ln] +100.83.37.188: [--apply-residual-connection-post-layernorm] +100.83.37.188: [--openai-gelu] [--squared-relu] [--swiglu] +100.83.37.188: [--onnx-safe ONNX_SAFE] [--bert-no-binary-head] +100.83.37.188: [--num-experts-switch NUM_EXPERTS_SWITCH] +100.83.37.188: [--untie-embeddings-and-output-weights] +100.83.37.188: [--embedding-weights-in-fp32] +100.83.37.188: [--fix-position-emb-redundant-alloc] +100.83.37.188: [--embed-layernorm] +100.83.37.188: [--kill-switch-path KILL_SWITCH_PATH] +100.83.37.188: [--attention-dropout ATTENTION_DROPOUT] +100.83.37.188: [--hidden-dropout HIDDEN_DROPOUT] +100.83.37.188: [--weight-decay WEIGHT_DECAY] +100.83.37.188: [--start-weight-decay START_WEIGHT_DECAY] +100.83.37.188: [--end-weight-decay END_WEIGHT_DECAY] +100.83.37.188: [--weight-decay-incr-style {constant,linear,cosine}] +100.83.37.188: [--clip-grad CLIP_GRAD] [--adam-beta1 ADAM_BETA1] +100.83.37.188: [--adam-beta2 ADAM_BETA2] [--adam-eps ADAM_EPS] +100.83.37.188: [--sgd-momentum SGD_MOMENTUM] +100.83.37.188: [--do-norm-bias-weight-decay] +100.83.37.188: [--micro-batch-size MICRO_BATCH_SIZE] +100.83.37.188: [--eval-micro-batch-size EVAL_MICRO_BATCH_SIZE] +100.83.37.188: [--batch-size BATCH_SIZE] +100.83.37.188: [--global-batch-size GLOBAL_BATCH_SIZE] +100.83.37.188: [--rampup-batch-size [RAMPUP_BATCH_SIZE ...]] +100.83.37.188: [--recompute-activations] +100.83.37.188: [--recompute-granularity {full,selective}] +100.83.37.188: [--distribute-saved-activations] +100.83.37.188: [--recompute-method {uniform,block}] +100.83.37.188: [--recompute-num-layers RECOMPUTE_NUM_LAYERS] +100.83.37.188: [--checkpoint-activations] +100.83.37.188: [--distribute-checkpointed-activations] +100.83.37.188: [--checkpoint-num-layers CHECKPOINT_NUM_LAYERS] +100.83.37.188: [--train-iters TRAIN_ITERS] +100.83.37.188: [--train-samples TRAIN_SAMPLES] +100.83.37.188: [--train-tokens TRAIN_TOKENS] [--random-ltd] +100.83.37.188: [--log-interval LOG_INTERVAL] +100.83.37.188: [--exit-interval EXIT_INTERVAL] +100.83.37.188: [--exit-duration-in-mins EXIT_DURATION_IN_MINS] +100.83.37.188: [--exit-signal-handler] +100.83.37.188: [--tensorboard-dir TENSORBOARD_DIR] +100.83.37.188: [--no-masked-softmax-fusion] [--no-bias-gelu-fusion] +100.83.37.188: [--no-bias-dropout-fusion] +100.83.37.188: [--disable-moe-token-dropping] +100.83.37.188: [--moe-train-capacity-factor MOE_TRAIN_CAPACITY_FACTOR] +100.83.37.188: [--moe-eval-capacity-factor MOE_EVAL_CAPACITY_FACTOR] +100.83.37.188: [--moe-min-capacity MOE_MIN_CAPACITY] +100.83.37.188: [--moe-loss-coeff MOE_LOSS_COEFF] +100.83.37.188: [--create-moe-param-group] [--use-flash-attn] +100.83.37.188: [--use-flash-attn-v2] [--use-flash-attn-triton] +100.83.37.188: [--use-fused-sdpa USE_FUSED_SDPA] +100.83.37.188: [--use-fused-sdpa-with-recompute USE_FUSED_SDPA_WITH_RECOMPUTE] +100.83.37.188: [--disable-bias-linear] +100.83.37.188: [--optimizer {adam,sgd,adamw,fusedadamw}] +100.83.37.188: [--dataloader-type {single,cyclic}] [--ds-inference] +100.83.37.188: 
[--cpu-optimizer] [--cpu_torch_adam] +100.83.37.188: [--no-pipeline-parallel] [--use-tutel] [--inference] +100.83.37.188: [--no-async-tensor-model-parallel-allreduce] +100.83.37.188: [--no-persist-layer-norm] [--sequence-parallel] +100.83.37.188: [--ds-sequence-parallel-size DS_SEQUENCE_PARALLEL_SIZE] +100.83.37.188: [--force-ds-sequence-parallel] +100.83.37.188: [--no-gradient-accumulation-fusion] +100.83.37.188: [--use-dataset-only USE_DATASET_ONLY] [--seed SEED] +100.83.37.188: [--data-parallel-random-init] +100.83.37.188: [--init-method-std INIT_METHOD_STD] +100.83.37.188: [--init-method-xavier-uniform] [--no-scaled-init] +100.83.37.188: [--lr LR] +100.83.37.188: [--lr-decay-style {constant,linear,cosine,inverse-square-root}] +100.83.37.188: [--lr-decay-iters LR_DECAY_ITERS] +100.83.37.188: [--lr-decay-samples LR_DECAY_SAMPLES] +100.83.37.188: [--lr-decay-tokens LR_DECAY_TOKENS] +100.83.37.188: [--lr-warmup-fraction LR_WARMUP_FRACTION] +100.83.37.188: [--lr-warmup-iters LR_WARMUP_ITERS] +100.83.37.188: [--lr-warmup-samples LR_WARMUP_SAMPLES] +100.83.37.188: [--lr-warmup-tokens LR_WARMUP_TOKENS] +100.83.37.188: [--warmup WARMUP] [--min-lr MIN_LR] +100.83.37.188: [--override-opt_param-scheduler] +100.83.37.188: [--use-checkpoint-opt_param-scheduler] [--save SAVE] +100.83.37.188: [--save-interval SAVE_INTERVAL] [--no-save-optim] +100.83.37.188: [--no-save-rng] [--load LOAD] [--no-load-optim] +100.83.37.188: [--no-load-rng] [--no-load-lr-state] [--finetune] +100.83.37.188: [--no-initialization] [--use-checkpoint-args] +100.83.37.188: [--exit-on-missing-checkpoint] +100.83.37.188: [--universal-checkpoint] [--verify-checkpoint] +100.83.37.188: [--verify-checkpoint-model-type {GPT,BLOOM,LLAMA}] +100.83.37.188: [--fp16] [--bf16] [--loss-scale LOSS_SCALE] +100.83.37.188: [--initial-loss-scale INITIAL_LOSS_SCALE] +100.83.37.188: [--min-loss-scale MIN_LOSS_SCALE] +100.83.37.188: [--loss-scale-window LOSS_SCALE_WINDOW] +100.83.37.188: [--hysteresis HYSTERESIS] +100.83.37.188: [--fp32-residual-connection] +100.83.37.188: [--no-query-key-layer-scaling] +100.83.37.188: [--attention-softmax-in-fp32] +100.83.37.188: [--accumulate-allreduce-grads-in-fp32] +100.83.37.188: [--fp16-lm-cross-entropy] +100.83.37.188: [--tensor-model-parallel-size TENSOR_MODEL_PARALLEL_SIZE] +100.83.37.188: [--enable-expert-tensor-parallelism] +100.83.37.188: [--pipeline-model-parallel-size PIPELINE_MODEL_PARALLEL_SIZE] +100.83.37.188: [--pipeline-model-parallel-split-rank PIPELINE_MODEL_PARALLEL_SPLIT_RANK] +100.83.37.188: [--moe-expert-parallel-size MOE_EXPERT_PARALLEL_SIZE] +100.83.37.188: [--model-parallel-size MODEL_PARALLEL_SIZE] +100.83.37.188: [--num-layers-per-virtual-pipeline-stage NUM_LAYERS_PER_VIRTUAL_PIPELINE_STAGE] +100.83.37.188: [--overlap-p2p-communication] +100.83.37.188: [--distributed-backend {nccl,gloo,ccl,hccl}] +100.83.37.188: [--distributed-timeout-minutes DISTRIBUTED_TIMEOUT_MINUTES] +100.83.37.188: [--DDP-impl {local,torch,FSDP}] +100.83.37.188: [--no-contiguous-buffers-in-local-ddp] +100.83.37.188: [--no-scatter-gather-tensors-in-pipeline] +100.83.37.188: [--use-ring-exchange-p2p] [--local-rank LOCAL_RANK] +100.83.37.188: [--lazy-mpu-init LAZY_MPU_INIT] +100.83.37.188: [--use-cpu-initialization] +100.83.37.188: [--empty-unused-memory-level {0,1,2}] +100.83.37.188: [--standalone-embedding-stage] +100.83.37.188: [--use-distributed-optimizer] +100.83.37.188: [--eval-iters EVAL_ITERS] +100.83.37.188: [--eval-interval EVAL_INTERVAL] [--skip-train] +100.83.37.188: [--eval-loss-exit-value 
EVAL_LOSS_EXIT_VALUE] +100.83.37.188: [--aml-data-download-path AML_DATA_DOWNLOAD_PATH] +100.83.37.188: [--data-path [DATA_PATH ...]] [--split SPLIT] +100.83.37.188: [--train-data-path [TRAIN_DATA_PATH ...]] +100.83.37.188: [--valid-data-path [VALID_DATA_PATH ...]] +100.83.37.188: [--test-data-path [TEST_DATA_PATH ...]] +100.83.37.188: [--data-cache-path DATA_CACHE_PATH] +100.83.37.188: [--vocab-size VOCAB_SIZE] [--vocab-file VOCAB_FILE] +100.83.37.188: [--merge-file MERGE_FILE] +100.83.37.188: [--vocab-extra-ids VOCAB_EXTRA_IDS] +100.83.37.188: [--seq-length SEQ_LENGTH] +100.83.37.188: [--encoder-seq-length ENCODER_SEQ_LENGTH] +100.83.37.188: [--decoder-seq-length DECODER_SEQ_LENGTH] +100.83.37.188: [--retriever-seq-length RETRIEVER_SEQ_LENGTH] +100.83.37.188: [--sample-rate SAMPLE_RATE] [--mask-prob MASK_PROB] +100.83.37.188: [--short-seq-prob SHORT_SEQ_PROB] [--mmap-warmup] +100.83.37.188: [--num-workers NUM_WORKERS] +100.83.37.188: [--tokenizer-type {BertWordPieceLowerCase,BertWordPieceCase,GPT2BPETokenizer,SentencePieceTokenizer,GPTSentencePieceTokenizer,NullTokenizer}] +100.83.37.188: [--tokenizer-model TOKENIZER_MODEL] +100.83.37.188: [--data-impl {mmap,infer}] [--reset-position-ids] +100.83.37.188: [--reset-attention-mask] [--eod-mask-loss] +100.83.37.188: [--train-data-exact-num-epochs TRAIN_DATA_EXACT_NUM_EPOCHS] +100.83.37.188: [--return-data-index] +100.83.37.188: [--data-efficiency-curriculum-learning] +100.83.37.188: [--train-idx-path TRAIN_IDX_PATH] +100.83.37.188: [--train-desc-path TRAIN_DESC_PATH] +100.83.37.188: [--train-doc-idx-path TRAIN_DOC_IDX_PATH] +100.83.37.188: [--train-sample-idx-path TRAIN_SAMPLE_IDX_PATH] +100.83.37.188: [--train-shuffle-idx-path TRAIN_SHUFFLE_IDX_PATH] +100.83.37.188: [--mask-tensor-adding] [--no-seq-len-plus-one-tokens] +100.83.37.188: [--adlr-autoresume] +100.83.37.188: [--adlr-autoresume-interval ADLR_AUTORESUME_INTERVAL] +100.83.37.188: [--ict-head-size ICT_HEAD_SIZE] +100.83.37.188: [--biencoder-projection-dim BIENCODER_PROJECTION_DIM] +100.83.37.188: [--biencoder-shared-query-context-model] +100.83.37.188: [--ict-load ICT_LOAD] [--bert-load BERT_LOAD] +100.83.37.188: [--titles-data-path TITLES_DATA_PATH] +100.83.37.188: [--query-in-block-prob QUERY_IN_BLOCK_PROB] +100.83.37.188: [--use-one-sent-docs] +100.83.37.188: [--evidence-data-path EVIDENCE_DATA_PATH] +100.83.37.188: [--retriever-report-topk-accuracies RETRIEVER_REPORT_TOPK_ACCURACIES [RETRIEVER_REPORT_TOPK_ACCURACIES ...]] +100.83.37.188: [--retriever-score-scaling] +100.83.37.188: [--block-data-path BLOCK_DATA_PATH] +100.83.37.188: [--embedding-path EMBEDDING_PATH] +100.83.37.188: [--indexer-batch-size INDEXER_BATCH_SIZE] +100.83.37.188: [--indexer-log-interval INDEXER_LOG_INTERVAL] +100.83.37.188: [--num-classes NUM_CLASSES] [--img-h IMG_H] +100.83.37.188: [--img-w IMG_W] [--num-channels NUM_CHANNELS] +100.83.37.188: [--patch-dim PATCH_DIM] +100.83.37.188: [--classes-fraction CLASSES_FRACTION] +100.83.37.188: [--data-per-class-fraction DATA_PER_CLASS_FRACTION] +100.83.37.188: [--no-data-sharding] [--head-lr-mult HEAD_LR_MULT] +100.83.37.188: [--vision-pretraining] +100.83.37.188: [--vision-pretraining-type {classify,inpaint,dino}] +100.83.37.188: [--vision-backbone-type {vit,mit,swin}] +100.83.37.188: [--swin-backbone-type {tiny,base,h3}] +100.83.37.188: [--mask-type {random,row}] +100.83.37.188: [--mask-factor MASK_FACTOR] +100.83.37.188: [--iter-per-epoch ITER_PER_EPOCH] +100.83.37.188: [--dino-local-img-size DINO_LOCAL_IMG_SIZE] +100.83.37.188: [--dino-local-crops-number 
DINO_LOCAL_CROPS_NUMBER] +100.83.37.188: [--dino-head-hidden-size DINO_HEAD_HIDDEN_SIZE] +100.83.37.188: [--dino-bottleneck-size DINO_BOTTLENECK_SIZE] +100.83.37.188: [--dino-freeze-last-layer DINO_FREEZE_LAST_LAYER] +100.83.37.188: [--dino-norm-last-layer] +100.83.37.188: [--dino-warmup-teacher-temp DINO_WARMUP_TEACHER_TEMP] +100.83.37.188: [--dino-teacher-temp DINO_TEACHER_TEMP] +100.83.37.188: [--dino-warmup-teacher-temp-epochs DINO_WARMUP_TEACHER_TEMP_EPOCHS] +100.83.37.188: [--log-params-norm] [--log-num-zeros-in-grad] +100.83.37.188: [--timing-log-level {0,1,2}] +100.83.37.188: [--no-barrier-with-level-1-timing] +100.83.37.188: [--timing-log-option {max,minmax,all}] +100.83.37.188: [--tensorboard-log-interval TENSORBOARD_LOG_INTERVAL] +100.83.37.188: [--tensorboard-queue-size TENSORBOARD_QUEUE_SIZE] +100.83.37.188: [--log-timers-to-tensorboard] +100.83.37.188: [--log-batch-size-to-tensorboard] +100.83.37.188: [--no-log-learnig-rate-to-tensorboard] +100.83.37.188: [--no-log-loss-scale-to-tensorboard] +100.83.37.188: [--log-validation-ppl-to-tensorboard] +100.83.37.188: [--log-optimizer-states-to-tensorboard] +100.83.37.188: [--log-memory-to-tensorboard] +100.83.37.188: [--log-world-size-to-tensorboard] +100.83.37.188: [--zero-stage ZERO_STAGE] [--zero-reduce-scatter] +100.83.37.188: [--zero-contigious-gradients] +100.83.37.188: [--zero-reduce-bucket-size ZERO_REDUCE_BUCKET_SIZE] +100.83.37.188: [--zero-allgather-bucket-size ZERO_ALLGATHER_BUCKET_SIZE] +100.83.37.188: [--remote-device {none,cpu,nvme}] [--use-pin-memory] +100.83.37.188: [--scattered-embeddings] [--split-transformers] +100.83.37.188: [--memory-centric-tiled-linear] +100.83.37.188: [--tile-factor TILE_FACTOR] +100.83.37.188: [--deepspeed-activation-checkpointing] +100.83.37.188: [--partition-activations] +100.83.37.188: [--contigious-checkpointing] [--checkpoint-in-cpu] +100.83.37.188: [--synchronize-each-layer] [--profile-backward] +100.83.37.188: [--num-layers-teacher NUM_LAYERS_TEACHER] +100.83.37.188: [--num-experts-teacher NUM_EXPERTS_TEACHER [NUM_EXPERTS_TEACHER ...]] +100.83.37.188: [--hidden-size-teacher HIDDEN_SIZE_TEACHER] +100.83.37.188: [--num-attention-heads-teacher NUM_ATTENTION_HEADS_TEACHER] +100.83.37.188: [--mos] [--kd] [--kd-alpha-ce KD_ALPHA_CE] +100.83.37.188: [--kd-beta-ce KD_BETA_CE] [--kd-temp KD_TEMP] +100.83.37.188: [--reset-iteration] [--load-teacher LOAD_TEACHER] +100.83.37.188: [--inference-batch-times-seqlen-threshold INFERENCE_BATCH_TIMES_SEQLEN_THRESHOLD] +100.83.37.188: [--max-tokens-to-oom MAX_TOKENS_TO_OOM] +100.83.37.188: [--output-bert-embeddings] +100.83.37.188: [--bert-embedder-type {megatron,huggingface}] +100.83.37.188: [--cache-fp8-weight] +100.83.37.188: [--cache-fp8-weight-fwd CACHE_FP8_WEIGHT_FWD] +100.83.37.188: [--fp8-e5m2] [--fp8-e4m3] [--fp8-hybrid] +100.83.37.188: [--no-fp8-wgrad] [--fp8-margin FP8_MARGIN] +100.83.37.188: [--fp8-interval FP8_INTERVAL] +100.83.37.188: [--transformer-impl {local,transformer_engine}] +100.83.37.188: [--fp8-amax-history-len FP8_AMAX_HISTORY_LEN] +100.83.37.188: [--fp8-amax-compute-algo {most_recent,max}] +100.83.37.188: [--retro-workdir RETRO_WORKDIR] +100.83.37.188: [--retro-add-retriever] +100.83.37.188: [--retro-cyclic-train-iters RETRO_CYCLIC_TRAIN_ITERS] +100.83.37.188: [--retro-encoder-layers RETRO_ENCODER_LAYERS] +100.83.37.188: [--retro-encoder-hidden-dropout RETRO_ENCODER_HIDDEN_DROPOUT] +100.83.37.188: [--retro-encoder-attention-dropout RETRO_ENCODER_ATTENTION_DROPOUT] +100.83.37.188: [--retro-num-neighbors RETRO_NUM_NEIGHBORS] 
+100.83.37.188: [--retro-num-retrieved-chunks RETRO_NUM_RETRIEVED_CHUNKS] +100.83.37.188: [--retro-return-doc-ids] +100.83.37.188: [--profile {pt,pt-full,hltv}] +100.83.37.188: [--profile-steps PROFILE_STEPS] [--deepspeed] +100.83.37.188: [--deepspeed_config DEEPSPEED_CONFIG] [--deepscale] +100.83.37.188: [--deepscale_config DEEPSCALE_CONFIG] +100.83.37.188: [--deepspeed_mpi] [--use_hpu] [--no_cuda] +100.83.37.188: usage: pretrain_llama.py [-h] [--num-layers NUM_LAYERS] +100.83.37.188: [--encoder-num-layers ENCODER_NUM_LAYERS] +100.83.37.188: [--decoder-num-layers DECODER_NUM_LAYERS] +100.83.37.188: [--num-experts NUM_EXPERTS [NUM_EXPERTS ...]] +100.83.37.188: [--mlp-type MLP_TYPE] [--topk TOPK] +100.83.37.188: [--expert-interval EXPERT_INTERVAL] +100.83.37.188: [--hidden-size HIDDEN_SIZE] +100.83.37.188: [--ffn-hidden-size FFN_HIDDEN_SIZE] +100.83.37.188: [--num-attention-heads NUM_ATTENTION_HEADS] +100.83.37.188: [--num-key-value-heads NUM_KEY_VALUE_HEADS] +100.83.37.188: [--kv-channels KV_CHANNELS] +100.83.37.188: [--max-position-embeddings MAX_POSITION_EMBEDDINGS] +100.83.37.188: [--use-rotary-position-embeddings] +100.83.37.188: [--rotary-percent ROTARY_PERCENT] +100.83.37.188: [--no-position-embedding] +100.83.37.188: [--make-vocab-size-divisible-by MAKE_VOCAB_SIZE_DIVISIBLE_BY] +100.83.37.188: [--normalization {layernorm,rmsnorm}] +100.83.37.188: [--use-fused-rmsnorm USE_FUSED_RMSNORM] +100.83.37.188: [--layernorm-epsilon LAYERNORM_EPSILON] +100.83.37.188: [--apply-layernorm-1p] [--disable-mem-efficient-ln] +100.83.37.188: [--apply-residual-connection-post-layernorm] +100.83.37.188: [--openai-gelu] [--squared-relu] [--swiglu] +100.83.37.188: [--onnx-safe ONNX_SAFE] [--bert-no-binary-head] +100.83.37.188: [--num-experts-switch NUM_EXPERTS_SWITCH] +100.83.37.188: [--untie-embeddings-and-output-weights] +100.83.37.188: [--embedding-weights-in-fp32] +100.83.37.188: [--fix-position-emb-redundant-alloc] +100.83.37.188: [--embed-layernorm] +100.83.37.188: [--kill-switch-path KILL_SWITCH_PATH] +100.83.37.188: [--attention-dropout ATTENTION_DROPOUT] +100.83.37.188: [--hidden-dropout HIDDEN_DROPOUT] +100.83.37.188: [--weight-decay WEIGHT_DECAY] +100.83.37.188: [--start-weight-decay START_WEIGHT_DECAY] +100.83.37.188: [--end-weight-decay END_WEIGHT_DECAY] +100.83.37.188: [--weight-decay-incr-style {constant,linear,cosine}] +100.83.37.188: [--clip-grad CLIP_GRAD] [--adam-beta1 ADAM_BETA1] +100.83.37.188: [--adam-beta2 ADAM_BETA2] [--adam-eps ADAM_EPS] +100.83.37.188: [--sgd-momentum SGD_MOMENTUM] +100.83.37.188: [--do-norm-bias-weight-decay] +100.83.37.188: [--micro-batch-size MICRO_BATCH_SIZE] +100.83.37.188: [--eval-micro-batch-size EVAL_MICRO_BATCH_SIZE] +100.83.37.188: [--batch-size BATCH_SIZE] +100.83.37.188: [--global-batch-size GLOBAL_BATCH_SIZE] +100.83.37.188: [--rampup-batch-size [RAMPUP_BATCH_SIZE ...]] +100.83.37.188: [--recompute-activations] +100.83.37.188: [--recompute-granularity {full,selective}] +100.83.37.188: [--distribute-saved-activations] +100.83.37.188: [--recompute-method {uniform,block}] +100.83.37.188: [--recompute-num-layers RECOMPUTE_NUM_LAYERS] +100.83.37.188: [--checkpoint-activations] +100.83.37.188: [--distribute-checkpointed-activations] +100.83.37.188: [--checkpoint-num-layers CHECKPOINT_NUM_LAYERS] +100.83.37.188: [--train-iters TRAIN_ITERS] +100.83.37.188: [--train-samples TRAIN_SAMPLES] +100.83.37.188: [--train-tokens TRAIN_TOKENS] [--random-ltd] +100.83.37.188: [--log-interval LOG_INTERVAL] +100.83.37.188: [--exit-interval EXIT_INTERVAL] +100.83.37.188: 
[--exit-duration-in-mins EXIT_DURATION_IN_MINS] +100.83.37.188: [--exit-signal-handler] +100.83.37.188: [--tensorboard-dir TENSORBOARD_DIR] +100.83.37.188: [--no-masked-softmax-fusion] [--no-bias-gelu-fusion] +100.83.37.188: [--no-bias-dropout-fusion] +100.83.37.188: [--disable-moe-token-dropping] +100.83.37.188: [--moe-train-capacity-factor MOE_TRAIN_CAPACITY_FACTOR] +100.83.37.188: [--moe-eval-capacity-factor MOE_EVAL_CAPACITY_FACTOR] +100.83.37.188: [--moe-min-capacity MOE_MIN_CAPACITY] +100.83.37.188: [--moe-loss-coeff MOE_LOSS_COEFF] +100.83.37.188: [--create-moe-param-group] [--use-flash-attn] +100.83.37.188: [--use-flash-attn-v2] [--use-flash-attn-triton] +100.83.37.188: [--use-fused-sdpa USE_FUSED_SDPA] +100.83.37.188: [--use-fused-sdpa-with-recompute USE_FUSED_SDPA_WITH_RECOMPUTE] +100.83.37.188: [--disable-bias-linear] +100.83.37.188: [--optimizer {adam,sgd,adamw,fusedadamw}] +100.83.37.188: [--dataloader-type {single,cyclic}] [--ds-inference] +100.83.37.188: [--cpu-optimizer] [--cpu_torch_adam] +100.83.37.188: [--no-pipeline-parallel] [--use-tutel] [--inference] +100.83.37.188: [--no-async-tensor-model-parallel-allreduce] +100.83.37.188: [--no-persist-layer-norm] [--sequence-parallel] +100.83.37.188: [--ds-sequence-parallel-size DS_SEQUENCE_PARALLEL_SIZE] +100.83.37.188: [--force-ds-sequence-parallel] +100.83.37.188: [--no-gradient-accumulation-fusion] +100.83.37.188: [--use-dataset-only USE_DATASET_ONLY] [--seed SEED] +100.83.37.188: [--data-parallel-random-init] +100.83.37.188: [--init-method-std INIT_METHOD_STD] +100.83.37.188: [--init-method-xavier-uniform] [--no-scaled-init] +100.83.37.188: [--lr LR] +100.83.37.188: [--lr-decay-style {constant,linear,cosine,inverse-square-root}] +100.83.37.188: [--lr-decay-iters LR_DECAY_ITERS] +100.83.37.188: [--lr-decay-samples LR_DECAY_SAMPLES] +100.83.37.188: [--lr-decay-tokens LR_DECAY_TOKENS] +100.83.37.188: [--lr-warmup-fraction LR_WARMUP_FRACTION] +100.83.37.188: [--lr-warmup-iters LR_WARMUP_ITERS] +100.83.37.188: [--lr-warmup-samples LR_WARMUP_SAMPLES] +100.83.37.188: [--lr-warmup-tokens LR_WARMUP_TOKENS] +100.83.37.188: [--warmup WARMUP] [--min-lr MIN_LR] +100.83.37.188: [--override-opt_param-scheduler] +100.83.37.188: [--use-checkpoint-opt_param-scheduler] [--save SAVE] +100.83.37.188: [--save-interval SAVE_INTERVAL] [--no-save-optim] +100.83.37.188: [--no-save-rng] [--load LOAD] [--no-load-optim] +100.83.37.188: [--no-load-rng] [--no-load-lr-state] [--finetune] +100.83.37.188: [--no-initialization] [--use-checkpoint-args] +100.83.37.188: [--exit-on-missing-checkpoint] +100.83.37.188: [--universal-checkpoint] [--verify-checkpoint] +100.83.37.188: [--verify-checkpoint-model-type {GPT,BLOOM,LLAMA}] +100.83.37.188: [--fp16] [--bf16] [--loss-scale LOSS_SCALE] +100.83.37.188: [--initial-loss-scale INITIAL_LOSS_SCALE] +100.83.37.188: [--min-loss-scale MIN_LOSS_SCALE] +100.83.37.188: [--loss-scale-window LOSS_SCALE_WINDOW] +100.83.37.188: [--hysteresis HYSTERESIS] +100.83.37.188: [--fp32-residual-connection] +100.83.37.188: [--no-query-key-layer-scaling] +100.83.37.188: [--attention-softmax-in-fp32] +100.83.37.188: [--accumulate-allreduce-grads-in-fp32] +100.83.37.188: [--fp16-lm-cross-entropy] +100.83.37.188: [--tensor-model-parallel-size TENSOR_MODEL_PARALLEL_SIZE] +100.83.37.188: [--enable-expert-tensor-parallelism] +100.83.37.188: [--pipeline-model-parallel-size PIPELINE_MODEL_PARALLEL_SIZE] +100.83.37.188: [--pipeline-model-parallel-split-rank PIPELINE_MODEL_PARALLEL_SPLIT_RANK] +100.83.37.188: [--moe-expert-parallel-size 
MOE_EXPERT_PARALLEL_SIZE] +100.83.37.188: [--model-parallel-size MODEL_PARALLEL_SIZE] +100.83.37.188: [--num-layers-per-virtual-pipeline-stage NUM_LAYERS_PER_VIRTUAL_PIPELINE_STAGE] +100.83.37.188: [--overlap-p2p-communication] +100.83.37.188: [--distributed-backend {nccl,gloo,ccl,hccl}] +100.83.37.188: [--distributed-timeout-minutes DISTRIBUTED_TIMEOUT_MINUTES] +100.83.37.188: [--DDP-impl {local,torch,FSDP}] +100.83.37.188: [--no-contiguous-buffers-in-local-ddp] +100.83.37.188: [--no-scatter-gather-tensors-in-pipeline] +100.83.37.188: [--use-ring-exchange-p2p] [--local-rank LOCAL_RANK] +100.83.37.188: [--lazy-mpu-init LAZY_MPU_INIT] +100.83.37.188: [--use-cpu-initialization] +100.83.37.188: [--empty-unused-memory-level {0,1,2}] +100.83.37.188: [--standalone-embedding-stage] +100.83.37.188: [--use-distributed-optimizer] +100.83.37.188: [--eval-iters EVAL_ITERS] +100.83.37.188: [--eval-interval EVAL_INTERVAL] [--skip-train] +100.83.37.188: [--eval-loss-exit-value EVAL_LOSS_EXIT_VALUE] +100.83.37.188: [--aml-data-download-path AML_DATA_DOWNLOAD_PATH] +100.83.37.188: [--data-path [DATA_PATH ...]] [--split SPLIT] +100.83.37.188: [--train-data-path [TRAIN_DATA_PATH ...]] +100.83.37.188: [--valid-data-path [VALID_DATA_PATH ...]] +100.83.37.188: [--test-data-path [TEST_DATA_PATH ...]] +100.83.37.188: [--data-cache-path DATA_CACHE_PATH] +100.83.37.188: [--vocab-size VOCAB_SIZE] [--vocab-file VOCAB_FILE] +100.83.37.188: [--merge-file MERGE_FILE] +100.83.37.188: [--vocab-extra-ids VOCAB_EXTRA_IDS] +100.83.37.188: [--seq-length SEQ_LENGTH] +100.83.37.188: [--encoder-seq-length ENCODER_SEQ_LENGTH] +100.83.37.188: [--decoder-seq-length DECODER_SEQ_LENGTH] +100.83.37.188: [--retriever-seq-length RETRIEVER_SEQ_LENGTH] +100.83.37.188: [--sample-rate SAMPLE_RATE] [--mask-prob MASK_PROB] +100.83.37.188: [--short-seq-prob SHORT_SEQ_PROB] [--mmap-warmup] +100.83.37.188: [--num-workers NUM_WORKERS] +100.83.37.188: [--tokenizer-type {BertWordPieceLowerCase,BertWordPieceCase,GPT2BPETokenizer,SentencePieceTokenizer,GPTSentencePieceTokenizer,NullTokenizer}] +100.83.37.188: [--tokenizer-model TOKENIZER_MODEL] +100.83.37.188: [--data-impl {mmap,infer}] [--reset-position-ids] +100.83.37.188: [--reset-attention-mask] [--eod-mask-loss] +100.83.37.188: [--train-data-exact-num-epochs TRAIN_DATA_EXACT_NUM_EPOCHS] +100.83.37.188: [--return-data-index] +100.83.37.188: [--data-efficiency-curriculum-learning] +100.83.37.188: [--train-idx-path TRAIN_IDX_PATH] +100.83.37.188: [--train-desc-path TRAIN_DESC_PATH] +100.83.37.188: [--train-doc-idx-path TRAIN_DOC_IDX_PATH] +100.83.37.188: [--train-sample-idx-path TRAIN_SAMPLE_IDX_PATH] +100.83.37.188: [--train-shuffle-idx-path TRAIN_SHUFFLE_IDX_PATH] +100.83.37.188: [--mask-tensor-adding] [--no-seq-len-plus-one-tokens] +100.83.37.188: [--adlr-autoresume] +100.83.37.188: [--adlr-autoresume-interval ADLR_AUTORESUME_INTERVAL] +100.83.37.188: [--ict-head-size ICT_HEAD_SIZE] +100.83.37.188: [--biencoder-projection-dim BIENCODER_PROJECTION_DIM] +100.83.37.188: [--biencoder-shared-query-context-model] +100.83.37.188: [--ict-load ICT_LOAD] [--bert-load BERT_LOAD] +100.83.37.188: [--titles-data-path TITLES_DATA_PATH] +100.83.37.188: [--query-in-block-prob QUERY_IN_BLOCK_PROB] +100.83.37.188: [--use-one-sent-docs] +100.83.37.188: [--evidence-data-path EVIDENCE_DATA_PATH] +100.83.37.188: [--retriever-report-topk-accuracies RETRIEVER_REPORT_TOPK_ACCURACIES [RETRIEVER_REPORT_TOPK_ACCURACIES ...]] +100.83.37.188: [--retriever-score-scaling] +100.83.37.188: [--block-data-path BLOCK_DATA_PATH] 
+100.83.37.188: [--embedding-path EMBEDDING_PATH] +100.83.37.188: [--indexer-batch-size INDEXER_BATCH_SIZE] +100.83.37.188: [--indexer-log-interval INDEXER_LOG_INTERVAL] +100.83.37.188: [--num-classes NUM_CLASSES] [--img-h IMG_H] +100.83.37.188: [--img-w IMG_W] [--num-channels NUM_CHANNELS] +100.83.37.188: [--patch-dim PATCH_DIM] +100.83.37.188: [--classes-fraction CLASSES_FRACTION] +100.83.37.188: [--data-per-class-fraction DATA_PER_CLASS_FRACTION] +100.83.37.188: [--no-data-sharding] [--head-lr-mult HEAD_LR_MULT] +100.83.37.188: [--vision-pretraining] +100.83.37.188: [--vision-pretraining-type {classify,inpaint,dino}] +100.83.37.188: [--vision-backbone-type {vit,mit,swin}] +100.83.37.188: [--swin-backbone-type {tiny,base,h3}] +100.83.37.188: [--mask-type {random,row}] +100.83.37.188: [--mask-factor MASK_FACTOR] +100.83.37.188: [--iter-per-epoch ITER_PER_EPOCH] +100.83.37.188: [--dino-local-img-size DINO_LOCAL_IMG_SIZE] +100.83.37.188: [--dino-local-crops-number DINO_LOCAL_CROPS_NUMBER] +100.83.37.188: [--dino-head-hidden-size DINO_HEAD_HIDDEN_SIZE] +100.83.37.188: [--dino-bottleneck-size DINO_BOTTLENECK_SIZE] +100.83.37.188: [--dino-freeze-last-layer DINO_FREEZE_LAST_LAYER] +100.83.37.188: [--dino-norm-last-layer] +100.83.37.188: [--dino-warmup-teacher-temp DINO_WARMUP_TEACHER_TEMP] +100.83.37.188: [--dino-teacher-temp DINO_TEACHER_TEMP] +100.83.37.188: [--dino-warmup-teacher-temp-epochs DINO_WARMUP_TEACHER_TEMP_EPOCHS] +100.83.37.188: [--log-params-norm] [--log-num-zeros-in-grad] +100.83.37.188: [--timing-log-level {0,1,2}] +100.83.37.188: [--no-barrier-with-level-1-timing] +100.83.37.188: [--timing-log-option {max,minmax,all}] +100.83.37.188: [--tensorboard-log-interval TENSORBOARD_LOG_INTERVAL] +100.83.37.188: [--tensorboard-queue-size TENSORBOARD_QUEUE_SIZE] +100.83.37.188: [--log-timers-to-tensorboard] +100.83.37.188: [--log-batch-size-to-tensorboard] +100.83.37.188: [--no-log-learnig-rate-to-tensorboard] +100.83.37.188: [--no-log-loss-scale-to-tensorboard] +100.83.37.188: [--log-validation-ppl-to-tensorboard] +100.83.37.188: [--log-optimizer-states-to-tensorboard] +100.83.37.188: [--log-memory-to-tensorboard] +100.83.37.188: [--log-world-size-to-tensorboard] +100.83.37.188: [--zero-stage ZERO_STAGE] [--zero-reduce-scatter] +100.83.37.188: [--zero-contigious-gradients] +100.83.37.188: [--zero-reduce-bucket-size ZERO_REDUCE_BUCKET_SIZE] +100.83.37.188: [--zero-allgather-bucket-size ZERO_ALLGATHER_BUCKET_SIZE] +100.83.37.188: [--remote-device {none,cpu,nvme}] [--use-pin-memory] +100.83.37.188: [--scattered-embeddings] [--split-transformers] +100.83.37.188: [--memory-centric-tiled-linear] +100.83.37.188: [--tile-factor TILE_FACTOR] +100.83.37.188: [--deepspeed-activation-checkpointing] +100.83.37.188: [--partition-activations] +100.83.37.188: [--contigious-checkpointing] [--checkpoint-in-cpu] +100.83.37.188: [--synchronize-each-layer] [--profile-backward] +100.83.37.188: [--num-layers-teacher NUM_LAYERS_TEACHER] +100.83.37.188: [--num-experts-teacher NUM_EXPERTS_TEACHER [NUM_EXPERTS_TEACHER ...]] +100.83.37.188: [--hidden-size-teacher HIDDEN_SIZE_TEACHER] +100.83.37.188: [--num-attention-heads-teacher NUM_ATTENTION_HEADS_TEACHER] +100.83.37.188: [--mos] [--kd] [--kd-alpha-ce KD_ALPHA_CE] +100.83.37.188: [--kd-beta-ce KD_BETA_CE] [--kd-temp KD_TEMP] +100.83.37.188: [--reset-iteration] [--load-teacher LOAD_TEACHER] +100.83.37.188: [--inference-batch-times-seqlen-threshold INFERENCE_BATCH_TIMES_SEQLEN_THRESHOLD] +100.83.37.188: [--max-tokens-to-oom MAX_TOKENS_TO_OOM] +100.83.37.188: 
[--output-bert-embeddings]
+100.83.37.188: [--bert-embedder-type {megatron,huggingface}]
+100.83.37.188: [--cache-fp8-weight]
+100.83.37.188: [--cache-fp8-weight-fwd CACHE_FP8_WEIGHT_FWD]
+100.83.37.188: [--fp8-e5m2] [--fp8-e4m3] [--fp8-hybrid]
+100.83.37.188: [--no-fp8-wgrad] [--fp8-margin FP8_MARGIN]
+100.83.37.188: [--fp8-interval FP8_INTERVAL]
+100.83.37.188: [--transformer-impl {local,transformer_engine}]
+100.83.37.188: [--fp8-amax-history-len FP8_AMAX_HISTORY_LEN]
+100.83.37.188: [--fp8-amax-compute-algo {most_recent,max}]
+100.83.37.188: [--retro-workdir RETRO_WORKDIR]
+100.83.37.188: [--retro-add-retriever]
+100.83.37.188: [--retro-cyclic-train-iters RETRO_CYCLIC_TRAIN_ITERS]
+100.83.37.188: [--retro-encoder-layers RETRO_ENCODER_LAYERS]
+100.83.37.188: [--retro-encoder-hidden-dropout RETRO_ENCODER_HIDDEN_DROPOUT]
+100.83.37.188: [--retro-encoder-attention-dropout RETRO_ENCODER_ATTENTION_DROPOUT]
+100.83.37.188: [--retro-num-neighbors RETRO_NUM_NEIGHBORS]
+100.83.37.188: [--retro-num-retrieved-chunks RETRO_NUM_RETRIEVED_CHUNKS]
+100.83.37.188: [--retro-return-doc-ids]
+100.83.37.188: [--profile {pt,pt-full,hltv}]
+100.83.37.188: [--profile-steps PROFILE_STEPS] [--deepspeed]
+100.83.37.188: [--deepspeed_config DEEPSPEED_CONFIG] [--deepscale]
+100.83.37.188: [--deepscale_config DEEPSCALE_CONFIG]
+100.83.37.188: [--deepspeed_mpi] [--use_hpu] [--no_cuda]
+100.83.37.188: pretrain_llama.py: error: unrecognized arguments: --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --use-torch-compile false --hpu-deterministic --hf-save /data/output/llama13b_multiling_800M/13-05-2024-11:55:44/hf_ckpt
+100.83.37.175: --------------------------------------------------
+100.83.37.175: DeepSpeed C++/CUDA extension op report
+100.83.37.175: --------------------------------------------------
+100.83.37.175: NOTE: Ops not installed will be just-in-time (JIT) compiled at
+100.83.37.175: runtime if needed. Op compatibility means that your system
+100.83.37.175: meet the required dependencies to JIT install the op.
+100.83.37.175: --------------------------------------------------
+100.83.37.175: JIT compiled ops requires ninja
+100.83.37.175: ninja .................. [OKAY]
+100.83.37.175: --------------------------------------------------
+100.83.37.175: op name ................ installed .. compatible
+100.83.37.175: --------------------------------------------------
+100.83.37.175: cpu_adam ............... [NO] ....... [OKAY]
+100.83.37.175: fused_adam ............. [NO] ....... [OKAY]
+100.83.37.175: deepspeed_not_implemented [NO] ....... [OKAY]
+100.83.37.175: transformer_inference .. [NO] ....... [OKAY]
+100.83.37.175: --------------------------------------------------
+100.83.37.175: DeepSpeed general environment info:
+100.83.37.175: torch install path ............... ['/usr/local/lib/python3.10/dist-packages/torch']
+100.83.37.175: torch version .................... 2.1.1a0+gitb51c9f6
+100.83.37.175: deepspeed install path ........... ['/usr/local/lib/python3.10/dist-packages/deepspeed']
+100.83.37.175: deepspeed info ................... 0.12.4+hpu.synapse.v1.14.0, fad45b2, 1.14.0
+100.83.37.175: deepspeed wheel compiled w. ...... torch 2.1
+100.83.37.175: shared memory (/dev/shm) size .... 503.75 GB
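+
+The extension-op table and environment block above are DeepSpeed's standard startup report: every op is listed as installed [NO] because nothing was prebuilt (ops are JIT-compiled on first use), and [OKAY] under "compatible" means the JIT prerequisites are met. The same summary can be reproduced on any node with DeepSpeed's bundled reporting tool; that is a quick way to confirm all ranks run the same wheel (0.12.4+hpu.synapse.v1.14.0 here) before chasing rank-specific failures:
+
+    # Print this node's op-compatibility and environment summary on demand;
+    # ds_report ships with the DeepSpeed wheel shown in the log above.
+    ds_report
+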
+100.83.37.175: fatal: detected dubious ownership in repository at '/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed'
+100.83.37.175: To add an exception for this directory, call:
+100.83.37.175:
+100.83.37.175: git config --global --add safe.directory /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed
+100.83.37.175: **** Git info for Megatron: git_hash=unknown git_branch=unknown ****
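+
+The git failure above is a side effect, not the crash: the checkout under /Model-References is owned by a different UID than the user inside the container, so git's safe.directory check refuses to read it and Megatron's build-info probe falls back to git_hash=unknown. It does not abort the run by itself, but it disappears if the directory is whitelisted on every node before launch. A minimal sketch, assuming a single-user container where trusting the checkout is acceptable:
+
+    # Trust the Megatron-DeepSpeed checkout (the exact command the log suggests)
+    git config --global --add safe.directory /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed
+    # Or, in a throwaway container with a recent git, trust every directory
+    git config --global --add safe.directory '*'
+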
+100.83.37.175: pretrain_llama.py: error: unrecognized arguments: --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --use-torch-compile false --hpu-deterministic --hf-save /data/output/llama13b_multiling_800M/13-05-2024-11:55:44/hf_ckpt
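+
+This is the actual fatal error, and it is identical on every rank: argparse rejects seven launch flags before training starts, so the launch script and this Megatron-DeepSpeed checkout are out of sync. Judging by the usage text, four of the rejected flags have near-equivalents in this build; the mapping below is an assumption from flag names, not a verified recipe, and the remaining three (--use-torch-compile, --hpu-deterministic, --hf-save) have no counterpart here and need a checkout that defines them:
+
+    # Hypothetical translation of the rejected flags onto ones this build accepts:
+    #   --position-embedding-type rotary -> --use-rotary-position-embeddings
+    #   --layernorm-type rmsnorm         -> --normalization rmsnorm
+    #   --activation-func-type swiglu    -> --swiglu
+    #   --no-bias                        -> --disable-bias-linear
+    SUPPORTED_FLAGS="--use-rotary-position-embeddings --normalization rmsnorm --swiglu --disable-bias-linear"
+    python3 -u ./pretrain_llama.py --deepspeed $SUPPORTED_FLAGS  # plus the rest of the original arguments
+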
+100.83.37.175: usage: pretrain_llama.py [-h] [--num-layers NUM_LAYERS] +100.83.37.175: [--encoder-num-layers ENCODER_NUM_LAYERS] +100.83.37.175: [--decoder-num-layers DECODER_NUM_LAYERS] +100.83.37.175: [--num-experts NUM_EXPERTS [NUM_EXPERTS ...]] +100.83.37.175: [--mlp-type MLP_TYPE] [--topk TOPK] +100.83.37.175: [--expert-interval EXPERT_INTERVAL] +100.83.37.175: [--hidden-size HIDDEN_SIZE] +100.83.37.175: [--ffn-hidden-size FFN_HIDDEN_SIZE] +100.83.37.175: [--num-attention-heads NUM_ATTENTION_HEADS] +100.83.37.175: [--num-key-value-heads NUM_KEY_VALUE_HEADS] +100.83.37.175: [--kv-channels KV_CHANNELS] +100.83.37.175: [--max-position-embeddings MAX_POSITION_EMBEDDINGS] +100.83.37.175: [--use-rotary-position-embeddings] +100.83.37.175: [--rotary-percent ROTARY_PERCENT] +100.83.37.175: [--no-position-embedding] +100.83.37.175: [--make-vocab-size-divisible-by MAKE_VOCAB_SIZE_DIVISIBLE_BY] +100.83.37.175: [--normalization {layernorm,rmsnorm}] +100.83.37.175: [--use-fused-rmsnorm USE_FUSED_RMSNORM] +100.83.37.175: [--layernorm-epsilon LAYERNORM_EPSILON] +100.83.37.175: [--apply-layernorm-1p] [--disable-mem-efficient-ln] +100.83.37.175: [--apply-residual-connection-post-layernorm] +100.83.37.175: [--openai-gelu] [--squared-relu] [--swiglu] +100.83.37.175: [--onnx-safe ONNX_SAFE] [--bert-no-binary-head] +100.83.37.175: [--num-experts-switch NUM_EXPERTS_SWITCH] +100.83.37.175: [--untie-embeddings-and-output-weights] +100.83.37.175: [--embedding-weights-in-fp32] +100.83.37.175: [--fix-position-emb-redundant-alloc] +100.83.37.175: [--embed-layernorm] +100.83.37.175: [--kill-switch-path KILL_SWITCH_PATH] +100.83.37.175: [--attention-dropout ATTENTION_DROPOUT] +100.83.37.175: [--hidden-dropout HIDDEN_DROPOUT] +100.83.37.175: [--weight-decay WEIGHT_DECAY] +100.83.37.175: [--start-weight-decay START_WEIGHT_DECAY] +100.83.37.175: 
+100.83.37.188: -------------------------------------------------- +100.83.37.188: DeepSpeed C++/CUDA extension op report +100.83.37.188: -------------------------------------------------- +100.83.37.188: NOTE: Ops not installed will be just-in-time (JIT) compiled at +100.83.37.188: runtime if needed. Op compatibility means that your system +100.83.37.188: meet the required dependencies to JIT install the op. +100.83.37.188: -------------------------------------------------- +100.83.37.188: JIT compiled ops requires ninja +100.83.37.188: ninja .................. [OKAY] +100.83.37.188: -------------------------------------------------- +100.83.37.188: op name ................ installed .. compatible +100.83.37.188: -------------------------------------------------- +100.83.37.188: cpu_adam ............... [NO] ....... [OKAY] +100.83.37.188: fused_adam ............. [NO] ....... [OKAY] +100.83.37.188: deepspeed_not_implemented [NO] ....... [OKAY] +100.83.37.188: transformer_inference .. [NO] ....... [OKAY] +100.83.37.188: -------------------------------------------------- +100.83.37.188: DeepSpeed general environment info: +100.83.37.188: torch install path ............... ['/usr/local/lib/python3.10/dist-packages/torch'] +100.83.37.188: torch version .................... 2.1.1a0+gitb51c9f6 +100.83.37.188: deepspeed install path ........... ['/usr/local/lib/python3.10/dist-packages/deepspeed'] +100.83.37.188: deepspeed info ................... 0.12.4+hpu.synapse.v1.14.0, fad45b2, 1.14.0 +100.83.37.188: deepspeed wheel compiled w. ...... torch 2.1 +100.83.37.188: shared memory (/dev/shm) size ....
503.75 GB +100.83.37.188: fatal: detected dubious ownership in repository at '/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed' +100.83.37.188: To add an exception for this directory, call: +100.83.37.188: +100.83.37.188: git config --global --add safe.directory /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed +100.83.37.188: **** Git info for Megatron: git_hash=unknown git_branch=unknown **** [... the identical usage listing from 100.83.37.188 omitted; it matches the one shown above for 100.83.37.175 ...]
+100.83.37.188: pretrain_llama.py: error: unrecognized arguments: --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --use-torch-compile false --hpu-deterministic --hf-save /data/output/llama13b_multiling_800M/13-05-2024-11:55:44/hf_ckpt +100.83.37.188: [2024-05-13 11:55:55,808] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 129641 +100.83.37.188: [2024-05-13 11:55:55,809] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 129642 +100.83.37.188: [2024-05-13 11:55:55,810] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 129643 +100.83.37.188: [2024-05-13 11:55:55,810] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 129644 +100.83.37.188: [2024-05-13 11:55:55,810] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 129645 +100.83.37.188: [2024-05-13 11:55:55,811] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 129646 +100.83.37.188: [2024-05-13 11:55:55,811] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 129647 +100.83.37.188: [2024-05-13 11:55:55,811] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 129648 +100.83.37.188: [2024-05-13 11:55:55,811] [ERROR] [launch.py:322:sigkill_handler] ['/usr/bin/bash', '-c', ' cd /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed && python3 -u ./pretrain_llama.py --deepspeed --tensor-model-parallel-size 1 --pipeline-model-parallel-size 1 --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --layernorm-epsilon 1e-6 --num-layers 24 --hidden-size 2048 --ffn-hidden-size 4096 --num-attention-heads 32 --seq-length 2048 --micro-batch-size 1 --global-batch-size 256 --train-iters 10000 --log-interval 10 --eval-iters 10 --eval-interval 500 --data-path /data/hineng/tokenizer//_raw_content_document --vocab-file /data/hineng/tokenizer//gpt2-vocab.json --merge-file /data/hineng/tokenizer//gpt2-merges.txt --optimizer adamw --adam-beta1 0.9 --adam-beta2 0.95 --adam-eps 1e-6 --lr 3e-4 --lr-decay-style cosine --lr-warmup-iters 2000 --clip-grad 1.0 --weight-decay 0.1 --attention-dropout 0.1 --hidden-dropout
0.1 --tensorboard-dir /data/output/llama13b_multiling_800M/13-05-2024-11:55:44/tensorboard --log-validation-ppl-to-tensorboard --log-batch-size-to-tensorboard --log-timers-to-tensorboard --load /data/output/llama13b_multiling_800M/13-05-2024-11:55:44/checkpoints_zero_stage_2 --deepspeed_config=/data/output/llama13b_multiling_800M/13-05-2024-11:55:44/ds_config.json --zero-stage=0 --exit-interval 0 --no-masked-softmax-fusion --no-bias-gelu-fusion --no-bias-dropout-fusion --use-torch-compile false --use-fused-sdpa true --use-fused-sdpa-with-recompute false --bf16 --use_hpu --distributed-backend=hccl --hpu-deterministic --sequence-parallel --save /data/output/llama13b_multiling_800M/13-05-2024-11:55:44/checkpoints_zero_stage_2 --hf-save /data/output/llama13b_multiling_800M/13-05-2024-11:55:44/hf_ckpt --save-interval 500 --verify-checkpoint --verify-checkpoint-model-type LLAMA'] exits with return code = 2 +100.83.37.175: [2024-05-13 11:55:55,848] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 35922 +100.83.37.175: [2024-05-13 11:55:55,849] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 35923 +100.83.37.175: [2024-05-13 11:55:55,850] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 35924 +100.83.37.175: [2024-05-13 11:55:55,850] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 35925 +100.83.37.175: [2024-05-13 11:55:55,851] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 35929 +100.83.37.175: [2024-05-13 11:55:55,851] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 35934 +100.83.37.175: [2024-05-13 11:55:55,852] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 35936 +100.83.37.175: [2024-05-13 11:55:55,852] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 35940 +100.83.37.175: [2024-05-13 11:55:55,852] [ERROR] [launch.py:322:sigkill_handler] ['/usr/bin/bash', '-c', ' cd /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed && python3 -u ./pretrain_llama.py --deepspeed --tensor-model-parallel-size 1 --pipeline-model-parallel-size 1 --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --layernorm-epsilon 1e-6 --num-layers 24 --hidden-size 2048 --ffn-hidden-size 4096 --num-attention-heads 32 --seq-length 2048 --micro-batch-size 1 --global-batch-size 256 --train-iters 10000 --log-interval 10 --eval-iters 10 --eval-interval 500 --data-path /data/hineng/tokenizer//_raw_content_document --vocab-file /data/hineng/tokenizer//gpt2-vocab.json --merge-file /data/hineng/tokenizer//gpt2-merges.txt --optimizer adamw --adam-beta1 0.9 --adam-beta2 0.95 --adam-eps 1e-6 --lr 3e-4 --lr-decay-style cosine --lr-warmup-iters 2000 --clip-grad 1.0 --weight-decay 0.1 --attention-dropout 0.1 --hidden-dropout 0.1 --tensorboard-dir /data/output/llama13b_multiling_800M/13-05-2024-11:55:44/tensorboard --log-validation-ppl-to-tensorboard --log-batch-size-to-tensorboard --log-timers-to-tensorboard --load /data/output/llama13b_multiling_800M/13-05-2024-11:55:44/checkpoints_zero_stage_2 --deepspeed_config=/data/output/llama13b_multiling_800M/13-05-2024-11:55:44/ds_config.json --zero-stage=0 --exit-interval 0 --no-masked-softmax-fusion --no-bias-gelu-fusion --no-bias-dropout-fusion --use-torch-compile false --use-fused-sdpa true --use-fused-sdpa-with-recompute false --bf16 --use_hpu --distributed-backend=hccl --hpu-deterministic --sequence-parallel --save /data/output/llama13b_multiling_800M/13-05-2024-11:55:44/checkpoints_zero_stage_2 --hf-save 
/data/output/llama13b_multiling_800M/13-05-2024-11:55:44/hf_ckpt --save-interval 500 --verify-checkpoint --verify-checkpoint-model-type LLAMA'] exits with return code = 2 +pdsh@vizzhy-150-3: 100.83.37.175: ssh exited with exit code 2 +pdsh@vizzhy-150-3: 100.83.37.188: ssh exited with exit code 2 diff --git a/llama13b_multiling_800M/13-05-2024-11:55:44/mds_to_hf_llama_custom.json b/llama13b_multiling_800M/13-05-2024-11:55:44/mds_to_hf_llama_custom.json new file mode 100644 index 0000000000000000000000000000000000000000..984df1b8e167dc47c40209bd7e305c2e697f43a2 --- /dev/null +++ b/llama13b_multiling_800M/13-05-2024-11:55:44/mds_to_hf_llama_custom.json @@ -0,0 +1,40 @@ +{ + "MODEL": { + "num_hidden_layers": 24, + "hidden_size": 2048, + "num_attention_heads": 32, + "intermediate_size": 4096, + "vocab_size":VOCAB_SIZE + }, + "LAYER_MAPPINGS" : { + "word_embeddings": 1, + "transformer": [3, 26], + "final_layernorm": 28, + "final_word_embeddings": 29 + }, + "FULL_NAME_MAPPINGS": { + }, + "PARTIAL_NAME_MAPPINGS": { + "final_word_embeddings": { + "vocab_parallel_projection": "lm_head" + }, + "final_layernorm": { + "final_rmsnorm": "model.norm" + }, + "word_embeddings": { + "word_embeddings": "model.embed_tokens" + }, + "transformer": { + "dense_h_to_4h": "mlp.gate_proj", + "dense_4h_to_h": "mlp.down_proj", + "dense_h_to_4h_swiglu": "mlp.up_proj", + "post_attention_layernorm": "post_attention_layernorm", + "input_layernorm": "input_layernorm", + "dense": "self_attn.o_proj", + "query_key_value": {"query": "self_attn.q_proj", "key": "self_attn.k_proj", "value": "self_attn.v_proj"} + } + }, + "SPECIAL": { + "query_key_value": "attention_qkv" + } +} diff --git a/llama13b_x/ds_config.json b/llama13b_x/ds_config.json new file mode 100644 index 0000000000000000000000000000000000000000..d0ee47e9cda5f537a1be4430e2806d723acd2356 --- /dev/null +++ b/llama13b_x/ds_config.json @@ -0,0 +1,19 @@ +{ + "train_batch_size" : 256, + "train_micro_batch_size_per_gpu": 1, + "steps_per_print": 10, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": 0 + }, + "bf16": { + "enabled": true, + "accumulate_grads_via_hooks": true + }, + "fp16": {"enabled": false}, + "wall_clock_breakdown": false, + "pipeline": { + "pipe_partitioned": false, + "grad_partitioned": false + } +} diff --git a/llama13b_x/log.txt b/llama13b_x/log.txt new file mode 100644 index 0000000000000000000000000000000000000000..0a5a496877a7fb5749f8404d9b05283b178d41b7 --- /dev/null +++ b/llama13b_x/log.txt @@ -0,0 +1,857 @@ +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. 
+ warnings.warn( +[2024-04-17 13:35:41,775] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +[2024-04-17 13:36:22,312] [INFO] [runner.py:585:main] cmd = /usr/bin/python3 -u -m deepspeed.launcher.launch --world_info=eyIxMDAuODMuMTM0LjE1OCI6IFswLCAxLCAyLCAzLCA0LCA1LCA2LCA3XX0= --master_addr=100.83.134.158 --master_port=29500 --no_python --no_local_rank --enable_each_rank_log=None /usr/bin/bash -c cd /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed && python3 -u ./pretrain_llama.py --deepspeed --tensor-model-parallel-size 1 --pipeline-model-parallel-size 1 --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --layernorm-epsilon 1e-6 --num-layers 16 --hidden-size 5120 --ffn-hidden-size 13824 --num-attention-heads 40 --seq-length 2048 --micro-batch-size 1 --global-batch-size 256 --train-iters 10000 --log-interval 10 --eval-iters 10 --eval-interval 100 --data-path /data/arxiv//tokenized_text_document --vocab-file /data/arxiv//gpt2-vocab.json --merge-file /data/arxiv//gpt2-merges.txt --optimizer adamw --adam-beta1 0.9 --adam-beta2 0.95 --adam-eps 1e-6 --lr 3e-4 --lr-decay-style cosine --lr-warmup-iters 2000 --clip-grad 1.0 --weight-decay 0.1 --attention-dropout 0.1 --hidden-dropout 0.1 --tensorboard-dir /data/output/llama13b_x//tensorboard --log-validation-ppl-to-tensorboard --log-batch-size-to-tensorboard --log-timers-to-tensorboard --load /data/output/llama13b_x//checkpoints_zero_stage_2 --deepspeed_config=/data/output/llama13b_x//ds_config.json --zero-stage=0 --exit-interval 0 --no-masked-softmax-fusion --no-bias-gelu-fusion --no-bias-dropout-fusion --use-torch-compile false --use-fused-sdpa true --use-fused-sdpa-with-recompute false --bf16 --use_hpu --distributed-backend=hccl --hpu-deterministic --sequence-parallel --save /data/output/llama13b_x//checkpoints_zero_stage_2 --hf-save /data/output/llama13b_x//hf_ckpt --save-interval 100 --verify-checkpoint --verify-checkpoint-model-type LLAMA +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. + warnings.warn( +[2024-04-17 13:36:23,803] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +[2024-04-17 13:36:24,923] [INFO] [launch.py:146:main] WORLD INFO DICT: {'100.83.134.158': [0, 1, 2, 3, 4, 5, 6, 7]} +[2024-04-17 13:36:24,923] [INFO] [launch.py:152:main] nnodes=1, num_local_procs=8, node_rank=0 +[2024-04-17 13:36:24,923] [INFO] [launch.py:163:main] global_rank_mapping=defaultdict(, {'100.83.134.158': [0, 1, 2, 3, 4, 5, 6, 7]}) +[2024-04-17 13:36:24,923] [INFO] [launch.py:164:main] dist_world_size=8 +[2024-04-17 13:36:24,923] [INFO] [launch.py:166:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. + warnings.warn( +[2024-04-17 13:36:26,736] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. 
+ warnings.warn( +[2024-04-17 13:36:26,736] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. + warnings.warn( +[2024-04-17 13:36:26,798] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. + warnings.warn( +[2024-04-17 13:36:26,801] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. + warnings.warn( +[2024-04-17 13:36:26,864] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. + warnings.warn( +[2024-04-17 13:36:26,902] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. + warnings.warn( +[2024-04-17 13:36:26,912] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. + warnings.warn( +[2024-04-17 13:36:26,977] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +-------------------------------------------------- +DeepSpeed C++/CUDA extension op report +-------------------------------------------------- +NOTE: Ops not installed will be just-in-time (JIT) compiled at + runtime if needed. Op compatibility means that your system + meet the required dependencies to JIT install the op. +-------------------------------------------------- +JIT compiled ops requires ninja +ninja .................. [OKAY] +-------------------------------------------------- +op name ................ installed .. compatible +-------------------------------------------------- +cpu_adam ............... [NO] ....... [OKAY] +fused_adam ............. [NO] ....... [OKAY] +deepspeed_not_implemented [NO] ....... [OKAY] +transformer_inference .. [NO] ....... [OKAY] +-------------------------------------------------- +DeepSpeed general environment info: +torch install path ............... ['/usr/local/lib/python3.10/dist-packages/torch'] +torch version .................... 2.1.1a0+gitb51c9f6 +deepspeed install path ........... ['/usr/local/lib/python3.10/dist-packages/deepspeed'] +deepspeed info ................... 
0.12.4+hpu.synapse.v1.14.0, fad45b2, 1.14.0 +deepspeed wheel compiled w. ...... torch 2.1 +shared memory (/dev/shm) size .... 503.72 GB +-------------------------------------------------- +DeepSpeed C++/CUDA extension op report +-------------------------------------------------- +NOTE: Ops not installed will be just-in-time (JIT) compiled at + runtime if needed. Op compatibility means that your system + meet the required dependencies to JIT install the op. +-------------------------------------------------- +JIT compiled ops requires ninja +ninja .................. [OKAY] +-------------------------------------------------- +op name ................ installed .. compatible +-------------------------------------------------- +cpu_adam ............... [NO] ....... [OKAY] +fused_adam ............. [NO] ....... [OKAY] +deepspeed_not_implemented [NO] ....... [OKAY] +transformer_inference .. [NO] ....... [OKAY] +-------------------------------------------------- +DeepSpeed general environment info: +torch install path ............... ['/usr/local/lib/python3.10/dist-packages/torch'] +torch version .................... 2.1.1a0+gitb51c9f6 +deepspeed install path ........... ['/usr/local/lib/python3.10/dist-packages/deepspeed'] +deepspeed info ................... 0.12.4+hpu.synapse.v1.14.0, fad45b2, 1.14.0 +deepspeed wheel compiled w. ...... torch 2.1 +shared memory (/dev/shm) size .... 503.72 GB +fatal: detected dubious ownership in repository at '/Model-References' +To add an exception for this directory, call: + + git config --global --add safe.directory /Model-References +**** Git info for Megatron: git_hash=unknown git_branch=unknown **** +fatal: detected dubious ownership in repository at '/Model-References' +To add an exception for this directory, call: + + git config --global --add safe.directory /Model-References +**** Git info for Megatron: git_hash=unknown git_branch=unknown **** +-------------------------------------------------- +DeepSpeed C++/CUDA extension op report +-------------------------------------------------- +NOTE: Ops not installed will be just-in-time (JIT) compiled at + runtime if needed. Op compatibility means that your system + meet the required dependencies to JIT install the op. +-------------------------------------------------- +JIT compiled ops requires ninja +ninja .................. [OKAY] +-------------------------------------------------- +op name ................ installed .. compatible +-------------------------------------------------- +cpu_adam ............... [NO] ....... [OKAY] +fused_adam ............. [NO] ....... [OKAY] +deepspeed_not_implemented [NO] ....... [OKAY] +transformer_inference .. [NO] ....... [OKAY] +-------------------------------------------------- +DeepSpeed general environment info: +torch install path ............... ['/usr/local/lib/python3.10/dist-packages/torch'] +torch version .................... 2.1.1a0+gitb51c9f6 +deepspeed install path ........... ['/usr/local/lib/python3.10/dist-packages/deepspeed'] +deepspeed info ................... 0.12.4+hpu.synapse.v1.14.0, fad45b2, 1.14.0 +deepspeed wheel compiled w. ...... torch 2.1 +shared memory (/dev/shm) size .... 
503.72 GB +fatal: detected dubious ownership in repository at '/Model-References' +To add an exception for this directory, call: + + git config --global --add safe.directory /Model-References +**** Git info for Megatron: git_hash=unknown git_branch=unknown **** +using world size: 8, data-parallel-size: 8, tensor-model-parallel size: 1, pipeline-model-parallel size: 1 +accumulate and all-reduce gradients in fp32 for bfloat16 data type. +using torch.bfloat16 for parameters ... +------------------------ arguments ------------------------ + accumulate_allreduce_grads_in_fp32 .............. True + activation_func_type ............................ swiglu + adam_beta1 ...................................... 0.9 + adam_beta2 ...................................... 0.95 + adam_eps ........................................ 1e-06 + adlr_autoresume ................................. False + adlr_autoresume_interval ........................ 1000 + aml_data_download_path .......................... None + apply_layernorm_weight_plus_one ................. False + apply_query_key_layer_scaling ................... True + apply_residual_connection_post_layernorm ........ False + attention_dropout ............................... 0.1 + attention_softmax_in_fp32 ....................... False + bert_binary_head ................................ True + bert_load ....................................... None + bf16 ............................................ True + bias_dropout_fusion ............................. False + bias_gelu_fusion ................................ False + biencoder_projection_dim ........................ 0 + biencoder_shared_query_context_model ............ False + block_data_path ................................. None + cache_fp8_weight ................................ False + cache_fp8_weight_fwd ............................ True + checkpoint_activations .......................... False + checkpoint_activations_granularity .............. full + checkpoint_in_cpu ............................... False + checkpoint_num_layers ........................... 1 + clearml_config_path ............................. None + clearml_continue_exp ............................ False + clearml_exp_name ................................ None + clip_grad ....................................... 1.0 + compression_training ............................ False + consumed_train_samples .......................... 0 + consumed_train_tokens ........................... 0 + consumed_valid_samples .......................... 0 + contigious_checkpointing ........................ False + cpu_optimizer ................................... False + cpu_torch_adam .................................. False + create_moe_param_group .......................... False + curriculum_learning ............................. False + data_idx_path ................................... None + data_impl ....................................... infer + data_parallel_size .............................. 8 + data_path ....................................... ['/data/arxiv//tokenized_text_document'] + data_sharding ................................... True + dataloader_type ................................. single + DDP_impl ........................................ local + decoder_seq_length .............................. None + deepscale ....................................... False + deepscale_config ................................ None + deepspeed ....................................... True + deepspeed_activation_checkpointing .............. 
False + deepspeed_config ................................ /data/output/llama13b_x//ds_config.json + deepspeed_mpi ................................... False + distribute_checkpointed_activations ............. False + distributed_backend ............................. hccl + do_layernorm_bias_weight_decay .................. False + do_pretrain_validation .......................... False + ds_inference .................................... False + ds_pipeline_enabled ............................. True + embed_layernorm ................................. False + embedding_path .................................. None + enable_expert_tensor_parallelism ................ False + encoder_seq_length .............................. 2048 + eod_mask_loss ................................... False + eval_interval ................................... 100 + eval_iters ...................................... 10 + eval_loss_exit_value ............................ None + eval_micro_batch_size ........................... 1 + evidence_data_path .............................. None + exit_duration_in_mins ........................... None + exit_interval ................................... 0 + expert_interval ................................. 2 + ffn_hidden_coeff ................................ 2.6666666666666665 + ffn_hidden_size ................................. 13824 + finetune ........................................ False + fix_position_emb_redundant_alloc ................ False + flatten_linear_operands ......................... False + fp16 ............................................ False + fp16_lm_cross_entropy ........................... False + fp32_residual_connection ........................ False + global_batch_size ............................... 256 + hf_save ......................................... /data/output/llama13b_x//hf_ckpt + hidden_dropout .................................. 0.1 + hidden_size ..................................... 5120 + hidden_size_teacher ............................. None + hpu_deterministic ............................... True + hpu_fp8_format .................................. e5m2 + hpu_fp8_measure_interval ........................ 10 + hysteresis ...................................... 2 + ict_head_size ................................... None + ict_load ........................................ None + img_dim ......................................... 224 + indexer_batch_size .............................. 128 + indexer_log_interval ............................ 1000 + inference ....................................... False + init_method_std ................................. 0.02 + init_method_xavier_uniform ...................... False + initial_loss_scale .............................. 4294967296 + kd .............................................. False + kd_alpha_ce ..................................... 1 + kd_beta_ce ...................................... 1 + kd_temp ......................................... 1.0 + kill_switch_path ................................ None + kv_channels ..................................... 128 + layernorm_epsilon ............................... 1e-06 + layernorm_type .................................. rmsnorm + lazy_mpu_init ................................... None + load ............................................ /data/output/llama13b_x//checkpoints_zero_stage_2 + load_teacher .................................... None + local_rank ...................................... 0 + log_batch_size_to_tensorboard ................... 
True + log_bwd_grads ................................... False + log_fwd_activations ............................. False + log_interval .................................... 10 + log_learning_rate_to_tensorboard ................ True + log_loss_scale_to_tensorboard ................... True + log_model_inputs ................................ False + log_num_zeros_in_grad ........................... False + log_optimizer_states_to_tensorboard ............. False + log_params_norm ................................. False + log_timers_to_tensorboard ....................... True + log_validation_ppl_to_tensorboard ............... True + loss_scale ...................................... None + loss_scale_window ............................... 1000 + lr .............................................. 0.0003 + lr_decay_iters .................................. None + lr_decay_samples ................................ None + lr_decay_style .................................. cosine + lr_decay_tokens ................................. None + lr_warmup_fraction .............................. None + lr_warmup_iters ................................. 2000 + lr_warmup_samples ............................... 0 + lr_warmup_tokens ................................ None + make_vocab_size_divisible_by .................... 128 + mask_prob ....................................... 0.15 + mask_tensor_adding .............................. False + masked_softmax_fusion ........................... False + max_position_embeddings ......................... None + memory_centric_tiled_linear ..................... False + merge_file ...................................... /data/arxiv//gpt2-merges.txt + micro_batch_size ................................ 1 + min_loss_scale .................................. 1.0 + min_lr .......................................... 0.0 + mlp_type ........................................ standard + mmap_warmup ..................................... False + moe_eval_capacity_factor ........................ 1.0 + moe_expert_parallel_size ........................ 1 + moe_loss_coeff .................................. 0.1 + moe_min_capacity ................................ 4 + moe_token_dropping .............................. True + moe_train_capacity_factor ....................... 1.0 + mos ............................................. False + no_bias ......................................... True + no_cuda ......................................... False + no_load_lr_state ................................ False + no_load_optim ................................... None + no_load_rng ..................................... None + no_pipeline_parallel ............................ False + no_save_optim ................................... None + no_save_rng ..................................... None + no_scaled_init .................................. False + num_attention_heads ............................. 40 + num_attention_heads_teacher ..................... None + num_channels .................................... 3 + num_classes ..................................... 1000 + num_experts ..................................... [1] + num_experts_teacher ............................. [1] + num_key_value_heads ............................. 40 + num_layers ...................................... 16 + num_layers_per_virtual_pipeline_stage ........... None + num_layers_teacher .............................. None + num_workers ..................................... 2 + onnx_safe ....................................... 
None + openai_gelu ..................................... False + optimizer ....................................... adamw + override_lr_scheduler ........................... False + params_dtype .................................... torch.bfloat16 + partition_activations ........................... False + patch_dim ....................................... 16 + pipeline_model_parallel_size .................... 1 + position_embedding_type ......................... PositionEmbeddingType.rotary + profile ......................................... None + profile_backward ................................ False + profile_steps ................................... 2,3 + query_in_block_prob ............................. 0.1 + rampup_batch_size ............................... None + rank ............................................ 0 + remote_device ................................... none + reset_attention_mask ............................ False + reset_iteration ................................. False + reset_position_ids .............................. False + retriever_report_topk_accuracies ................ [] + retriever_score_scaling ......................... False + retriever_seq_length ............................ 256 + sample_rate ..................................... 1.0 + save ............................................ /data/output/llama13b_x//checkpoints_zero_stage_2 + save_interval ................................... 100 + scatter_gather_tensors_in_pipeline .............. True + scattered_embeddings ............................ False + seed ............................................ 1234 + seq_length ...................................... 2048 + sequence_parallel ............................... False + sgd_momentum .................................... 0.9 + short_seq_prob .................................. 0.1 + skip_train ...................................... False + split ........................................... 969, 30, 1 + split_transformers .............................. False + synchronize_each_layer .......................... False + tensor_logger_max_iter .......................... 0 + tensor_logger_path .............................. None + tensor_model_parallel_size ...................... 1 + tensorboard_dir ................................. /data/output/llama13b_x//tensorboard + tensorboard_log_interval ........................ 1 + tensorboard_queue_size .......................... 1000 + test_data_path .................................. None + tile_factor ..................................... 1 + titles_data_path ................................ None + tokenizer_eod_id ................................ None + tokenizer_model_file ............................ None + tokenizer_type .................................. GPT2BPETokenizer + topk ............................................ 1 + train_data_path ................................. None + train_iters ..................................... 10000 + train_samples ................................... None + train_tokens .................................... None + universal_checkpoint ............................ False + use_checkpoint_lr_scheduler ..................... False + use_contiguous_buffers_in_ddp ................... True + use_cpu_initialization .......................... None + use_fused_sdpa .................................. True + use_fused_sdpa_with_recompute ................... False + use_hpu ......................................... True + use_hpu_fp8_transformer_engine .................. 
False + use_hpu_graphs .................................. False + use_one_sent_docs ............................... False + use_pin_memory .................................. False + use_rotary_v2 ................................... False + use_seq_len_plus_one_tokens ..................... True + use_torch_compile ............................... False + use_tutel ....................................... False + valid_data_path ................................. None + verify_checkpoint ............................... True + verify_checkpoint_model_type .................... LLAMA + verify_tp_workers ............................... False + verify_tp_workers_hash .......................... False + virtual_pipeline_model_parallel_size ............ None + vocab_extra_ids ................................. 0 + vocab_file ...................................... /data/arxiv//gpt2-vocab.json + weight_decay .................................... 0.1 + world_size ...................................... 8 + zero_allgather_bucket_size ...................... 0.0 + zero_contigious_gradients ....................... False + zero_reduce_bucket_size ......................... 0.0 + zero_reduce_scatter ............................. False + zero_stage ...................................... 0 +-------------------- end of arguments --------------------- +setting number of micro-batches to constant 32 +setting number of micro-batches to constant 32 +> building GPT2BPETokenizer tokenizer ... +_initialize_distributed: Initializing with below params: +args.local_rank: 2 +args.world_size: 8 +args.rank: 2 +args.distributed_backend: hccl +-------------------------------------------------- +DeepSpeed C++/CUDA extension op report +-------------------------------------------------- +NOTE: Ops not installed will be just-in-time (JIT) compiled at + runtime if needed. Op compatibility means that your system + meet the required dependencies to JIT install the op. +-------------------------------------------------- +JIT compiled ops requires ninja +ninja .................. [OKAY] +-------------------------------------------------- +op name ................ installed .. compatible +-------------------------------------------------- +cpu_adam ............... [NO] ....... [OKAY] +fused_adam ............. [NO] ....... [OKAY] +deepspeed_not_implemented [NO] ....... [OKAY] +transformer_inference .. [NO] ....... [OKAY] +-------------------------------------------------- +DeepSpeed general environment info: +torch install path ............... ['/usr/local/lib/python3.10/dist-packages/torch'] +torch version .................... 2.1.1a0+gitb51c9f6 +deepspeed install path ........... ['/usr/local/lib/python3.10/dist-packages/deepspeed'] +deepspeed info ................... 0.12.4+hpu.synapse.v1.14.0, fad45b2, 1.14.0 +deepspeed wheel compiled w. ...... torch 2.1 +shared memory (/dev/shm) size .... 503.72 GB +_initialize_distributed: Initializing with below params: +args.local_rank: 4 +args.world_size: 8 +args.rank: 4 +args.distributed_backend: hccl +fatal: detected dubious ownership in repository at '/Model-References' +To add an exception for this directory, call: + + git config --global --add safe.directory /Model-References +**** Git info for Megatron: git_hash=unknown git_branch=unknown **** +-------------------------------------------------- +DeepSpeed C++/CUDA extension op report +-------------------------------------------------- +NOTE: Ops not installed will be just-in-time (JIT) compiled at + runtime if needed. 
Op compatibility means that your system + meet the required dependencies to JIT install the op. +-------------------------------------------------- +JIT compiled ops requires ninja +ninja .................. [OKAY] +-------------------------------------------------- +op name ................ installed .. compatible +-------------------------------------------------- +cpu_adam ............... [NO] ....... [OKAY] +fused_adam ............. [NO] ....... [OKAY] +deepspeed_not_implemented [NO] ....... [OKAY] +transformer_inference .. [NO] ....... [OKAY] +-------------------------------------------------- +DeepSpeed general environment info: +torch install path ............... ['/usr/local/lib/python3.10/dist-packages/torch'] +torch version .................... 2.1.1a0+gitb51c9f6 +deepspeed install path ........... ['/usr/local/lib/python3.10/dist-packages/deepspeed'] +deepspeed info ................... 0.12.4+hpu.synapse.v1.14.0, fad45b2, 1.14.0 +deepspeed wheel compiled w. ...... torch 2.1 +shared memory (/dev/shm) size .... 503.72 GB + > padded vocab (size: 50257) with 47 dummy tokens (new size: 50304) +_initialize_distributed: Initializing with below params: +args.local_rank: 0 +args.world_size: 8 +args.rank: 0 +args.distributed_backend: hccl +-------------------------------------------------- +DeepSpeed C++/CUDA extension op report +-------------------------------------------------- +NOTE: Ops not installed will be just-in-time (JIT) compiled at + runtime if needed. Op compatibility means that your system + meet the required dependencies to JIT install the op. +-------------------------------------------------- +JIT compiled ops requires ninja +ninja .................. [OKAY] +-------------------------------------------------- +op name ................ installed .. compatible +-------------------------------------------------- +cpu_adam ............... [NO] ....... [OKAY] +fused_adam ............. [NO] ....... [OKAY] +deepspeed_not_implemented [NO] ....... [OKAY] +transformer_inference .. [NO] ....... [OKAY] +-------------------------------------------------- +DeepSpeed general environment info: +torch install path ............... ['/usr/local/lib/python3.10/dist-packages/torch'] +torch version .................... 2.1.1a0+gitb51c9f6 +deepspeed install path ........... ['/usr/local/lib/python3.10/dist-packages/deepspeed'] +deepspeed info ................... 0.12.4+hpu.synapse.v1.14.0, fad45b2, 1.14.0 +deepspeed wheel compiled w. ...... torch 2.1 +shared memory (/dev/shm) size .... 503.72 GB +fatal: detected dubious ownership in repository at '/Model-References' +To add an exception for this directory, call: + + git config --global --add safe.directory /Model-References +**** Git info for Megatron: git_hash=unknown git_branch=unknown **** +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. 
+ warnings.warn( +hccl device_count: 8 +[2024-04-17 13:36:29,411] [WARNING] [comm.py:163:init_deepspeed_backend] HCCL backend in DeepSpeed not yet implemented +[2024-04-17 13:36:29,411] [INFO] [comm.py:637:init_distributed] cdb=None +fatal: detected dubious ownership in repository at '/Model-References' +To add an exception for this directory, call: + + git config --global --add safe.directory /Model-References +**** Git info for Megatron: git_hash=unknown git_branch=unknown **** +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. + warnings.warn( +hccl device_count: 8 +[2024-04-17 13:36:29,416] [WARNING] [comm.py:163:init_deepspeed_backend] HCCL backend in DeepSpeed not yet implemented +[2024-04-17 13:36:29,416] [INFO] [comm.py:637:init_distributed] cdb=None +_initialize_distributed: Initializing with below params: +args.local_rank: 1 +args.world_size: 8 +args.rank: 1 +args.distributed_backend: hccl +_initialize_distributed: Initializing with below params: +args.local_rank: 6 +args.world_size: 8 +args.rank: 6 +args.distributed_backend: hccl +-------------------------------------------------- +DeepSpeed C++/CUDA extension op report +-------------------------------------------------- +NOTE: Ops not installed will be just-in-time (JIT) compiled at + runtime if needed. Op compatibility means that your system + meet the required dependencies to JIT install the op. +-------------------------------------------------- +JIT compiled ops requires ninja +ninja .................. [OKAY] +-------------------------------------------------- +op name ................ installed .. compatible +-------------------------------------------------- +cpu_adam ............... [NO] ....... [OKAY] +fused_adam ............. [NO] ....... [OKAY] +deepspeed_not_implemented [NO] ....... [OKAY] +transformer_inference .. [NO] ....... [OKAY] +-------------------------------------------------- +DeepSpeed general environment info: +torch install path ............... ['/usr/local/lib/python3.10/dist-packages/torch'] +torch version .................... 2.1.1a0+gitb51c9f6 +deepspeed install path ........... ['/usr/local/lib/python3.10/dist-packages/deepspeed'] +deepspeed info ................... 0.12.4+hpu.synapse.v1.14.0, fad45b2, 1.14.0 +deepspeed wheel compiled w. ...... torch 2.1 +shared memory (/dev/shm) size .... 503.72 GB +-------------------------------------------------- +DeepSpeed C++/CUDA extension op report +-------------------------------------------------- +NOTE: Ops not installed will be just-in-time (JIT) compiled at + runtime if needed. Op compatibility means that your system + meet the required dependencies to JIT install the op. +-------------------------------------------------- +JIT compiled ops requires ninja +ninja .................. [OKAY] +-------------------------------------------------- +op name ................ installed .. compatible +-------------------------------------------------- +cpu_adam ............... [NO] ....... [OKAY] +fused_adam ............. [NO] ....... [OKAY] +deepspeed_not_implemented [NO] ....... [OKAY] +transformer_inference .. [NO] ....... [OKAY] +-------------------------------------------------- +DeepSpeed general environment info: +torch install path ............... ['/usr/local/lib/python3.10/dist-packages/torch'] +torch version .................... 
2.1.1a0+gitb51c9f6 +deepspeed install path ........... ['/usr/local/lib/python3.10/dist-packages/deepspeed'] +deepspeed info ................... 0.12.4+hpu.synapse.v1.14.0, fad45b2, 1.14.0 +deepspeed wheel compiled w. ...... torch 2.1 +shared memory (/dev/shm) size .... 503.72 GB +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. + warnings.warn( +hccl device_count: 8 +> initializing torch distributed ... +[2024-04-17 13:36:29,471] [WARNING] [comm.py:163:init_deepspeed_backend] HCCL backend in DeepSpeed not yet implemented +[2024-04-17 13:36:29,471] [INFO] [comm.py:637:init_distributed] cdb=None +[2024-04-17 13:36:29,471] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend hccl +fatal: detected dubious ownership in repository at '/Model-References' +To add an exception for this directory, call: + + git config --global --add safe.directory /Model-References +**** Git info for Megatron: git_hash=unknown git_branch=unknown **** +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. + warnings.warn( +hccl device_count: 8 +fatal: detected dubious ownership in repository at '/Model-References' +To add an exception for this directory, call: + + git config --global --add safe.directory /Model-References +[2024-04-17 13:36:29,495] [WARNING] [comm.py:163:init_deepspeed_backend] HCCL backend in DeepSpeed not yet implemented +[2024-04-17 13:36:29,496] [INFO] [comm.py:637:init_distributed] cdb=None +**** Git info for Megatron: git_hash=unknown git_branch=unknown **** +> setting tensorboard ... +_initialize_distributed: Initializing with below params: +args.local_rank: 7 +args.world_size: 8 +args.rank: 7 +args.distributed_backend: hccl +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. + warnings.warn( +hccl device_count: 8 +[2024-04-17 13:36:29,532] [WARNING] [comm.py:163:init_deepspeed_backend] HCCL backend in DeepSpeed not yet implemented +[2024-04-17 13:36:29,532] [INFO] [comm.py:637:init_distributed] cdb=None +_initialize_distributed: Initializing with below params: +args.local_rank: 3 +args.world_size: 8 +args.rank: 3 +args.distributed_backend: hccl +_initialize_distributed: Initializing with below params: +args.local_rank: 5 +args.world_size: 8 +args.rank: 5 +args.distributed_backend: hccl +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. + warnings.warn( +hccl device_count: 8 +[2024-04-17 13:36:29,568] [WARNING] [comm.py:163:init_deepspeed_backend] HCCL backend in DeepSpeed not yet implemented +[2024-04-17 13:36:29,568] [INFO] [comm.py:637:init_distributed] cdb=None +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. 
+ warnings.warn( +hccl device_count: 8 +[2024-04-17 13:36:29,609] [WARNING] [comm.py:163:init_deepspeed_backend] HCCL backend in DeepSpeed not yet implemented +[2024-04-17 13:36:29,610] [INFO] [comm.py:637:init_distributed] cdb=None +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. + warnings.warn( +hccl device_count: 8 +[2024-04-17 13:36:29,627] [WARNING] [comm.py:163:init_deepspeed_backend] HCCL backend in DeepSpeed not yet implemented +[2024-04-17 13:36:29,627] [INFO] [comm.py:637:init_distributed] cdb=None +[E socket.cpp:922] [c10d] The client socket has timed out after 1800s while trying to connect to (100.83.134.158, 29500). +Traceback (most recent call last): + File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 110, in <module> + pretrain(train_valid_test_datasets_provider, model_provider, forward_step, extra_args_provider=llama_argument_handler, + File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/training.py", line 162, in pretrain + initialize_megatron(extra_args_provider=extra_args_provider, + File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/initialize.py", line 95, in initialize_megatron + finish_mpu_init() + File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/initialize.py", line 77, in finish_mpu_init + _initialize_distributed() + File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/initialize.py", line 276, in _initialize_distributed + deepspeed.init_distributed(dist_backend=args.distributed_backend) + File "/usr/local/lib/python3.10/dist-packages/deepspeed/comm/comm.py", line 670, in init_distributed + cdb = TorchBackend(dist_backend, timeout, init_method, rank, world_size) + File "/usr/local/lib/python3.10/dist-packages/deepspeed/comm/torch.py", line 120, in __init__ +[E socket.cpp:922] [c10d] The client socket has timed out after 1800s while trying to connect to (100.83.134.158, 29500). + self.init_process_group(backend, timeout, init_method, rank, world_size) + File "/usr/local/lib/python3.10/dist-packages/deepspeed/comm/torch.py", line 148, in init_process_group + torch.distributed.init_process_group(backend, timeout=timeout, init_method=init_method) + File "/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/core/torch_overwrites.py", line 259, in wrap_init_process_group + return init_process_group_orig( + File "/usr/local/lib/python3.10/dist-packages/torch/distributed/c10d_logger.py", line 74, in wrapper + func_return = func(*args, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/torch/distributed/distributed_c10d.py", line 1141, in init_process_group +Traceback (most recent call last): + store, rank, world_size = next(rendezvous_iterator) + File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 110, in <module> + File "/usr/local/lib/python3.10/dist-packages/torch/distributed/rendezvous.py", line 241, in _env_rendezvous_handler + store = _create_c10d_store(master_addr, master_port, rank, world_size, timeout) + File "/usr/local/lib/python3.10/dist-packages/torch/distributed/rendezvous.py", line 172, in _create_c10d_store + return TCPStore( +TimeoutError: The client socket has timed out after 1800s while trying to connect to (100.83.134.158, 29500).
+ pretrain(train_valid_test_datasets_provider, model_provider, forward_step, extra_args_provider=llama_argument_handler, + File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/training.py", line 162, in pretrain + initialize_megatron(extra_args_provider=extra_args_provider, + File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/initialize.py", line 95, in initialize_megatron + finish_mpu_init() + File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/initialize.py", line 77, in finish_mpu_init + _initialize_distributed() + File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/initialize.py", line 276, in _initialize_distributed + deepspeed.init_distributed(dist_backend=args.distributed_backend) + File "/usr/local/lib/python3.10/dist-packages/deepspeed/comm/comm.py", line 670, in init_distributed + cdb = TorchBackend(dist_backend, timeout, init_method, rank, world_size) + File "/usr/local/lib/python3.10/dist-packages/deepspeed/comm/torch.py", line 120, in __init__ + self.init_process_group(backend, timeout, init_method, rank, world_size) + File "/usr/local/lib/python3.10/dist-packages/deepspeed/comm/torch.py", line 148, in init_process_group + torch.distributed.init_process_group(backend, timeout=timeout, init_method=init_method) + File "/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/core/torch_overwrites.py", line 259, in wrap_init_process_group + return init_process_group_orig( + File "/usr/local/lib/python3.10/dist-packages/torch/distributed/c10d_logger.py", line 74, in wrapper + func_return = func(*args, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/torch/distributed/distributed_c10d.py", line 1141, in init_process_group + store, rank, world_size = next(rendezvous_iterator) + File "/usr/local/lib/python3.10/dist-packages/torch/distributed/rendezvous.py", line 241, in _env_rendezvous_handler + store = _create_c10d_store(master_addr, master_port, rank, world_size, timeout) + File "/usr/local/lib/python3.10/dist-packages/torch/distributed/rendezvous.py", line 172, in _create_c10d_store + return TCPStore( +TimeoutError: The client socket has timed out after 1800s while trying to connect to (100.83.134.158, 29500). +[E socket.cpp:922] [c10d] The client socket has timed out after 1800s while trying to connect to (100.83.134.158, 29500). 
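All eight ranks fail identically here (the remaining per-rank tracebacks are verbatim copies of the two above): the torch.distributed TCPStore rendezvous never connects to the master at 100.83.134.158:29500 and gives up after the 1800 s timeout. Below is a minimal connectivity probe, assuming the host and port taken from the log above; it is illustrative only and not part of the training scripts.

# Hypothetical pre-launch check: can a worker reach the rendezvous port?
import socket

MASTER_ADDR, MASTER_PORT = "100.83.134.158", 29500  # from the failing run above

def can_reach(host: str, port: int, timeout_s: float = 5.0) -> bool:
    """Try a plain TCP connection to the master's rendezvous port."""
    try:
        with socket.create_connection((host, port), timeout=timeout_s):
            return True
    except OSError as exc:
        print(f"cannot reach {host}:{port}: {exc}")
        return False

if __name__ == "__main__":
    print("reachable" if can_reach(MASTER_ADDR, MASTER_PORT) else "blocked")

If the probe fails, the usual suspects are a firewall rule on the port, a --master_addr that does not match a bindable interface on a multi-homed host, or a rank-0 process that never created the store.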
+[2024-04-17 14:06:29,834] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 2748
+[2024-04-17 14:06:29,931] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 2749
+[2024-04-17 14:06:29,931] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 2750
+[2024-04-17 14:06:29,932] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 2751
+[2024-04-17 14:06:29,984] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 2752
+[2024-04-17 14:06:29,985] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 2753
+[2024-04-17 14:06:30,037] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 2754
+[2024-04-17 14:06:30,037] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 2755
+[2024-04-17 14:06:30,037] [ERROR] [launch.py:322:sigkill_handler] ['/usr/bin/bash', '-c', ' cd /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed && python3 -u ./pretrain_llama.py --deepspeed --tensor-model-parallel-size 1 --pipeline-model-parallel-size 1 --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --layernorm-epsilon 1e-6 --num-layers 16 --hidden-size 5120 --ffn-hidden-size 13824 --num-attention-heads 40 --seq-length 2048 --micro-batch-size 1 --global-batch-size 256 --train-iters 10000 --log-interval 10 --eval-iters 10 --eval-interval 100 --data-path /data/arxiv//tokenized_text_document --vocab-file /data/arxiv//gpt2-vocab.json --merge-file /data/arxiv//gpt2-merges.txt --optimizer adamw --adam-beta1 0.9 --adam-beta2 0.95 --adam-eps 1e-6 --lr 3e-4 --lr-decay-style cosine --lr-warmup-iters 2000 --clip-grad 1.0 --weight-decay 0.1 --attention-dropout 0.1 --hidden-dropout 0.1 --tensorboard-dir /data/output/llama13b_x//tensorboard --log-validation-ppl-to-tensorboard --log-batch-size-to-tensorboard --log-timers-to-tensorboard --load /data/output/llama13b_x//checkpoints_zero_stage_2 --deepspeed_config=/data/output/llama13b_x//ds_config.json --zero-stage=0 --exit-interval 0 --no-masked-softmax-fusion --no-bias-gelu-fusion --no-bias-dropout-fusion --use-torch-compile false --use-fused-sdpa true --use-fused-sdpa-with-recompute false --bf16 --use_hpu --distributed-backend=hccl --hpu-deterministic --sequence-parallel --save /data/output/llama13b_x//checkpoints_zero_stage_2 --hf-save /data/output/llama13b_x//hf_ckpt --save-interval 100 --verify-checkpoint --verify-checkpoint-model-type LLAMA'] exits with return code = 1
diff --git a/univ_ckpt_new/zero/10.attention.dense.weight/exp_avg.pt b/univ_ckpt_new/zero/10.attention.dense.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..56fc78cfc081d27e651bbff92d28935e366e8cde
--- /dev/null
+++ b/univ_ckpt_new/zero/10.attention.dense.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8456b875deb014a426c31f3458ee2d9497070b7bf04c0c7580312b34dd71f2e8
+size 16778460
diff --git a/univ_ckpt_new/zero/10.attention.dense.weight/exp_avg_sq.pt b/univ_ckpt_new/zero/10.attention.dense.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..094807677d9d6d9bc08f15573befb64be7f140c1
--- /dev/null
+++ b/univ_ckpt_new/zero/10.attention.dense.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:111d2b98ad68ac0d7cbf1d7d1442c7929e09e25658d1d44877ea88846d407ef0
+size 16778475
diff --git a/univ_ckpt_new/zero/10.attention.dense.weight/fp32.pt b/univ_ckpt_new/zero/10.attention.dense.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..422b58e7b2c8b0778d1f3461e883cf888523c1be
--- /dev/null
+++ b/univ_ckpt_new/zero/10.attention.dense.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a043f8d030f6d90774747a9dfbec789a3446581c69fdc595d50538286a9eb9a6
+size 16778381
diff --git a/univ_ckpt_new/zero/10.input_layernorm.weight/exp_avg.pt b/univ_ckpt_new/zero/10.input_layernorm.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..ec579bcadc579a68f8c1a5f28f22dfe5e1116601
--- /dev/null
+++ b/univ_ckpt_new/zero/10.input_layernorm.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:99aee420f3ec83a8a8cd03fbeef6b5a68f1faf74925eb7d1d47a9f9c0c354d7c
+size 9372
diff --git a/univ_ckpt_new/zero/10.input_layernorm.weight/exp_avg_sq.pt b/univ_ckpt_new/zero/10.input_layernorm.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..71d2d91c0ab9219ce7098092e426846bd04ee251
--- /dev/null
+++ b/univ_ckpt_new/zero/10.input_layernorm.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2cc0fbd863a0583c3b5ba3ef48abc55f811b87da11da110150dbb687c7318e97
+size 9387
diff --git a/univ_ckpt_new/zero/10.input_layernorm.weight/fp32.pt b/univ_ckpt_new/zero/10.input_layernorm.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..f2b2cbae0399e844277266d68646ebc441508ba8
--- /dev/null
+++ b/univ_ckpt_new/zero/10.input_layernorm.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d37c1b831276ffbe93b5d4a98389a155e40d6de816423e38c4bb65f71726783c
+size 9293
diff --git a/univ_ckpt_new/zero/10.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt b/univ_ckpt_new/zero/10.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..029612d3c78a393a69a1e622bcc4d5b5df0cc925
--- /dev/null
+++ b/univ_ckpt_new/zero/10.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:35de7e06be7be1e1d1aa4204e41523f164552c3d126ab7594bd472515646271b
+size 33555676
diff --git a/univ_ckpt_new/zero/10.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt b/univ_ckpt_new/zero/10.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..82fb4618c165dd3416847f00c0fd3f7907bf547a
--- /dev/null
+++ b/univ_ckpt_new/zero/10.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f9904014825d1dce2a31b0f13cb5ed5cc9023b832f011fa2c009e6ec60ff81d7
+size 33555691
diff --git a/univ_ckpt_new/zero/11.attention.dense.weight/exp_avg.pt b/univ_ckpt_new/zero/11.attention.dense.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..56fc78cfc081d27e651bbff92d28935e366e8cde
--- /dev/null
+++ b/univ_ckpt_new/zero/11.attention.dense.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8456b875deb014a426c31f3458ee2d9497070b7bf04c0c7580312b34dd71f2e8
+size 16778460
diff --git a/univ_ckpt_new/zero/11.attention.dense.weight/exp_avg_sq.pt b/univ_ckpt_new/zero/11.attention.dense.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..094807677d9d6d9bc08f15573befb64be7f140c1
--- /dev/null
+++ b/univ_ckpt_new/zero/11.attention.dense.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:111d2b98ad68ac0d7cbf1d7d1442c7929e09e25658d1d44877ea88846d407ef0
+size 16778475
diff --git a/univ_ckpt_new/zero/11.attention.dense.weight/fp32.pt b/univ_ckpt_new/zero/11.attention.dense.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..30a0ee8beb1b60c85ee0b3e510d2bd6f05a95daf
--- /dev/null
+++ b/univ_ckpt_new/zero/11.attention.dense.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:faa3349750bb6efe5e60cd738a91511d2465f0c5d1b48baebb81748742ff5897
+size 16778381
diff --git a/univ_ckpt_new/zero/11.attention.query_key_value.weight/exp_avg.pt b/univ_ckpt_new/zero/11.attention.query_key_value.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..adc0edc2492bc504b35a973a133a2479bcd5d3a3
--- /dev/null
+++ b/univ_ckpt_new/zero/11.attention.query_key_value.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:76691d43fea6c7c10abc14ffaf54a9ae8066865ced294aa223188fe378f9b99b
+size 50332892
diff --git a/univ_ckpt_new/zero/11.attention.query_key_value.weight/exp_avg_sq.pt b/univ_ckpt_new/zero/11.attention.query_key_value.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..26383e2be4b6b92a52367b816c3ae73a7a9f037f
--- /dev/null
+++ b/univ_ckpt_new/zero/11.attention.query_key_value.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a5f7e05a336689de292f8c9e229789e9a3a2aeb4952a616c713b5819337b6fca
+size 50332907
diff --git a/univ_ckpt_new/zero/11.attention.query_key_value.weight/fp32.pt b/univ_ckpt_new/zero/11.attention.query_key_value.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..ef216617ef5d5628c36d5be475677b1e770b64c7
--- /dev/null
+++ b/univ_ckpt_new/zero/11.attention.query_key_value.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9d86fed872f6a5de436b9a2e039d061195c8a6abd71f1ed877c8a5e5d6709b17
+size 50332813
diff --git a/univ_ckpt_new/zero/11.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt b/univ_ckpt_new/zero/11.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..029612d3c78a393a69a1e622bcc4d5b5df0cc925
--- /dev/null
+++ b/univ_ckpt_new/zero/11.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:35de7e06be7be1e1d1aa4204e41523f164552c3d126ab7594bd472515646271b
+size 33555676
diff --git a/univ_ckpt_new/zero/11.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt b/univ_ckpt_new/zero/11.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..82fb4618c165dd3416847f00c0fd3f7907bf547a
--- /dev/null
+++ b/univ_ckpt_new/zero/11.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f9904014825d1dce2a31b0f13cb5ed5cc9023b832f011fa2c009e6ec60ff81d7
+size 33555691
diff --git a/univ_ckpt_new/zero/11.mlp.dense_h_to_4h_swiglu.weight/fp32.pt b/univ_ckpt_new/zero/11.mlp.dense_h_to_4h_swiglu.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..9e5b01ed9d27f44cce991295cc33f22780d3591e
--- /dev/null
+++ b/univ_ckpt_new/zero/11.mlp.dense_h_to_4h_swiglu.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7f4bb6f98664cbc555cb8dc527a69ce6f42c224db98f1010870e155bb970f391
+size 33555597
diff --git a/univ_ckpt_new/zero/12.input_layernorm.weight/fp32.pt b/univ_ckpt_new/zero/12.input_layernorm.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..f2b2cbae0399e844277266d68646ebc441508ba8
--- /dev/null
+++ b/univ_ckpt_new/zero/12.input_layernorm.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d37c1b831276ffbe93b5d4a98389a155e40d6de816423e38c4bb65f71726783c
+size 9293
diff --git a/univ_ckpt_new/zero/12.post_attention_layernorm.weight/exp_avg.pt b/univ_ckpt_new/zero/12.post_attention_layernorm.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..ec579bcadc579a68f8c1a5f28f22dfe5e1116601
--- /dev/null
+++ b/univ_ckpt_new/zero/12.post_attention_layernorm.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:99aee420f3ec83a8a8cd03fbeef6b5a68f1faf74925eb7d1d47a9f9c0c354d7c
+size 9372
diff --git a/univ_ckpt_new/zero/12.post_attention_layernorm.weight/exp_avg_sq.pt b/univ_ckpt_new/zero/12.post_attention_layernorm.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..71d2d91c0ab9219ce7098092e426846bd04ee251
--- /dev/null
+++ b/univ_ckpt_new/zero/12.post_attention_layernorm.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2cc0fbd863a0583c3b5ba3ef48abc55f811b87da11da110150dbb687c7318e97
+size 9387
diff --git a/univ_ckpt_new/zero/12.post_attention_layernorm.weight/fp32.pt b/univ_ckpt_new/zero/12.post_attention_layernorm.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..f2b2cbae0399e844277266d68646ebc441508ba8
--- /dev/null
+++ b/univ_ckpt_new/zero/12.post_attention_layernorm.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d37c1b831276ffbe93b5d4a98389a155e40d6de816423e38c4bb65f71726783c
+size 9293
diff --git a/univ_ckpt_new/zero/13.mlp.dense_4h_to_h.weight/exp_avg.pt b/univ_ckpt_new/zero/13.mlp.dense_4h_to_h.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..05bc287c5388d12a465d1b2c14ee472371fb702b
--- /dev/null
+++ b/univ_ckpt_new/zero/13.mlp.dense_4h_to_h.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6eb6b0d3816a7dd8480ed3bf4d860e5cc5769c82e1c2258f73d6a022ca4fb129
+size 33555676
diff --git a/univ_ckpt_new/zero/13.mlp.dense_4h_to_h.weight/exp_avg_sq.pt b/univ_ckpt_new/zero/13.mlp.dense_4h_to_h.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..46024622d3fd9c1170110f03c7f78475a7a933f0
--- /dev/null
+++ b/univ_ckpt_new/zero/13.mlp.dense_4h_to_h.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:37f3772c838b6a1244ebfa426fe3b119375a919cad4b97787a32327610be3ead
+size 33555691
diff --git a/univ_ckpt_new/zero/13.mlp.dense_4h_to_h.weight/fp32.pt b/univ_ckpt_new/zero/13.mlp.dense_4h_to_h.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..d6964432a431a14292fae7fb2192a094baf8be9e
--- /dev/null
+++ b/univ_ckpt_new/zero/13.mlp.dense_4h_to_h.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b873b57354b38b79591b004b0dec14441e7039d568b6be7a573bec1bd04d5554
+size 33555597
diff --git a/univ_ckpt_new/zero/14.mlp.dense_4h_to_h.weight/exp_avg.pt b/univ_ckpt_new/zero/14.mlp.dense_4h_to_h.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..05bc287c5388d12a465d1b2c14ee472371fb702b
--- /dev/null
+++ b/univ_ckpt_new/zero/14.mlp.dense_4h_to_h.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6eb6b0d3816a7dd8480ed3bf4d860e5cc5769c82e1c2258f73d6a022ca4fb129
+size 33555676
diff --git a/univ_ckpt_new/zero/14.mlp.dense_4h_to_h.weight/exp_avg_sq.pt b/univ_ckpt_new/zero/14.mlp.dense_4h_to_h.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..46024622d3fd9c1170110f03c7f78475a7a933f0
--- /dev/null
+++ b/univ_ckpt_new/zero/14.mlp.dense_4h_to_h.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:37f3772c838b6a1244ebfa426fe3b119375a919cad4b97787a32327610be3ead
+size 33555691
diff --git a/univ_ckpt_new/zero/14.mlp.dense_4h_to_h.weight/fp32.pt b/univ_ckpt_new/zero/14.mlp.dense_4h_to_h.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..451a7886c8967dc10bcb70d94fbc75d0b1c89ef4
--- /dev/null
+++ b/univ_ckpt_new/zero/14.mlp.dense_4h_to_h.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:733d8ec1636ddafd4e39a5755c697bbe61a2130ae5dc1df42b2c2901bc003f36
+size 33555597
diff --git a/univ_ckpt_new/zero/14.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt b/univ_ckpt_new/zero/14.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..029612d3c78a393a69a1e622bcc4d5b5df0cc925
--- /dev/null
+++ b/univ_ckpt_new/zero/14.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:35de7e06be7be1e1d1aa4204e41523f164552c3d126ab7594bd472515646271b
+size 33555676
diff --git a/univ_ckpt_new/zero/14.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt b/univ_ckpt_new/zero/14.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..82fb4618c165dd3416847f00c0fd3f7907bf547a
--- /dev/null
+++ b/univ_ckpt_new/zero/14.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f9904014825d1dce2a31b0f13cb5ed5cc9023b832f011fa2c009e6ec60ff81d7
+size 33555691
diff --git a/univ_ckpt_new/zero/14.mlp.dense_h_to_4h_swiglu.weight/fp32.pt b/univ_ckpt_new/zero/14.mlp.dense_h_to_4h_swiglu.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..6773f36295bd22a9bf02d3d62b934e7b82eda34b
--- /dev/null
+++ b/univ_ckpt_new/zero/14.mlp.dense_h_to_4h_swiglu.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d4f61ec3eaf74bddeb15ef36a8a4fa5c7c935690be75f660e4fe7632072e06c8
+size 33555597
diff --git a/univ_ckpt_new/zero/15.attention.query_key_value.weight/exp_avg.pt b/univ_ckpt_new/zero/15.attention.query_key_value.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..adc0edc2492bc504b35a973a133a2479bcd5d3a3
--- /dev/null
+++ b/univ_ckpt_new/zero/15.attention.query_key_value.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:76691d43fea6c7c10abc14ffaf54a9ae8066865ced294aa223188fe378f9b99b
+size 50332892
diff --git a/univ_ckpt_new/zero/15.attention.query_key_value.weight/exp_avg_sq.pt b/univ_ckpt_new/zero/15.attention.query_key_value.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..26383e2be4b6b92a52367b816c3ae73a7a9f037f
--- /dev/null
+++ b/univ_ckpt_new/zero/15.attention.query_key_value.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a5f7e05a336689de292f8c9e229789e9a3a2aeb4952a616c713b5819337b6fca
+size 50332907
diff --git a/univ_ckpt_new/zero/16.mlp.dense_h_to_4h.weight/exp_avg.pt b/univ_ckpt_new/zero/16.mlp.dense_h_to_4h.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..029612d3c78a393a69a1e622bcc4d5b5df0cc925
--- /dev/null
+++ b/univ_ckpt_new/zero/16.mlp.dense_h_to_4h.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:35de7e06be7be1e1d1aa4204e41523f164552c3d126ab7594bd472515646271b
+size 33555676
diff --git a/univ_ckpt_new/zero/16.mlp.dense_h_to_4h.weight/exp_avg_sq.pt b/univ_ckpt_new/zero/16.mlp.dense_h_to_4h.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..82fb4618c165dd3416847f00c0fd3f7907bf547a
--- /dev/null
+++ b/univ_ckpt_new/zero/16.mlp.dense_h_to_4h.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f9904014825d1dce2a31b0f13cb5ed5cc9023b832f011fa2c009e6ec60ff81d7
+size 33555691
diff --git a/univ_ckpt_new/zero/16.mlp.dense_h_to_4h.weight/fp32.pt b/univ_ckpt_new/zero/16.mlp.dense_h_to_4h.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..cc73e2ea084e24683918373a45b2531ec67a51a3
--- /dev/null
+++ b/univ_ckpt_new/zero/16.mlp.dense_h_to_4h.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:24dc8de1740b6328114ac543414a7333b64a45337f7b397f99286e9aa8f1fe7a
+size 33555597
diff --git a/univ_ckpt_new/zero/18.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt b/univ_ckpt_new/zero/18.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..029612d3c78a393a69a1e622bcc4d5b5df0cc925
--- /dev/null
+++ b/univ_ckpt_new/zero/18.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:35de7e06be7be1e1d1aa4204e41523f164552c3d126ab7594bd472515646271b
+size 33555676
diff --git a/univ_ckpt_new/zero/18.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt b/univ_ckpt_new/zero/18.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..82fb4618c165dd3416847f00c0fd3f7907bf547a
--- /dev/null
+++ b/univ_ckpt_new/zero/18.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f9904014825d1dce2a31b0f13cb5ed5cc9023b832f011fa2c009e6ec60ff81d7
+size 33555691
diff --git a/univ_ckpt_new/zero/18.mlp.dense_h_to_4h_swiglu.weight/fp32.pt b/univ_ckpt_new/zero/18.mlp.dense_h_to_4h_swiglu.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..30144775db48d0541216e25330f086c30f852697
--- /dev/null
+++ b/univ_ckpt_new/zero/18.mlp.dense_h_to_4h_swiglu.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5607e61c2b0b31829d6705c73867c668befde5ffca3388f63525fa6f34b027b9
+size 33555597
diff --git a/univ_ckpt_new/zero/19.attention.dense.weight/exp_avg.pt b/univ_ckpt_new/zero/19.attention.dense.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..56fc78cfc081d27e651bbff92d28935e366e8cde
--- /dev/null
+++ b/univ_ckpt_new/zero/19.attention.dense.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8456b875deb014a426c31f3458ee2d9497070b7bf04c0c7580312b34dd71f2e8
+size 16778460
diff --git a/univ_ckpt_new/zero/19.attention.dense.weight/exp_avg_sq.pt b/univ_ckpt_new/zero/19.attention.dense.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..094807677d9d6d9bc08f15573befb64be7f140c1
--- /dev/null
+++ b/univ_ckpt_new/zero/19.attention.dense.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:111d2b98ad68ac0d7cbf1d7d1442c7929e09e25658d1d44877ea88846d407ef0
+size 16778475
diff --git a/univ_ckpt_new/zero/19.attention.dense.weight/fp32.pt b/univ_ckpt_new/zero/19.attention.dense.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..07aeb27148746b078f4a712a9cd5363256e6ec46
--- /dev/null
+++ b/univ_ckpt_new/zero/19.attention.dense.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6230dacba796437438a5ece3a20ed6988289760408c892862ce1adfdb8b316d0
+size 16778381
diff --git a/univ_ckpt_new/zero/19.attention.query_key_value.weight/exp_avg.pt b/univ_ckpt_new/zero/19.attention.query_key_value.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..adc0edc2492bc504b35a973a133a2479bcd5d3a3
--- /dev/null
+++ b/univ_ckpt_new/zero/19.attention.query_key_value.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:76691d43fea6c7c10abc14ffaf54a9ae8066865ced294aa223188fe378f9b99b
+size 50332892
diff --git a/univ_ckpt_new/zero/19.attention.query_key_value.weight/exp_avg_sq.pt b/univ_ckpt_new/zero/19.attention.query_key_value.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..26383e2be4b6b92a52367b816c3ae73a7a9f037f
--- /dev/null
+++ b/univ_ckpt_new/zero/19.attention.query_key_value.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a5f7e05a336689de292f8c9e229789e9a3a2aeb4952a616c713b5819337b6fca
+size 50332907
diff --git a/univ_ckpt_new/zero/19.attention.query_key_value.weight/fp32.pt b/univ_ckpt_new/zero/19.attention.query_key_value.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..86d9ddd4cf477986d1b31757d66e534e72dd3231
--- /dev/null
+++ b/univ_ckpt_new/zero/19.attention.query_key_value.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f24f7ae4131290aac3e6479040a0bed2d89df1a1336229844492beee43b16d7d
+size 50332813
diff --git a/univ_ckpt_new/zero/20.attention.dense.weight/exp_avg.pt b/univ_ckpt_new/zero/20.attention.dense.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..56fc78cfc081d27e651bbff92d28935e366e8cde
--- /dev/null
+++ b/univ_ckpt_new/zero/20.attention.dense.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8456b875deb014a426c31f3458ee2d9497070b7bf04c0c7580312b34dd71f2e8
+size 16778460
diff --git a/univ_ckpt_new/zero/20.attention.dense.weight/exp_avg_sq.pt b/univ_ckpt_new/zero/20.attention.dense.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..094807677d9d6d9bc08f15573befb64be7f140c1
--- /dev/null
+++ b/univ_ckpt_new/zero/20.attention.dense.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:111d2b98ad68ac0d7cbf1d7d1442c7929e09e25658d1d44877ea88846d407ef0
+size 16778475
diff --git a/univ_ckpt_new/zero/20.attention.dense.weight/fp32.pt b/univ_ckpt_new/zero/20.attention.dense.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..4179b19a3487727900cebb7d9a3dbab547323881
--- /dev/null
+++ b/univ_ckpt_new/zero/20.attention.dense.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dd40cfcf68091761d108cccd5bc67c5b6e4692255dc99a43496dbc1ea2d29bf0
+size 16778381
diff --git a/univ_ckpt_new/zero/20.final_rmsnorm.weight/exp_avg.pt b/univ_ckpt_new/zero/20.final_rmsnorm.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..c010ab0921da45db63f8db9312c7d04a29136309
--- /dev/null
+++ b/univ_ckpt_new/zero/20.final_rmsnorm.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:08a96416d11485842ba285e65ecbd6396a148e5f68b34052f2d33c9c61d28ad6
+size 21660
diff --git a/univ_ckpt_new/zero/20.final_rmsnorm.weight/exp_avg_sq.pt b/univ_ckpt_new/zero/20.final_rmsnorm.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..118e793a3eee060b3cd317150047093ac689f5f9
--- /dev/null
+++ b/univ_ckpt_new/zero/20.final_rmsnorm.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:705ff156694c71999b6d5d668bb3d19978e5358bf5e922376eb94fecee89bee3
+size 21675
diff --git a/univ_ckpt_new/zero/20.final_rmsnorm.weight/fp32.pt b/univ_ckpt_new/zero/20.final_rmsnorm.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..96de736a7a342e3f7d8c10ab8dfbeb1ab925e6a4
--- /dev/null
+++ b/univ_ckpt_new/zero/20.final_rmsnorm.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ad28a0b7facc4620044ef01db17285e5ab42275c36f2d0ca8bb2ff8fff3e9be1
+size 21581
diff --git a/univ_ckpt_new/zero/21.mlp.dense_4h_to_h.weight/exp_avg.pt b/univ_ckpt_new/zero/21.mlp.dense_4h_to_h.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..05bc287c5388d12a465d1b2c14ee472371fb702b
--- /dev/null
+++ b/univ_ckpt_new/zero/21.mlp.dense_4h_to_h.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6eb6b0d3816a7dd8480ed3bf4d860e5cc5769c82e1c2258f73d6a022ca4fb129
+size 33555676
diff --git a/univ_ckpt_new/zero/21.mlp.dense_4h_to_h.weight/exp_avg_sq.pt b/univ_ckpt_new/zero/21.mlp.dense_4h_to_h.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..46024622d3fd9c1170110f03c7f78475a7a933f0
--- /dev/null
+++ b/univ_ckpt_new/zero/21.mlp.dense_4h_to_h.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:37f3772c838b6a1244ebfa426fe3b119375a919cad4b97787a32327610be3ead
+size 33555691
diff --git a/univ_ckpt_new/zero/21.mlp.dense_4h_to_h.weight/fp32.pt b/univ_ckpt_new/zero/21.mlp.dense_4h_to_h.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..6eb740132c01488972fcf1dd8fcdff8eb448bbc9
--- /dev/null
+++ b/univ_ckpt_new/zero/21.mlp.dense_4h_to_h.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:467b6f902cae830c4ee53b01674dbc91cec100531797c4c26e41a02c8eef9509
+size 33555597
diff --git a/univ_ckpt_new/zero/21.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt b/univ_ckpt_new/zero/21.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..029612d3c78a393a69a1e622bcc4d5b5df0cc925
--- /dev/null
+++ b/univ_ckpt_new/zero/21.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:35de7e06be7be1e1d1aa4204e41523f164552c3d126ab7594bd472515646271b
+size 33555676
diff --git a/univ_ckpt_new/zero/21.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt b/univ_ckpt_new/zero/21.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..82fb4618c165dd3416847f00c0fd3f7907bf547a
--- /dev/null
+++ b/univ_ckpt_new/zero/21.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f9904014825d1dce2a31b0f13cb5ed5cc9023b832f011fa2c009e6ec60ff81d7
+size 33555691
diff --git a/univ_ckpt_new/zero/21.mlp.dense_h_to_4h_swiglu.weight/fp32.pt b/univ_ckpt_new/zero/21.mlp.dense_h_to_4h_swiglu.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..ce2fc73bf1ccdfd4853067b0e44dede3af4071b0
--- /dev/null
+++ b/univ_ckpt_new/zero/21.mlp.dense_h_to_4h_swiglu.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6adb3b28ba396ee9ea82a55c46e5cc89b06f654a9ce98e49d763fe3a7c7c827e
+size 33555597
diff --git a/univ_ckpt_new/zero/22.mlp.dense_h_to_4h.weight/exp_avg.pt b/univ_ckpt_new/zero/22.mlp.dense_h_to_4h.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..029612d3c78a393a69a1e622bcc4d5b5df0cc925
--- /dev/null
+++ b/univ_ckpt_new/zero/22.mlp.dense_h_to_4h.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:35de7e06be7be1e1d1aa4204e41523f164552c3d126ab7594bd472515646271b
+size 33555676
diff --git a/univ_ckpt_new/zero/22.mlp.dense_h_to_4h.weight/exp_avg_sq.pt b/univ_ckpt_new/zero/22.mlp.dense_h_to_4h.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..82fb4618c165dd3416847f00c0fd3f7907bf547a
--- /dev/null
+++ b/univ_ckpt_new/zero/22.mlp.dense_h_to_4h.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f9904014825d1dce2a31b0f13cb5ed5cc9023b832f011fa2c009e6ec60ff81d7
+size 33555691
diff --git a/univ_ckpt_new/zero/22.mlp.dense_h_to_4h.weight/fp32.pt b/univ_ckpt_new/zero/22.mlp.dense_h_to_4h.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..b80a8257e0a09c5adcb3195037c15e036050c3ff
--- /dev/null
+++ b/univ_ckpt_new/zero/22.mlp.dense_h_to_4h.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b7986acd0064596982f21f11ebae70d1d231418dd0205402fcdd9b0ab2cc78ce
+size 33555597
diff --git a/univ_ckpt_new/zero/23.post_attention_layernorm.weight/exp_avg.pt b/univ_ckpt_new/zero/23.post_attention_layernorm.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..ec579bcadc579a68f8c1a5f28f22dfe5e1116601
--- /dev/null
+++ b/univ_ckpt_new/zero/23.post_attention_layernorm.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:99aee420f3ec83a8a8cd03fbeef6b5a68f1faf74925eb7d1d47a9f9c0c354d7c
+size 9372
diff --git a/univ_ckpt_new/zero/23.post_attention_layernorm.weight/exp_avg_sq.pt b/univ_ckpt_new/zero/23.post_attention_layernorm.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..71d2d91c0ab9219ce7098092e426846bd04ee251
--- /dev/null
+++ b/univ_ckpt_new/zero/23.post_attention_layernorm.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2cc0fbd863a0583c3b5ba3ef48abc55f811b87da11da110150dbb687c7318e97
+size 9387
diff --git a/univ_ckpt_new/zero/23.post_attention_layernorm.weight/fp32.pt b/univ_ckpt_new/zero/23.post_attention_layernorm.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..f2b2cbae0399e844277266d68646ebc441508ba8
--- /dev/null
+++ b/univ_ckpt_new/zero/23.post_attention_layernorm.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d37c1b831276ffbe93b5d4a98389a155e40d6de816423e38c4bb65f71726783c
+size 9293
diff --git a/univ_ckpt_new/zero/24.input_layernorm.weight/exp_avg.pt b/univ_ckpt_new/zero/24.input_layernorm.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..ec579bcadc579a68f8c1a5f28f22dfe5e1116601
--- /dev/null
+++ b/univ_ckpt_new/zero/24.input_layernorm.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:99aee420f3ec83a8a8cd03fbeef6b5a68f1faf74925eb7d1d47a9f9c0c354d7c
+size 9372
diff --git a/univ_ckpt_new/zero/24.input_layernorm.weight/exp_avg_sq.pt b/univ_ckpt_new/zero/24.input_layernorm.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..71d2d91c0ab9219ce7098092e426846bd04ee251
--- /dev/null
+++ b/univ_ckpt_new/zero/24.input_layernorm.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2cc0fbd863a0583c3b5ba3ef48abc55f811b87da11da110150dbb687c7318e97
+size 9387
diff --git a/univ_ckpt_new/zero/24.input_layernorm.weight/fp32.pt b/univ_ckpt_new/zero/24.input_layernorm.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..f2b2cbae0399e844277266d68646ebc441508ba8
--- /dev/null
+++ b/univ_ckpt_new/zero/24.input_layernorm.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d37c1b831276ffbe93b5d4a98389a155e40d6de816423e38c4bb65f71726783c
+size 9293
diff --git a/univ_ckpt_new/zero/25.post_attention_layernorm.weight/exp_avg.pt b/univ_ckpt_new/zero/25.post_attention_layernorm.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..ec579bcadc579a68f8c1a5f28f22dfe5e1116601
--- /dev/null
+++ b/univ_ckpt_new/zero/25.post_attention_layernorm.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:99aee420f3ec83a8a8cd03fbeef6b5a68f1faf74925eb7d1d47a9f9c0c354d7c
+size 9372
diff --git a/univ_ckpt_new/zero/25.post_attention_layernorm.weight/exp_avg_sq.pt b/univ_ckpt_new/zero/25.post_attention_layernorm.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..71d2d91c0ab9219ce7098092e426846bd04ee251
--- /dev/null
+++ b/univ_ckpt_new/zero/25.post_attention_layernorm.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2cc0fbd863a0583c3b5ba3ef48abc55f811b87da11da110150dbb687c7318e97
+size 9387
diff --git a/univ_ckpt_new/zero/25.post_attention_layernorm.weight/fp32.pt b/univ_ckpt_new/zero/25.post_attention_layernorm.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..f2b2cbae0399e844277266d68646ebc441508ba8
--- /dev/null
+++ b/univ_ckpt_new/zero/25.post_attention_layernorm.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d37c1b831276ffbe93b5d4a98389a155e40d6de816423e38c4bb65f71726783c
+size 9293
diff --git a/univ_ckpt_new/zero/26.post_attention_layernorm.weight/exp_avg.pt b/univ_ckpt_new/zero/26.post_attention_layernorm.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..ec579bcadc579a68f8c1a5f28f22dfe5e1116601
--- /dev/null
+++ b/univ_ckpt_new/zero/26.post_attention_layernorm.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:99aee420f3ec83a8a8cd03fbeef6b5a68f1faf74925eb7d1d47a9f9c0c354d7c
+size 9372
diff --git a/univ_ckpt_new/zero/26.post_attention_layernorm.weight/exp_avg_sq.pt b/univ_ckpt_new/zero/26.post_attention_layernorm.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..71d2d91c0ab9219ce7098092e426846bd04ee251
--- /dev/null
+++ b/univ_ckpt_new/zero/26.post_attention_layernorm.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2cc0fbd863a0583c3b5ba3ef48abc55f811b87da11da110150dbb687c7318e97
+size 9387
diff --git a/univ_ckpt_new/zero/26.post_attention_layernorm.weight/fp32.pt b/univ_ckpt_new/zero/26.post_attention_layernorm.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..f2b2cbae0399e844277266d68646ebc441508ba8
--- /dev/null
+++ b/univ_ckpt_new/zero/26.post_attention_layernorm.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d37c1b831276ffbe93b5d4a98389a155e40d6de816423e38c4bb65f71726783c
+size 9293
diff --git a/univ_ckpt_new/zero/29.vocab_parallel_projection.weight/exp_avg.pt b/univ_ckpt_new/zero/29.vocab_parallel_projection.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..3e274ac9c65df8a0852554901ddcf79256708ec3
--- /dev/null
+++ b/univ_ckpt_new/zero/29.vocab_parallel_projection.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1bec900a61630d5c22ecdfb17813f8d40de3dd7639c0debef510314e4521f312
+size 415237340
diff --git a/univ_ckpt_new/zero/29.vocab_parallel_projection.weight/exp_avg_sq.pt b/univ_ckpt_new/zero/29.vocab_parallel_projection.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..4ce3fa1f663675d7d194fd79c1b747a206b4f4f2
--- /dev/null
+++ b/univ_ckpt_new/zero/29.vocab_parallel_projection.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f363409b9aef1988f79208eef0268bc1c09f3aa558dfc609e68810c0c56c697c
+size 415237355
diff --git a/univ_ckpt_new/zero/29.vocab_parallel_projection.weight/fp32.pt b/univ_ckpt_new/zero/29.vocab_parallel_projection.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..ec07c6db4dbff28261a03ce40dd2896f4dccc825
--- /dev/null
+++ b/univ_ckpt_new/zero/29.vocab_parallel_projection.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c0011e94a0bb35280fd855e086dad867973e9ae2406910e29bb4bb3da15049a7
+size 415237261
diff --git a/univ_ckpt_new/zero/3.attention.dense.weight/exp_avg.pt b/univ_ckpt_new/zero/3.attention.dense.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..56fc78cfc081d27e651bbff92d28935e366e8cde
--- /dev/null
+++ b/univ_ckpt_new/zero/3.attention.dense.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8456b875deb014a426c31f3458ee2d9497070b7bf04c0c7580312b34dd71f2e8
+size 16778460
diff --git a/univ_ckpt_new/zero/3.attention.dense.weight/exp_avg_sq.pt b/univ_ckpt_new/zero/3.attention.dense.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..094807677d9d6d9bc08f15573befb64be7f140c1
--- /dev/null
+++ b/univ_ckpt_new/zero/3.attention.dense.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:111d2b98ad68ac0d7cbf1d7d1442c7929e09e25658d1d44877ea88846d407ef0
+size 16778475
diff --git a/univ_ckpt_new/zero/3.attention.dense.weight/fp32.pt b/univ_ckpt_new/zero/3.attention.dense.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..6bef8a1289bed7a9e34093117c9286a0154ce5e3
--- /dev/null
+++ b/univ_ckpt_new/zero/3.attention.dense.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:64f39dbd49628add514d78470652d344d304edc95ea6609cdf9649038a145508
+size 16778381
diff --git a/univ_ckpt_new/zero/3.attention.query_key_value.weight/exp_avg.pt b/univ_ckpt_new/zero/3.attention.query_key_value.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..adc0edc2492bc504b35a973a133a2479bcd5d3a3
--- /dev/null
+++ b/univ_ckpt_new/zero/3.attention.query_key_value.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:76691d43fea6c7c10abc14ffaf54a9ae8066865ced294aa223188fe378f9b99b
+size 50332892
diff --git a/univ_ckpt_new/zero/3.attention.query_key_value.weight/exp_avg_sq.pt b/univ_ckpt_new/zero/3.attention.query_key_value.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..26383e2be4b6b92a52367b816c3ae73a7a9f037f
--- /dev/null
+++ b/univ_ckpt_new/zero/3.attention.query_key_value.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a5f7e05a336689de292f8c9e229789e9a3a2aeb4952a616c713b5819337b6fca
+size 50332907
diff --git a/univ_ckpt_new/zero/3.attention.query_key_value.weight/fp32.pt b/univ_ckpt_new/zero/3.attention.query_key_value.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..c18d5273ad3a4d9f3a9a0fe2cad46cdf3ab29471
--- /dev/null
+++ b/univ_ckpt_new/zero/3.attention.query_key_value.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8f3060c49f9fa3212830934aff2f0b8c78502cc1ffca787413f4b5d0f0bcab97
+size 50332813
diff --git a/univ_ckpt_new/zero/3.input_layernorm.weight/exp_avg.pt b/univ_ckpt_new/zero/3.input_layernorm.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..ec579bcadc579a68f8c1a5f28f22dfe5e1116601
--- /dev/null
+++ b/univ_ckpt_new/zero/3.input_layernorm.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:99aee420f3ec83a8a8cd03fbeef6b5a68f1faf74925eb7d1d47a9f9c0c354d7c
+size 9372
diff --git a/univ_ckpt_new/zero/3.input_layernorm.weight/exp_avg_sq.pt b/univ_ckpt_new/zero/3.input_layernorm.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..71d2d91c0ab9219ce7098092e426846bd04ee251
--- /dev/null
+++ b/univ_ckpt_new/zero/3.input_layernorm.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2cc0fbd863a0583c3b5ba3ef48abc55f811b87da11da110150dbb687c7318e97
+size 9387
diff --git a/univ_ckpt_new/zero/3.input_layernorm.weight/fp32.pt b/univ_ckpt_new/zero/3.input_layernorm.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..f2b2cbae0399e844277266d68646ebc441508ba8
--- /dev/null
+++ b/univ_ckpt_new/zero/3.input_layernorm.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d37c1b831276ffbe93b5d4a98389a155e40d6de816423e38c4bb65f71726783c
+size 9293
diff --git a/univ_ckpt_new/zero/3.mlp.dense_h_to_4h.weight/exp_avg.pt b/univ_ckpt_new/zero/3.mlp.dense_h_to_4h.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..029612d3c78a393a69a1e622bcc4d5b5df0cc925
--- /dev/null
+++ b/univ_ckpt_new/zero/3.mlp.dense_h_to_4h.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:35de7e06be7be1e1d1aa4204e41523f164552c3d126ab7594bd472515646271b
+size 33555676
diff --git a/univ_ckpt_new/zero/3.mlp.dense_h_to_4h.weight/exp_avg_sq.pt b/univ_ckpt_new/zero/3.mlp.dense_h_to_4h.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..82fb4618c165dd3416847f00c0fd3f7907bf547a
--- /dev/null
+++ b/univ_ckpt_new/zero/3.mlp.dense_h_to_4h.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f9904014825d1dce2a31b0f13cb5ed5cc9023b832f011fa2c009e6ec60ff81d7
+size 33555691
diff --git a/univ_ckpt_new/zero/3.mlp.dense_h_to_4h.weight/fp32.pt b/univ_ckpt_new/zero/3.mlp.dense_h_to_4h.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..5ccfef4bf154144bd44d69141a15340a647e94a4
--- /dev/null
+++ b/univ_ckpt_new/zero/3.mlp.dense_h_to_4h.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3f08ec95757ca4bd0823cb583bc43e16041afcd36c96c8061486df0549968c23
+size 33555597
diff --git a/univ_ckpt_new/zero/3.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt b/univ_ckpt_new/zero/3.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..029612d3c78a393a69a1e622bcc4d5b5df0cc925
--- /dev/null
+++ b/univ_ckpt_new/zero/3.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:35de7e06be7be1e1d1aa4204e41523f164552c3d126ab7594bd472515646271b
+size 33555676
diff --git a/univ_ckpt_new/zero/3.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt b/univ_ckpt_new/zero/3.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..82fb4618c165dd3416847f00c0fd3f7907bf547a
--- /dev/null
+++ b/univ_ckpt_new/zero/3.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f9904014825d1dce2a31b0f13cb5ed5cc9023b832f011fa2c009e6ec60ff81d7
+size 33555691
diff --git a/univ_ckpt_new/zero/3.mlp.dense_h_to_4h_swiglu.weight/fp32.pt b/univ_ckpt_new/zero/3.mlp.dense_h_to_4h_swiglu.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..4ae289f15bd28596258f6bcc0e9c8170caa6e6b5
--- /dev/null
+++ b/univ_ckpt_new/zero/3.mlp.dense_h_to_4h_swiglu.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:35ea0889fd097cd40325be60ba66c991eb7205fbf5b95185ca9e2b097e4721b1
+size 33555597
diff --git a/univ_ckpt_new/zero/4.mlp.dense_4h_to_h.weight/fp32.pt b/univ_ckpt_new/zero/4.mlp.dense_4h_to_h.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..a949087dd2c7bcee0039ecf3e8e206dae327bfb0
--- /dev/null
+++ b/univ_ckpt_new/zero/4.mlp.dense_4h_to_h.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3b412f08b7c0599bde2a23990bc0d81b98bee744d5c89c377950d1ef40265b1f
+size 33555597
diff --git a/univ_ckpt_new/zero/7.attention.dense.weight/exp_avg.pt b/univ_ckpt_new/zero/7.attention.dense.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..56fc78cfc081d27e651bbff92d28935e366e8cde
--- /dev/null
+++ b/univ_ckpt_new/zero/7.attention.dense.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8456b875deb014a426c31f3458ee2d9497070b7bf04c0c7580312b34dd71f2e8
+size 16778460
diff --git a/univ_ckpt_new/zero/7.attention.dense.weight/exp_avg_sq.pt b/univ_ckpt_new/zero/7.attention.dense.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..094807677d9d6d9bc08f15573befb64be7f140c1
--- /dev/null
+++ b/univ_ckpt_new/zero/7.attention.dense.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:111d2b98ad68ac0d7cbf1d7d1442c7929e09e25658d1d44877ea88846d407ef0
+size 16778475
diff --git a/univ_ckpt_new/zero/7.attention.dense.weight/fp32.pt b/univ_ckpt_new/zero/7.attention.dense.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..b7ed4be882c3c8e44d59bfe5aed8450633769407
--- /dev/null
+++ b/univ_ckpt_new/zero/7.attention.dense.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:70321b1aab328b0f48ef0a6792fb0b332b6bd686b94d62560e3b5093d2774b7a
+size 16778381
diff --git a/univ_ckpt_new/zero/8.attention.dense.weight/exp_avg.pt b/univ_ckpt_new/zero/8.attention.dense.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..56fc78cfc081d27e651bbff92d28935e366e8cde
--- /dev/null
+++ b/univ_ckpt_new/zero/8.attention.dense.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8456b875deb014a426c31f3458ee2d9497070b7bf04c0c7580312b34dd71f2e8
+size 16778460
diff --git a/univ_ckpt_new/zero/8.attention.dense.weight/exp_avg_sq.pt b/univ_ckpt_new/zero/8.attention.dense.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..094807677d9d6d9bc08f15573befb64be7f140c1
--- /dev/null
+++ b/univ_ckpt_new/zero/8.attention.dense.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:111d2b98ad68ac0d7cbf1d7d1442c7929e09e25658d1d44877ea88846d407ef0
+size 16778475
diff --git a/univ_ckpt_new/zero/8.attention.dense.weight/fp32.pt b/univ_ckpt_new/zero/8.attention.dense.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..c6b59eae0966422c562d878e4f20d2ee02dbd22d
--- /dev/null
+++ b/univ_ckpt_new/zero/8.attention.dense.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e9f5eae6c02d4ea87e935e62d0039217fc75020de57ee433d215be5d22336750
+size 16778381
diff --git a/univ_ckpt_new/zero/8.mlp.dense_4h_to_h.weight/exp_avg.pt b/univ_ckpt_new/zero/8.mlp.dense_4h_to_h.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..05bc287c5388d12a465d1b2c14ee472371fb702b
--- /dev/null
+++ b/univ_ckpt_new/zero/8.mlp.dense_4h_to_h.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6eb6b0d3816a7dd8480ed3bf4d860e5cc5769c82e1c2258f73d6a022ca4fb129
+size 33555676
diff --git a/univ_ckpt_new/zero/8.mlp.dense_4h_to_h.weight/exp_avg_sq.pt b/univ_ckpt_new/zero/8.mlp.dense_4h_to_h.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..46024622d3fd9c1170110f03c7f78475a7a933f0
--- /dev/null
+++ b/univ_ckpt_new/zero/8.mlp.dense_4h_to_h.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:37f3772c838b6a1244ebfa426fe3b119375a919cad4b97787a32327610be3ead
+size 33555691
diff --git a/univ_ckpt_new/zero/8.mlp.dense_4h_to_h.weight/fp32.pt b/univ_ckpt_new/zero/8.mlp.dense_4h_to_h.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..17f8d632971992271bf99dfd6c0fb30b10724fb6
--- /dev/null
+++ b/univ_ckpt_new/zero/8.mlp.dense_4h_to_h.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f8f4de220bfc2cd336a6b19b6083bdfb94b57225a227691b483b615cccec90bc
+size 33555597
diff --git a/univ_ckpt_new/zero/9.attention.query_key_value.weight/exp_avg.pt b/univ_ckpt_new/zero/9.attention.query_key_value.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..adc0edc2492bc504b35a973a133a2479bcd5d3a3
--- /dev/null
+++ b/univ_ckpt_new/zero/9.attention.query_key_value.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:76691d43fea6c7c10abc14ffaf54a9ae8066865ced294aa223188fe378f9b99b
+size 50332892
diff --git a/univ_ckpt_new/zero/9.attention.query_key_value.weight/exp_avg_sq.pt b/univ_ckpt_new/zero/9.attention.query_key_value.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..26383e2be4b6b92a52367b816c3ae73a7a9f037f
--- /dev/null
+++ b/univ_ckpt_new/zero/9.attention.query_key_value.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a5f7e05a336689de292f8c9e229789e9a3a2aeb4952a616c713b5819337b6fca
+size 50332907