|
/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. |
|
warnings.warn( |
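The replacement the warning points at is the standard PyTorch call; a minimal sketch, assuming deterministic execution is still wanted for this run (the deprecated path is presumably triggered by the --hpu-deterministic flag in the launch command below):

    # Minimal sketch of the replacement suggested by the warning above:
    # enable deterministic algorithms through the core PyTorch API instead
    # of the deprecated torch.hpu.setDeterministic.
    import torch

    torch.use_deterministic_algorithms(True)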
|
[2024-04-17 13:35:41,775] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) |
|
[2024-04-17 13:36:22,312] [INFO] [runner.py:585:main] cmd = /usr/bin/python3 -u -m deepspeed.launcher.launch --world_info=eyIxMDAuODMuMTM0LjE1OCI6IFswLCAxLCAyLCAzLCA0LCA1LCA2LCA3XX0= --master_addr=100.83.134.158 --master_port=29500 --no_python --no_local_rank --enable_each_rank_log=None /usr/bin/bash -c cd /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed && python3 -u ./pretrain_llama.py --deepspeed --tensor-model-parallel-size 1 --pipeline-model-parallel-size 1 --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --layernorm-epsilon 1e-6 --num-layers 16 --hidden-size 5120 --ffn-hidden-size 13824 --num-attention-heads 40 --seq-length 2048 --micro-batch-size 1 --global-batch-size 256 --train-iters 10000 --log-interval 10 --eval-iters 10 --eval-interval 100 --data-path /data/arxiv//tokenized_text_document --vocab-file /data/arxiv//gpt2-vocab.json --merge-file /data/arxiv//gpt2-merges.txt --optimizer adamw --adam-beta1 0.9 --adam-beta2 0.95 --adam-eps 1e-6 --lr 3e-4 --lr-decay-style cosine --lr-warmup-iters 2000 --clip-grad 1.0 --weight-decay 0.1 --attention-dropout 0.1 --hidden-dropout 0.1 --tensorboard-dir /data/output/llama13b_x//tensorboard --log-validation-ppl-to-tensorboard --log-batch-size-to-tensorboard --log-timers-to-tensorboard --load /data/output/llama13b_x//checkpoints_zero_stage_2 --deepspeed_config=/data/output/llama13b_x//ds_config.json --zero-stage=0 --exit-interval 0 --no-masked-softmax-fusion --no-bias-gelu-fusion --no-bias-dropout-fusion --use-torch-compile false --use-fused-sdpa true --use-fused-sdpa-with-recompute false --bf16 --use_hpu --distributed-backend=hccl --hpu-deterministic --sequence-parallel --save /data/output/llama13b_x//checkpoints_zero_stage_2 --hf-save /data/output/llama13b_x//hf_ckpt --save-interval 100 --verify-checkpoint --verify-checkpoint-model-type LLAMA |
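The --world_info value in the launcher command is a base64-encoded JSON map from host address to local device indices; decoding it reproduces the WORLD INFO DICT printed a few lines below (a quick check in Python):

    # Decode the --world_info argument from the launcher command above.
    import base64, json

    world_info = "eyIxMDAuODMuMTM0LjE1OCI6IFswLCAxLCAyLCAzLCA0LCA1LCA2LCA3XX0="
    print(json.loads(base64.b64decode(world_info)))
    # {'100.83.134.158': [0, 1, 2, 3, 4, 5, 6, 7]}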
|
/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. |
|
warnings.warn( |
|
[2024-04-17 13:36:23,803] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) |
|
[2024-04-17 13:36:24,923] [INFO] [launch.py:146:main] WORLD INFO DICT: {'100.83.134.158': [0, 1, 2, 3, 4, 5, 6, 7]} |
|
[2024-04-17 13:36:24,923] [INFO] [launch.py:152:main] nnodes=1, num_local_procs=8, node_rank=0 |
|
[2024-04-17 13:36:24,923] [INFO] [launch.py:163:main] global_rank_mapping=defaultdict(<class 'list'>, {'100.83.134.158': [0, 1, 2, 3, 4, 5, 6, 7]}) |
|
[2024-04-17 13:36:24,923] [INFO] [launch.py:164:main] dist_world_size=8 |
|
[2024-04-17 13:36:24,923] [INFO] [launch.py:166:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 |
|
/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. |
|
warnings.warn( |
|
[2024-04-17 13:36:26,736] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) |
|
/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. |
|
warnings.warn( |
|
[2024-04-17 13:36:26,736] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) |
|
/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. |
|
warnings.warn( |
|
[2024-04-17 13:36:26,798] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) |
|
/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. |
|
warnings.warn( |
|
[2024-04-17 13:36:26,801] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) |
|
/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. |
|
warnings.warn( |
|
[2024-04-17 13:36:26,864] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) |
|
/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. |
|
warnings.warn( |
|
[2024-04-17 13:36:26,902] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) |
|
/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. |
|
warnings.warn( |
|
[2024-04-17 13:36:26,912] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) |
|
/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. |
|
warnings.warn( |
|
[2024-04-17 13:36:26,977] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) |
|
-------------------------------------------------- |
|
DeepSpeed C++/CUDA extension op report |
|
-------------------------------------------------- |
|
NOTE: Ops not installed will be just-in-time (JIT) compiled at |
|
runtime if needed. Op compatibility means that your system |
|
meet the required dependencies to JIT install the op. |
|
-------------------------------------------------- |
|
JIT compiled ops requires ninja |
|
ninja .................. [OKAY]

--------------------------------------------------

op name ................ installed .. compatible

--------------------------------------------------

cpu_adam ............... [NO] ....... [OKAY]

fused_adam ............. [NO] ....... [OKAY]

deepspeed_not_implemented [NO] ....... [OKAY]

transformer_inference .. [NO] ....... [OKAY]
|
-------------------------------------------------- |
|
DeepSpeed general environment info: |
|
torch install path ............... ['/usr/local/lib/python3.10/dist-packages/torch'] |
|
torch version .................... 2.1.1a0+gitb51c9f6 |
|
deepspeed install path ........... ['/usr/local/lib/python3.10/dist-packages/deepspeed'] |
|
deepspeed info ................... 0.12.4+hpu.synapse.v1.14.0, fad45b2, 1.14.0 |
|
deepspeed wheel compiled w. ...... torch 2.1 |
|
shared memory (/dev/shm) size .... 503.72 GB |
|
-------------------------------------------------- |
|
DeepSpeed C++/CUDA extension op report |
|
-------------------------------------------------- |
|
NOTE: Ops not installed will be just-in-time (JIT) compiled at |
|
runtime if needed. Op compatibility means that your system |
|
meet the required dependencies to JIT install the op. |
|
-------------------------------------------------- |
|
JIT compiled ops requires ninja |
|
ninja .................. [OKAY]

--------------------------------------------------

op name ................ installed .. compatible

--------------------------------------------------

cpu_adam ............... [NO] ....... [OKAY]

fused_adam ............. [NO] ....... [OKAY]

deepspeed_not_implemented [NO] ....... [OKAY]

transformer_inference .. [NO] ....... [OKAY]
|
-------------------------------------------------- |
|
DeepSpeed general environment info: |
|
torch install path ............... ['/usr/local/lib/python3.10/dist-packages/torch'] |
|
torch version .................... 2.1.1a0+gitb51c9f6 |
|
deepspeed install path ........... ['/usr/local/lib/python3.10/dist-packages/deepspeed'] |
|
deepspeed info ................... 0.12.4+hpu.synapse.v1.14.0, fad45b2, 1.14.0 |
|
deepspeed wheel compiled w. ...... torch 2.1 |
|
shared memory (/dev/shm) size .... 503.72 GB |
|
fatal: detected dubious ownership in repository at '/Model-References' |
|
To add an exception for this directory, call: |
|
|
|
git config --global --add safe.directory /Model-References |
|
**** Git info for Megatron: git_hash=unknown git_branch=unknown **** |
|
fatal: detected dubious ownership in repository at '/Model-References' |
|
To add an exception for this directory, call: |
|
|
|
git config --global --add safe.directory /Model-References |
|
**** Git info for Megatron: git_hash=unknown git_branch=unknown **** |
|
-------------------------------------------------- |
|
DeepSpeed C++/CUDA extension op report |
|
-------------------------------------------------- |
|
NOTE: Ops not installed will be just-in-time (JIT) compiled at |
|
runtime if needed. Op compatibility means that your system |
|
meet the required dependencies to JIT install the op. |
|
-------------------------------------------------- |
|
JIT compiled ops requires ninja |
|
ninja .................. [OKAY]

--------------------------------------------------

op name ................ installed .. compatible

--------------------------------------------------

cpu_adam ............... [NO] ....... [OKAY]

fused_adam ............. [NO] ....... [OKAY]

deepspeed_not_implemented [NO] ....... [OKAY]

transformer_inference .. [NO] ....... [OKAY]
|
-------------------------------------------------- |
|
DeepSpeed general environment info: |
|
torch install path ............... ['/usr/local/lib/python3.10/dist-packages/torch'] |
|
torch version .................... 2.1.1a0+gitb51c9f6 |
|
deepspeed install path ........... ['/usr/local/lib/python3.10/dist-packages/deepspeed'] |
|
deepspeed info ................... 0.12.4+hpu.synapse.v1.14.0, fad45b2, 1.14.0 |
|
deepspeed wheel compiled w. ...... torch 2.1 |
|
shared memory (/dev/shm) size .... 503.72 GB |
|
fatal: detected dubious ownership in repository at '/Model-References' |
|
To add an exception for this directory, call: |
|
|
|
git config --global --add safe.directory /Model-References |
|
**** Git info for Megatron: git_hash=unknown git_branch=unknown **** |
|
using world size: 8, data-parallel-size: 8, tensor-model-parallel size: 1, pipeline-model-parallel size: 1 |
|
accumulate and all-reduce gradients in fp32 for bfloat16 data type. |
|
using torch.bfloat16 for parameters ... |
|
------------------------ arguments ------------------------ |
|
accumulate_allreduce_grads_in_fp32 .............. True |
|
activation_func_type ............................ swiglu |
|
adam_beta1 ...................................... 0.9 |
|
adam_beta2 ...................................... 0.95 |
|
adam_eps ........................................ 1e-06 |
|
adlr_autoresume ................................. False |
|
adlr_autoresume_interval ........................ 1000 |
|
aml_data_download_path .......................... None |
|
apply_layernorm_weight_plus_one ................. False |
|
apply_query_key_layer_scaling ................... True |
|
apply_residual_connection_post_layernorm ........ False |
|
attention_dropout ............................... 0.1 |
|
attention_softmax_in_fp32 ....................... False |
|
bert_binary_head ................................ True |
|
bert_load ....................................... None |
|
bf16 ............................................ True |
|
bias_dropout_fusion ............................. False |
|
bias_gelu_fusion ................................ False |
|
biencoder_projection_dim ........................ 0 |
|
biencoder_shared_query_context_model ............ False |
|
block_data_path ................................. None |
|
cache_fp8_weight ................................ False |
|
cache_fp8_weight_fwd ............................ True |
|
checkpoint_activations .......................... False |
|
checkpoint_activations_granularity .............. full |
|
checkpoint_in_cpu ............................... False |
|
checkpoint_num_layers ........................... 1 |
|
clearml_config_path ............................. None |
|
clearml_continue_exp ............................ False |
|
clearml_exp_name ................................ None |
|
clip_grad ....................................... 1.0 |
|
compression_training ............................ False |
|
consumed_train_samples .......................... 0 |
|
consumed_train_tokens ........................... 0 |
|
consumed_valid_samples .......................... 0 |
|
contigious_checkpointing ........................ False |
|
cpu_optimizer ................................... False |
|
cpu_torch_adam .................................. False |
|
create_moe_param_group .......................... False |
|
curriculum_learning ............................. False |
|
data_idx_path ................................... None |
|
data_impl ....................................... infer |
|
data_parallel_size .............................. 8 |
|
data_path ....................................... ['/data/arxiv//tokenized_text_document'] |
|
data_sharding ................................... True |
|
dataloader_type ................................. single |
|
DDP_impl ........................................ local |
|
decoder_seq_length .............................. None |
|
deepscale ....................................... False |
|
deepscale_config ................................ None |
|
deepspeed ....................................... True |
|
deepspeed_activation_checkpointing .............. False |
|
deepspeed_config ................................ /data/output/llama13b_x//ds_config.json |
|
deepspeed_mpi ................................... False |
|
distribute_checkpointed_activations ............. False |
|
distributed_backend ............................. hccl |
|
do_layernorm_bias_weight_decay .................. False |
|
do_pretrain_validation .......................... False |
|
ds_inference .................................... False |
|
ds_pipeline_enabled ............................. True |
|
embed_layernorm ................................. False |
|
embedding_path .................................. None |
|
enable_expert_tensor_parallelism ................ False |
|
encoder_seq_length .............................. 2048 |
|
eod_mask_loss ................................... False |
|
eval_interval ................................... 100 |
|
eval_iters ...................................... 10 |
|
eval_loss_exit_value ............................ None |
|
eval_micro_batch_size ........................... 1 |
|
evidence_data_path .............................. None |
|
exit_duration_in_mins ........................... None |
|
exit_interval ................................... 0 |
|
expert_interval ................................. 2 |
|
ffn_hidden_coeff ................................ 2.6666666666666665 |
|
ffn_hidden_size ................................. 13824 |
|
finetune ........................................ False |
|
fix_position_emb_redundant_alloc ................ False |
|
flatten_linear_operands ......................... False |
|
fp16 ............................................ False |
|
fp16_lm_cross_entropy ........................... False |
|
fp32_residual_connection ........................ False |
|
global_batch_size ............................... 256 |
|
hf_save ......................................... /data/output/llama13b_x//hf_ckpt |
|
hidden_dropout .................................. 0.1 |
|
hidden_size ..................................... 5120 |
|
hidden_size_teacher ............................. None |
|
hpu_deterministic ............................... True |
|
hpu_fp8_format .................................. e5m2 |
|
hpu_fp8_measure_interval ........................ 10 |
|
hysteresis ...................................... 2 |
|
ict_head_size ................................... None |
|
ict_load ........................................ None |
|
img_dim ......................................... 224 |
|
indexer_batch_size .............................. 128 |
|
indexer_log_interval ............................ 1000 |
|
inference ....................................... False |
|
init_method_std ................................. 0.02 |
|
init_method_xavier_uniform ...................... False |
|
initial_loss_scale .............................. 4294967296 |
|
kd .............................................. False |
|
kd_alpha_ce ..................................... 1 |
|
kd_beta_ce ...................................... 1 |
|
kd_temp ......................................... 1.0 |
|
kill_switch_path ................................ None |
|
kv_channels ..................................... 128 |
|
layernorm_epsilon ............................... 1e-06 |
|
layernorm_type .................................. rmsnorm |
|
lazy_mpu_init ................................... None |
|
load ............................................ /data/output/llama13b_x//checkpoints_zero_stage_2 |
|
load_teacher .................................... None |
|
local_rank ...................................... 0 |
|
log_batch_size_to_tensorboard ................... True |
|
log_bwd_grads ................................... False |
|
log_fwd_activations ............................. False |
|
log_interval .................................... 10 |
|
log_learning_rate_to_tensorboard ................ True |
|
log_loss_scale_to_tensorboard ................... True |
|
log_model_inputs ................................ False |
|
log_num_zeros_in_grad ........................... False |
|
log_optimizer_states_to_tensorboard ............. False |
|
log_params_norm ................................. False |
|
log_timers_to_tensorboard ....................... True |
|
log_validation_ppl_to_tensorboard ............... True |
|
loss_scale ...................................... None |
|
loss_scale_window ............................... 1000 |
|
lr .............................................. 0.0003 |
|
lr_decay_iters .................................. None |
|
lr_decay_samples ................................ None |
|
lr_decay_style .................................. cosine |
|
lr_decay_tokens ................................. None |
|
lr_warmup_fraction .............................. None |
|
lr_warmup_iters ................................. 2000 |
|
lr_warmup_samples ............................... 0 |
|
lr_warmup_tokens ................................ None |
|
make_vocab_size_divisible_by .................... 128 |
|
mask_prob ....................................... 0.15 |
|
mask_tensor_adding .............................. False |
|
masked_softmax_fusion ........................... False |
|
max_position_embeddings ......................... None |
|
memory_centric_tiled_linear ..................... False |
|
merge_file ...................................... /data/arxiv//gpt2-merges.txt |
|
micro_batch_size ................................ 1 |
|
min_loss_scale .................................. 1.0 |
|
min_lr .......................................... 0.0 |
|
mlp_type ........................................ standard |
|
mmap_warmup ..................................... False |
|
moe_eval_capacity_factor ........................ 1.0 |
|
moe_expert_parallel_size ........................ 1 |
|
moe_loss_coeff .................................. 0.1 |
|
moe_min_capacity ................................ 4 |
|
moe_token_dropping .............................. True |
|
moe_train_capacity_factor ....................... 1.0 |
|
mos ............................................. False |
|
no_bias ......................................... True |
|
no_cuda ......................................... False |
|
no_load_lr_state ................................ False |
|
no_load_optim ................................... None |
|
no_load_rng ..................................... None |
|
no_pipeline_parallel ............................ False |
|
no_save_optim ................................... None |
|
no_save_rng ..................................... None |
|
no_scaled_init .................................. False |
|
num_attention_heads ............................. 40 |
|
num_attention_heads_teacher ..................... None |
|
num_channels .................................... 3 |
|
num_classes ..................................... 1000 |
|
num_experts ..................................... [1] |
|
num_experts_teacher ............................. [1] |
|
num_key_value_heads ............................. 40 |
|
num_layers ...................................... 16 |
|
num_layers_per_virtual_pipeline_stage ........... None |
|
num_layers_teacher .............................. None |
|
num_workers ..................................... 2 |
|
onnx_safe ....................................... None |
|
openai_gelu ..................................... False |
|
optimizer ....................................... adamw |
|
override_lr_scheduler ........................... False |
|
params_dtype .................................... torch.bfloat16 |
|
partition_activations ........................... False |
|
patch_dim ....................................... 16 |
|
pipeline_model_parallel_size .................... 1 |
|
position_embedding_type ......................... PositionEmbeddingType.rotary |
|
profile ......................................... None |
|
profile_backward ................................ False |
|
profile_steps ................................... 2,3 |
|
query_in_block_prob ............................. 0.1 |
|
rampup_batch_size ............................... None |
|
rank ............................................ 0 |
|
remote_device ................................... none |
|
reset_attention_mask ............................ False |
|
reset_iteration ................................. False |
|
reset_position_ids .............................. False |
|
retriever_report_topk_accuracies ................ [] |
|
retriever_score_scaling ......................... False |
|
retriever_seq_length ............................ 256 |
|
sample_rate ..................................... 1.0 |
|
save ............................................ /data/output/llama13b_x//checkpoints_zero_stage_2 |
|
save_interval ................................... 100 |
|
scatter_gather_tensors_in_pipeline .............. True |
|
scattered_embeddings ............................ False |
|
seed ............................................ 1234 |
|
seq_length ...................................... 2048 |
|
sequence_parallel ............................... False |
|
sgd_momentum .................................... 0.9 |
|
short_seq_prob .................................. 0.1 |
|
skip_train ...................................... False |
|
split ........................................... 969, 30, 1 |
|
split_transformers .............................. False |
|
synchronize_each_layer .......................... False |
|
tensor_logger_max_iter .......................... 0 |
|
tensor_logger_path .............................. None |
|
tensor_model_parallel_size ...................... 1 |
|
tensorboard_dir ................................. /data/output/llama13b_x//tensorboard |
|
tensorboard_log_interval ........................ 1 |
|
tensorboard_queue_size .......................... 1000 |
|
test_data_path .................................. None |
|
tile_factor ..................................... 1 |
|
titles_data_path ................................ None |
|
tokenizer_eod_id ................................ None |
|
tokenizer_model_file ............................ None |
|
tokenizer_type .................................. GPT2BPETokenizer |
|
topk ............................................ 1 |
|
train_data_path ................................. None |
|
train_iters ..................................... 10000 |
|
train_samples ................................... None |
|
train_tokens .................................... None |
|
universal_checkpoint ............................ False |
|
use_checkpoint_lr_scheduler ..................... False |
|
use_contiguous_buffers_in_ddp ................... True |
|
use_cpu_initialization .......................... None |
|
use_fused_sdpa .................................. True |
|
use_fused_sdpa_with_recompute ................... False |
|
use_hpu ......................................... True |
|
use_hpu_fp8_transformer_engine .................. False |
|
use_hpu_graphs .................................. False |
|
use_one_sent_docs ............................... False |
|
use_pin_memory .................................. False |
|
use_rotary_v2 ................................... False |
|
use_seq_len_plus_one_tokens ..................... True |
|
use_torch_compile ............................... False |
|
use_tutel ....................................... False |
|
valid_data_path ................................. None |
|
verify_checkpoint ............................... True |
|
verify_checkpoint_model_type .................... LLAMA |
|
verify_tp_workers ............................... False |
|
verify_tp_workers_hash .......................... False |
|
virtual_pipeline_model_parallel_size ............ None |
|
vocab_extra_ids ................................. 0 |
|
vocab_file ...................................... /data/arxiv//gpt2-vocab.json |
|
weight_decay .................................... 0.1 |
|
world_size ...................................... 8 |
|
zero_allgather_bucket_size ...................... 0.0 |
|
zero_contigious_gradients ....................... False |
|
zero_reduce_bucket_size ......................... 0.0 |
|
zero_reduce_scatter ............................. False |
|
zero_stage ...................................... 0 |
|
-------------------- end of arguments --------------------- |
|
setting number of micro-batches to constant 32 |
|
setting number of micro-batches to constant 32 |
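The constant of 32 follows directly from the batch settings in the arguments dump above (global batch 256, micro batch 1, data-parallel size 8); a quick arithmetic check:

    # Values taken from the arguments dump above.
    global_batch_size = 256
    micro_batch_size = 1
    data_parallel_size = 8

    # Micro-batches accumulated per global batch on each data-parallel rank.
    num_micro_batches = global_batch_size // (micro_batch_size * data_parallel_size)
    assert num_micro_batches == 32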
|
> building GPT2BPETokenizer tokenizer ... |
|
_initialize_distributed: Initializing with below params: |
|
args.local_rank: 2 |
|
args.world_size: 8 |
|
args.rank: 2 |
|
args.distributed_backend: hccl |
|
-------------------------------------------------- |
|
DeepSpeed C++/CUDA extension op report |
|
-------------------------------------------------- |
|
NOTE: Ops not installed will be just-in-time (JIT) compiled at |
|
runtime if needed. Op compatibility means that your system |
|
meet the required dependencies to JIT install the op. |
|
-------------------------------------------------- |
|
JIT compiled ops requires ninja |
|
ninja .................. [OKAY]

--------------------------------------------------

op name ................ installed .. compatible

--------------------------------------------------

cpu_adam ............... [NO] ....... [OKAY]

fused_adam ............. [NO] ....... [OKAY]

deepspeed_not_implemented [NO] ....... [OKAY]

transformer_inference .. [NO] ....... [OKAY]
|
-------------------------------------------------- |
|
DeepSpeed general environment info: |
|
torch install path ............... ['/usr/local/lib/python3.10/dist-packages/torch'] |
|
torch version .................... 2.1.1a0+gitb51c9f6 |
|
deepspeed install path ........... ['/usr/local/lib/python3.10/dist-packages/deepspeed'] |
|
deepspeed info ................... 0.12.4+hpu.synapse.v1.14.0, fad45b2, 1.14.0 |
|
deepspeed wheel compiled w. ...... torch 2.1 |
|
shared memory (/dev/shm) size .... 503.72 GB |
|
_initialize_distributed: Initializing with below params: |
|
args.local_rank: 4 |
|
args.world_size: 8 |
|
args.rank: 4 |
|
args.distributed_backend: hccl |
|
fatal: detected dubious ownership in repository at '/Model-References' |
|
To add an exception for this directory, call: |
|
|
|
git config --global --add safe.directory /Model-References |
|
**** Git info for Megatron: git_hash=unknown git_branch=unknown **** |
|
-------------------------------------------------- |
|
DeepSpeed C++/CUDA extension op report |
|
-------------------------------------------------- |
|
NOTE: Ops not installed will be just-in-time (JIT) compiled at |
|
runtime if needed. Op compatibility means that your system |
|
meet the required dependencies to JIT install the op. |
|
-------------------------------------------------- |
|
JIT compiled ops requires ninja |
|
ninja .................. [OKAY]

--------------------------------------------------

op name ................ installed .. compatible

--------------------------------------------------

cpu_adam ............... [NO] ....... [OKAY]

fused_adam ............. [NO] ....... [OKAY]

deepspeed_not_implemented [NO] ....... [OKAY]

transformer_inference .. [NO] ....... [OKAY]
|
-------------------------------------------------- |
|
DeepSpeed general environment info: |
|
torch install path ............... ['/usr/local/lib/python3.10/dist-packages/torch'] |
|
torch version .................... 2.1.1a0+gitb51c9f6 |
|
deepspeed install path ........... ['/usr/local/lib/python3.10/dist-packages/deepspeed'] |
|
deepspeed info ................... 0.12.4+hpu.synapse.v1.14.0, fad45b2, 1.14.0 |
|
deepspeed wheel compiled w. ...... torch 2.1 |
|
shared memory (/dev/shm) size .... 503.72 GB |
|
> padded vocab (size: 50257) with 47 dummy tokens (new size: 50304) |
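The 47 dummy tokens come from rounding the GPT-2 vocabulary up to a multiple of make_vocab_size_divisible_by (128) times the tensor-parallel size (1), per the arguments above; a quick check of the arithmetic:

    # Values from the log: GPT-2 vocab size and the padding settings above.
    orig_vocab_size = 50257
    multiple = 128 * 1  # make_vocab_size_divisible_by * tensor_model_parallel_size

    # Round up to the next multiple, which is what the padding step reports.
    padded = ((orig_vocab_size + multiple - 1) // multiple) * multiple
    print(padded, padded - orig_vocab_size)  # 50304 47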
|
_initialize_distributed: Initializing with below params: |
|
args.local_rank: 0 |
|
args.world_size: 8 |
|
args.rank: 0 |
|
args.distributed_backend: hccl |
|
-------------------------------------------------- |
|
DeepSpeed C++/CUDA extension op report |
|
-------------------------------------------------- |
|
NOTE: Ops not installed will be just-in-time (JIT) compiled at |
|
runtime if needed. Op compatibility means that your system |
|
meet the required dependencies to JIT install the op. |
|
-------------------------------------------------- |
|
JIT compiled ops requires ninja |
|
ninja .................. [OKAY]

--------------------------------------------------

op name ................ installed .. compatible

--------------------------------------------------

cpu_adam ............... [NO] ....... [OKAY]

fused_adam ............. [NO] ....... [OKAY]

deepspeed_not_implemented [NO] ....... [OKAY]

transformer_inference .. [NO] ....... [OKAY]
|
-------------------------------------------------- |
|
DeepSpeed general environment info: |
|
torch install path ............... ['/usr/local/lib/python3.10/dist-packages/torch'] |
|
torch version .................... 2.1.1a0+gitb51c9f6 |
|
deepspeed install path ........... ['/usr/local/lib/python3.10/dist-packages/deepspeed'] |
|
deepspeed info ................... 0.12.4+hpu.synapse.v1.14.0, fad45b2, 1.14.0 |
|
deepspeed wheel compiled w. ...... torch 2.1 |
|
shared memory (/dev/shm) size .... 503.72 GB |
|
fatal: detected dubious ownership in repository at '/Model-References' |
|
To add an exception for this directory, call: |
|
|
|
git config --global --add safe.directory /Model-References |
|
**** Git info for Megatron: git_hash=unknown git_branch=unknown **** |
|
/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. |
|
warnings.warn( |
|
hccl device_count: 8 |
|
[2024-04-17 13:36:29,411] [WARNING] [comm.py:163:init_deepspeed_backend] HCCL backend in DeepSpeed not yet implemented |
|
[2024-04-17 13:36:29,411] [INFO] [comm.py:637:init_distributed] cdb=None |
|
fatal: detected dubious ownership in repository at '/Model-References' |
|
To add an exception for this directory, call: |
|
|
|
git config --global --add safe.directory /Model-References |
|
**** Git info for Megatron: git_hash=unknown git_branch=unknown **** |
|
/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. |
|
warnings.warn( |
|
hccl device_count: 8 |
|
[2024-04-17 13:36:29,416] [WARNING] [comm.py:163:init_deepspeed_backend] HCCL backend in DeepSpeed not yet implemented |
|
[2024-04-17 13:36:29,416] [INFO] [comm.py:637:init_distributed] cdb=None |
|
_initialize_distributed: Initializing with below params: |
|
args.local_rank: 1 |
|
args.world_size: 8 |
|
args.rank: 1 |
|
args.distributed_backend: hccl |
|
_initialize_distributed: Initializing with below params: |
|
args.local_rank: 6 |
|
args.world_size: 8 |
|
args.rank: 6 |
|
args.distributed_backend: hccl |
|
-------------------------------------------------- |
|
DeepSpeed C++/CUDA extension op report |
|
-------------------------------------------------- |
|
NOTE: Ops not installed will be just-in-time (JIT) compiled at |
|
runtime if needed. Op compatibility means that your system |
|
meet the required dependencies to JIT install the op. |
|
-------------------------------------------------- |
|
JIT compiled ops requires ninja |
|
ninja .................. [OKAY]

--------------------------------------------------

op name ................ installed .. compatible

--------------------------------------------------

cpu_adam ............... [NO] ....... [OKAY]

fused_adam ............. [NO] ....... [OKAY]

deepspeed_not_implemented [NO] ....... [OKAY]

transformer_inference .. [NO] ....... [OKAY]
|
-------------------------------------------------- |
|
DeepSpeed general environment info: |
|
torch install path ............... ['/usr/local/lib/python3.10/dist-packages/torch'] |
|
torch version .................... 2.1.1a0+gitb51c9f6 |
|
deepspeed install path ........... ['/usr/local/lib/python3.10/dist-packages/deepspeed'] |
|
deepspeed info ................... 0.12.4+hpu.synapse.v1.14.0, fad45b2, 1.14.0 |
|
deepspeed wheel compiled w. ...... torch 2.1 |
|
shared memory (/dev/shm) size .... 503.72 GB |
|
-------------------------------------------------- |
|
DeepSpeed C++/CUDA extension op report |
|
-------------------------------------------------- |
|
NOTE: Ops not installed will be just-in-time (JIT) compiled at |
|
runtime if needed. Op compatibility means that your system |
|
meet the required dependencies to JIT install the op. |
|
-------------------------------------------------- |
|
JIT compiled ops requires ninja |
|
ninja .................. [OKAY]

--------------------------------------------------

op name ................ installed .. compatible

--------------------------------------------------

cpu_adam ............... [NO] ....... [OKAY]

fused_adam ............. [NO] ....... [OKAY]

deepspeed_not_implemented [NO] ....... [OKAY]

transformer_inference .. [NO] ....... [OKAY]
|
-------------------------------------------------- |
|
DeepSpeed general environment info: |
|
torch install path ............... ['/usr/local/lib/python3.10/dist-packages/torch'] |
|
torch version .................... 2.1.1a0+gitb51c9f6 |
|
deepspeed install path ........... ['/usr/local/lib/python3.10/dist-packages/deepspeed'] |
|
deepspeed info ................... 0.12.4+hpu.synapse.v1.14.0, fad45b2, 1.14.0 |
|
deepspeed wheel compiled w. ...... torch 2.1 |
|
shared memory (/dev/shm) size .... 503.72 GB |
|
/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. |
|
warnings.warn( |
|
hccl device_count: 8 |
|
> initializing torch distributed ... |
|
[2024-04-17 13:36:29,471] [WARNING] [comm.py:163:init_deepspeed_backend] HCCL backend in DeepSpeed not yet implemented |
|
[2024-04-17 13:36:29,471] [INFO] [comm.py:637:init_distributed] cdb=None |
|
[2024-04-17 13:36:29,471] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend hccl |
|
fatal: detected dubious ownership in repository at '/Model-References' |
|
To add an exception for this directory, call: |
|
|
|
git config --global --add safe.directory /Model-References |
|
**** Git info for Megatron: git_hash=unknown git_branch=unknown **** |
|
/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. |
|
warnings.warn( |
|
hccl device_count: 8 |
|
fatal: detected dubious ownership in repository at '/Model-References' |
|
To add an exception for this directory, call: |
|
|
|
git config --global --add safe.directory /Model-References |
|
[2024-04-17 13:36:29,495] [WARNING] [comm.py:163:init_deepspeed_backend] HCCL backend in DeepSpeed not yet implemented |
|
[2024-04-17 13:36:29,496] [INFO] [comm.py:637:init_distributed] cdb=None |
|
**** Git info for Megatron: git_hash=unknown git_branch=unknown **** |
|
> setting tensorboard ... |
|
_initialize_distributed: Initializing with below params: |
|
args.local_rank: 7 |
|
args.world_size: 8 |
|
args.rank: 7 |
|
args.distributed_backend: hccl |
|
/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. |
|
warnings.warn( |
|
hccl device_count: 8 |
|
[2024-04-17 13:36:29,532] [WARNING] [comm.py:163:init_deepspeed_backend] HCCL backend in DeepSpeed not yet implemented |
|
[2024-04-17 13:36:29,532] [INFO] [comm.py:637:init_distributed] cdb=None |
|
_initialize_distributed: Initializing with below params: |
|
args.local_rank: 3 |
|
args.world_size: 8 |
|
args.rank: 3 |
|
args.distributed_backend: hccl |
|
_initialize_distributed: Initializing with below params: |
|
args.local_rank: 5 |
|
args.world_size: 8 |
|
args.rank: 5 |
|
args.distributed_backend: hccl |
|
/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. |
|
warnings.warn( |
|
hccl device_count: 8 |
|
[2024-04-17 13:36:29,568] [WARNING] [comm.py:163:init_deepspeed_backend] HCCL backend in DeepSpeed not yet implemented |
|
[2024-04-17 13:36:29,568] [INFO] [comm.py:637:init_distributed] cdb=None |
|
/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. |
|
warnings.warn( |
|
hccl device_count: 8 |
|
[2024-04-17 13:36:29,609] [WARNING] [comm.py:163:init_deepspeed_backend] HCCL backend in DeepSpeed not yet implemented |
|
[2024-04-17 13:36:29,610] [INFO] [comm.py:637:init_distributed] cdb=None |
|
/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. |
|
warnings.warn( |
|
hccl device_count: 8 |
|
[2024-04-17 13:36:29,627] [WARNING] [comm.py:163:init_deepspeed_backend] HCCL backend in DeepSpeed not yet implemented |
|
[2024-04-17 13:36:29,627] [INFO] [comm.py:637:init_distributed] cdb=None |
|
[E socket.cpp:922] [c10d] The client socket has timed out after 1800s while trying to connect to (100.83.134.158, 29500). |
|
Traceback (most recent call last): |
|
File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 110, in <module> |
|
pretrain(train_valid_test_datasets_provider, model_provider, forward_step, extra_args_provider=llama_argument_handler, |
|
File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/training.py", line 162, in pretrain |
|
initialize_megatron(extra_args_provider=extra_args_provider, |
|
File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/initialize.py", line 95, in initialize_megatron |
|
finish_mpu_init() |
|
File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/initialize.py", line 77, in finish_mpu_init |
|
_initialize_distributed() |
|
File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/initialize.py", line 276, in _initialize_distributed |
|
deepspeed.init_distributed(dist_backend=args.distributed_backend) |
|
File "/usr/local/lib/python3.10/dist-packages/deepspeed/comm/comm.py", line 670, in init_distributed |
|
cdb = TorchBackend(dist_backend, timeout, init_method, rank, world_size) |
|
File "/usr/local/lib/python3.10/dist-packages/deepspeed/comm/torch.py", line 120, in __init__ |
|
[E socket.cpp:922] [c10d] The client socket has timed out after 1800s while trying to connect to (100.83.134.158, 29500). |
|
self.init_process_group(backend, timeout, init_method, rank, world_size) |
|
File "/usr/local/lib/python3.10/dist-packages/deepspeed/comm/torch.py", line 148, in init_process_group |
|
torch.distributed.init_process_group(backend, timeout=timeout, init_method=init_method) |
|
File "/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/core/torch_overwrites.py", line 259, in wrap_init_process_group |
|
return init_process_group_orig( |
|
File "/usr/local/lib/python3.10/dist-packages/torch/distributed/c10d_logger.py", line 74, in wrapper |
|
func_return = func(*args, **kwargs) |
|
File "/usr/local/lib/python3.10/dist-packages/torch/distributed/distributed_c10d.py", line 1141, in init_process_group |
|
Traceback (most recent call last): |
|
store, rank, world_size = next(rendezvous_iterator) |
|
File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 110, in <module> |
|
File "/usr/local/lib/python3.10/dist-packages/torch/distributed/rendezvous.py", line 241, in _env_rendezvous_handler |
|
store = _create_c10d_store(master_addr, master_port, rank, world_size, timeout) |
|
File "/usr/local/lib/python3.10/dist-packages/torch/distributed/rendezvous.py", line 172, in _create_c10d_store |
|
return TCPStore( |
|
TimeoutError: The client socket has timed out after 1800s while trying to connect to (100.83.134.158, 29500). |
|
pretrain(train_valid_test_datasets_provider, model_provider, forward_step, extra_args_provider=llama_argument_handler, |
|
File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/training.py", line 162, in pretrain |
|
initialize_megatron(extra_args_provider=extra_args_provider, |
|
File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/initialize.py", line 95, in initialize_megatron |
|
finish_mpu_init() |
|
File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/initialize.py", line 77, in finish_mpu_init |
|
_initialize_distributed() |
|
File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/initialize.py", line 276, in _initialize_distributed |
|
deepspeed.init_distributed(dist_backend=args.distributed_backend) |
|
File "/usr/local/lib/python3.10/dist-packages/deepspeed/comm/comm.py", line 670, in init_distributed |
|
cdb = TorchBackend(dist_backend, timeout, init_method, rank, world_size) |
|
File "/usr/local/lib/python3.10/dist-packages/deepspeed/comm/torch.py", line 120, in __init__ |
|
self.init_process_group(backend, timeout, init_method, rank, world_size) |
|
File "/usr/local/lib/python3.10/dist-packages/deepspeed/comm/torch.py", line 148, in init_process_group |
|
torch.distributed.init_process_group(backend, timeout=timeout, init_method=init_method) |
|
File "/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/core/torch_overwrites.py", line 259, in wrap_init_process_group |
|
return init_process_group_orig( |
|
File "/usr/local/lib/python3.10/dist-packages/torch/distributed/c10d_logger.py", line 74, in wrapper |
|
func_return = func(*args, **kwargs) |
|
File "/usr/local/lib/python3.10/dist-packages/torch/distributed/distributed_c10d.py", line 1141, in init_process_group |
|
store, rank, world_size = next(rendezvous_iterator) |
|
File "/usr/local/lib/python3.10/dist-packages/torch/distributed/rendezvous.py", line 241, in _env_rendezvous_handler |
|
store = _create_c10d_store(master_addr, master_port, rank, world_size, timeout) |
|
File "/usr/local/lib/python3.10/dist-packages/torch/distributed/rendezvous.py", line 172, in _create_c10d_store |
|
return TCPStore( |
|
TimeoutError: The client socket has timed out after 1800s while trying to connect to (100.83.134.158, 29500). |
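The tracebacks all point at the same failure: the c10d TCPStore rendezvous cannot reach MASTER_ADDR 100.83.134.158 on MASTER_PORT 29500 within the 1800 s timeout. A minimal, hypothetical connectivity probe for that endpoint (not part of the run above; just a way to confirm from a worker whether the store port is reachable at all):

    # Hypothetical probe: check whether the rendezvous endpoint used by the
    # failing run (values taken from the log) accepts TCP connections.
    import socket

    master_addr, master_port = "100.83.134.158", 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.settimeout(10)
        try:
            s.connect((master_addr, master_port))
            print("rendezvous port reachable")
        except OSError as exc:
            print(f"cannot reach {master_addr}:{master_port}: {exc}")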
|
[E socket.cpp:922] [c10d] The client socket has timed out after 1800s while trying to connect to (100.83.134.158, 29500). |
|
Traceback (most recent call last): |
|
File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 110, in <module> |
|
pretrain(train_valid_test_datasets_provider, model_provider, forward_step, extra_args_provider=llama_argument_handler, |
|
File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/training.py", line 162, in pretrain |
|
initialize_megatron(extra_args_provider=extra_args_provider, |
|
File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/initialize.py", line 95, in initialize_megatron |
|
finish_mpu_init() |
|
File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/initialize.py", line 77, in finish_mpu_init |
|
_initialize_distributed() |
|
File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/initialize.py", line 276, in _initialize_distributed |
|
deepspeed.init_distributed(dist_backend=args.distributed_backend) |
|
File "/usr/local/lib/python3.10/dist-packages/deepspeed/comm/comm.py", line 670, in init_distributed |
|
cdb = TorchBackend(dist_backend, timeout, init_method, rank, world_size) |
|
File "/usr/local/lib/python3.10/dist-packages/deepspeed/comm/torch.py", line 120, in __init__ |
|
self.init_process_group(backend, timeout, init_method, rank, world_size) |
|
File "/usr/local/lib/python3.10/dist-packages/deepspeed/comm/torch.py", line 148, in init_process_group |
|
torch.distributed.init_process_group(backend, timeout=timeout, init_method=init_method) |
|
File "/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/core/torch_overwrites.py", line 259, in wrap_init_process_group |
|
return init_process_group_orig( |
|
File "/usr/local/lib/python3.10/dist-packages/torch/distributed/c10d_logger.py", line 74, in wrapper |
|
func_return = func(*args, **kwargs) |
|
File "/usr/local/lib/python3.10/dist-packages/torch/distributed/distributed_c10d.py", line 1141, in init_process_group |
|
store, rank, world_size = next(rendezvous_iterator) |
|
File "/usr/local/lib/python3.10/dist-packages/torch/distributed/rendezvous.py", line 241, in _env_rendezvous_handler |
|
store = _create_c10d_store(master_addr, master_port, rank, world_size, timeout) |
|
File "/usr/local/lib/python3.10/dist-packages/torch/distributed/rendezvous.py", line 172, in _create_c10d_store |
|
return TCPStore( |
|
TimeoutError: The client socket has timed out after 1800s while trying to connect to (100.83.134.158, 29500). |
|
[E socket.cpp:922] [c10d] The client socket has timed out after 1800s while trying to connect to (100.83.134.158, 29500). |
|
Traceback (most recent call last): |
|
File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 110, in <module> |
|
pretrain(train_valid_test_datasets_provider, model_provider, forward_step, extra_args_provider=llama_argument_handler, |
|
File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/training.py", line 162, in pretrain |
|
initialize_megatron(extra_args_provider=extra_args_provider, |
|
File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/initialize.py", line 95, in initialize_megatron |
|
finish_mpu_init() |
|
File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/initialize.py", line 77, in finish_mpu_init |
|
_initialize_distributed() |
|
File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/initialize.py", line 276, in _initialize_distributed |
|
deepspeed.init_distributed(dist_backend=args.distributed_backend) |
|
File "/usr/local/lib/python3.10/dist-packages/deepspeed/comm/comm.py", line 670, in init_distributed |
|
cdb = TorchBackend(dist_backend, timeout, init_method, rank, world_size) |
|
File "/usr/local/lib/python3.10/dist-packages/deepspeed/comm/torch.py", line 120, in __init__ |
|
self.init_process_group(backend, timeout, init_method, rank, world_size) |
|
File "/usr/local/lib/python3.10/dist-packages/deepspeed/comm/torch.py", line 148, in init_process_group |
|
torch.distributed.init_process_group(backend, timeout=timeout, init_method=init_method) |
|
File "/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/core/torch_overwrites.py", line 259, in wrap_init_process_group |
|
return init_process_group_orig( |
|
File "/usr/local/lib/python3.10/dist-packages/torch/distributed/c10d_logger.py", line 74, in wrapper |
|
func_return = func(*args, **kwargs) |
|
File "/usr/local/lib/python3.10/dist-packages/torch/distributed/distributed_c10d.py", line 1141, in init_process_group |
|
store, rank, world_size = next(rendezvous_iterator) |
|
File "/usr/local/lib/python3.10/dist-packages/torch/distributed/rendezvous.py", line 241, in _env_rendezvous_handler |
|
store = _create_c10d_store(master_addr, master_port, rank, world_size, timeout) |
|
File "/usr/local/lib/python3.10/dist-packages/torch/distributed/rendezvous.py", line 172, in _create_c10d_store |
|
return TCPStore( |
|
TimeoutError: The client socket has timed out after 1800s while trying to connect to (100.83.134.158, 29500). |
|
[E socket.cpp:922] [c10d] The client socket has timed out after 1800s while trying to connect to (100.83.134.158, 29500). |
|
Traceback (most recent call last): |
|
File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 110, in <module> |
|
pretrain(train_valid_test_datasets_provider, model_provider, forward_step, extra_args_provider=llama_argument_handler, |
|
File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/training.py", line 162, in pretrain |
|
initialize_megatron(extra_args_provider=extra_args_provider, |
|
File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/initialize.py", line 95, in initialize_megatron |
|
finish_mpu_init() |
|
File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/initialize.py", line 77, in finish_mpu_init |
|
_initialize_distributed() |
|
File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/initialize.py", line 276, in _initialize_distributed |
|
deepspeed.init_distributed(dist_backend=args.distributed_backend) |
|
File "/usr/local/lib/python3.10/dist-packages/deepspeed/comm/comm.py", line 670, in init_distributed |
|
cdb = TorchBackend(dist_backend, timeout, init_method, rank, world_size) |
|
File "/usr/local/lib/python3.10/dist-packages/deepspeed/comm/torch.py", line 120, in __init__ |
|
self.init_process_group(backend, timeout, init_method, rank, world_size) |
|
File "/usr/local/lib/python3.10/dist-packages/deepspeed/comm/torch.py", line 148, in init_process_group |
|
torch.distributed.init_process_group(backend, timeout=timeout, init_method=init_method) |
|
File "/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/core/torch_overwrites.py", line 259, in wrap_init_process_group |
|
return init_process_group_orig( |
|
File "/usr/local/lib/python3.10/dist-packages/torch/distributed/c10d_logger.py", line 74, in wrapper |
|
func_return = func(*args, **kwargs) |
|
File "/usr/local/lib/python3.10/dist-packages/torch/distributed/distributed_c10d.py", line 1141, in init_process_group |
|
store, rank, world_size = next(rendezvous_iterator) |
|
File "/usr/local/lib/python3.10/dist-packages/torch/distributed/rendezvous.py", line 241, in _env_rendezvous_handler |
|
store = _create_c10d_store(master_addr, master_port, rank, world_size, timeout) |
|
File "/usr/local/lib/python3.10/dist-packages/torch/distributed/rendezvous.py", line 172, in _create_c10d_store |
|
return TCPStore( |
|
TimeoutError: The client socket has timed out after 1800s while trying to connect to (100.83.134.158, 29500). |
|
[E socket.cpp:922] [c10d] The client socket has timed out after 1800s while trying to connect to (100.83.134.158, 29500). |
|
Traceback (most recent call last): |
|
File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/./pretrain_llama.py", line 110, in <module> |
|
pretrain(train_valid_test_datasets_provider, model_provider, forward_step, extra_args_provider=llama_argument_handler, |
|
File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/training.py", line 162, in pretrain |
|
initialize_megatron(extra_args_provider=extra_args_provider, |
|
File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/initialize.py", line 95, in initialize_megatron |
|
finish_mpu_init() |
|
File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/initialize.py", line 77, in finish_mpu_init |
|
_initialize_distributed() |
|
File "/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/initialize.py", line 276, in _initialize_distributed |
|
deepspeed.init_distributed(dist_backend=args.distributed_backend) |
|
File "/usr/local/lib/python3.10/dist-packages/deepspeed/comm/comm.py", line 670, in init_distributed |
|
cdb = TorchBackend(dist_backend, timeout, init_method, rank, world_size) |
|
File "/usr/local/lib/python3.10/dist-packages/deepspeed/comm/torch.py", line 120, in __init__ |
|
self.init_process_group(backend, timeout, init_method, rank, world_size) |
|
File "/usr/local/lib/python3.10/dist-packages/deepspeed/comm/torch.py", line 148, in init_process_group |
|
torch.distributed.init_process_group(backend, timeout=timeout, init_method=init_method) |
|
File "/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/core/torch_overwrites.py", line 259, in wrap_init_process_group |
|
return init_process_group_orig( |
|
File "/usr/local/lib/python3.10/dist-packages/torch/distributed/c10d_logger.py", line 74, in wrapper |
|
func_return = func(*args, **kwargs) |
|
File "/usr/local/lib/python3.10/dist-packages/torch/distributed/distributed_c10d.py", line 1141, in init_process_group |
|
store, rank, world_size = next(rendezvous_iterator) |
|
File "/usr/local/lib/python3.10/dist-packages/torch/distributed/rendezvous.py", line 241, in _env_rendezvous_handler |
|
store = _create_c10d_store(master_addr, master_port, rank, world_size, timeout) |
|
File "/usr/local/lib/python3.10/dist-packages/torch/distributed/rendezvous.py", line 172, in _create_c10d_store |
|
return TCPStore( |
|
TimeoutError: The client socket has timed out after 1800s while trying to connect to (100.83.134.158, 29500). |
|
[The same "[E socket.cpp:922] ... client socket has timed out after 1800s" error and the identical TimeoutError traceback are emitted verbatim by the remaining worker ranks; the duplicate copies are omitted here.]
|
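Every rank fails at the same point in the stack above: deepspeed.init_distributed hands off to torch.distributed.init_process_group, the env:// rendezvous handler builds a c10d TCPStore, and the store client cannot connect to MASTER_ADDR 100.83.134.158 on MASTER_PORT 29500 within the 1800 s store timeout. As a rough illustration only (not taken from this run), the step that times out corresponds to a call of roughly this shape; the habana_frameworks import that registers the hccl backend is an assumption about the HPU stack in use:

    # Illustrative sketch of the rendezvous step shown in the tracebacks above.
    # With init_method="env://", every rank reads MASTER_ADDR, MASTER_PORT, RANK and
    # WORLD_SIZE from the environment, then connects to the c10d TCPStore hosted by
    # rank 0 before the process group can be created.
    from datetime import timedelta

    import habana_frameworks.torch.distributed.hccl  # noqa: F401 -- registers the "hccl" backend (Habana stack assumed)
    import torch.distributed as dist

    dist.init_process_group(
        backend="hccl",                   # matches --distributed-backend=hccl in the launcher command
        init_method="env://",             # rendezvous via MASTER_ADDR / MASTER_PORT / RANK / WORLD_SIZE
        timeout=timedelta(seconds=1800),  # the 1800 s after which the TimeoutError above is raised
    )

Until this call returns on every rank, no collective work can start, which is why the log shows no further training output before the launcher tears the job down below.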
[2024-04-17 14:06:29,834] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 2748 |
|
[2024-04-17 14:06:29,931] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 2749 |
|
[2024-04-17 14:06:29,931] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 2750 |
|
[2024-04-17 14:06:29,932] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 2751 |
|
[2024-04-17 14:06:29,984] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 2752 |
|
[2024-04-17 14:06:29,985] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 2753 |
|
[2024-04-17 14:06:30,037] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 2754 |
|
[2024-04-17 14:06:30,037] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 2755 |
|
[2024-04-17 14:06:30,037] [ERROR] [launch.py:322:sigkill_handler] ['/usr/bin/bash', '-c', ' cd /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed && python3 -u ./pretrain_llama.py --deepspeed --tensor-model-parallel-size 1 --pipeline-model-parallel-size 1 --position-embedding-type rotary --no-bias --layernorm-type rmsnorm --activation-func-type swiglu --layernorm-epsilon 1e-6 --num-layers 16 --hidden-size 5120 --ffn-hidden-size 13824 --num-attention-heads 40 --seq-length 2048 --micro-batch-size 1 --global-batch-size 256 --train-iters 10000 --log-interval 10 --eval-iters 10 --eval-interval 100 --data-path /data/arxiv//tokenized_text_document --vocab-file /data/arxiv//gpt2-vocab.json --merge-file /data/arxiv//gpt2-merges.txt --optimizer adamw --adam-beta1 0.9 --adam-beta2 0.95 --adam-eps 1e-6 --lr 3e-4 --lr-decay-style cosine --lr-warmup-iters 2000 --clip-grad 1.0 --weight-decay 0.1 --attention-dropout 0.1 --hidden-dropout 0.1 --tensorboard-dir /data/output/llama13b_x//tensorboard --log-validation-ppl-to-tensorboard --log-batch-size-to-tensorboard --log-timers-to-tensorboard --load /data/output/llama13b_x//checkpoints_zero_stage_2 --deepspeed_config=/data/output/llama13b_x//ds_config.json --zero-stage=0 --exit-interval 0 --no-masked-softmax-fusion --no-bias-gelu-fusion --no-bias-dropout-fusion --use-torch-compile false --use-fused-sdpa true --use-fused-sdpa-with-recompute false --bf16 --use_hpu --distributed-backend=hccl --hpu-deterministic --sequence-parallel --save /data/output/llama13b_x//checkpoints_zero_stage_2 --hf-save /data/output/llama13b_x//hf_ckpt --save-interval 100 --verify-checkpoint --verify-checkpoint-model-type LLAMA'] exits with return code = 1 |
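Because the rendezvous never completes on any rank, the launcher finally SIGKILLs all eight local subprocesses and exits with return code 1. The underlying failure is a plain TCP connect timeout to 100.83.134.158:29500 (a single-node run, so the workers are connecting back to the node's own address), which can be checked in isolation with a short probe such as the sketch below; the script and its hard-coded address and port are illustrative assumptions taken from the launcher command above, not part of the log:

    # Quick reachability probe (illustration, not from the log).
    import socket

    MASTER_ADDR = "100.83.134.158"   # master address from the launcher command above
    MASTER_PORT = 29500              # master port from the launcher command above

    # The 1800 s TimeoutError in the tracebacks is raised only after the TCPStore client fails
    # to establish exactly this kind of TCP connection, so a fast failure here points at the
    # network, a firewall, or a master process that never opened the port.
    with socket.create_connection((MASTER_ADDR, MASTER_PORT), timeout=10) as sock:
        print(f"connected to {MASTER_ADDR}:{MASTER_PORT} from {sock.getsockname()}")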
|
|