applied-ai-018 committed on
Commit c032b9d · verified · 1 Parent(s): 1a40fb1

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50):
  1. docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/dataset/README.md +5 -0
  2. docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/dataset/download_books.sh +2 -0
  3. docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/dataset/download_ckpt.sh +8 -0
  4. docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/dataset/download_vocab.sh +2 -0
  5. docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/fp16_deprecated/loss_scaler.py +26 -0
  6. docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__pycache__/__init__.cpython-310.pyc +0 -0
  7. docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__pycache__/bert_model.cpython-310.pyc +0 -0
  8. docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__pycache__/distributed.cpython-310.pyc +0 -0
  9. docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__pycache__/enums.cpython-310.pyc +0 -0
  10. docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__pycache__/fused_bias_gelu.cpython-310.pyc +0 -0
  11. docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__pycache__/fused_layer_norm.cpython-310.pyc +0 -0
  12. docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__pycache__/fused_softmax.cpython-310.pyc +0 -0
  13. docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__pycache__/gpt_model.cpython-310.pyc +0 -0
  14. docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__pycache__/language_model.cpython-310.pyc +0 -0
  15. docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__pycache__/llama_model.cpython-310.pyc +0 -0
  16. docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__pycache__/module.cpython-310.pyc +0 -0
  17. docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__pycache__/rmsnorm.cpython-310.pyc +0 -0
  18. docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__pycache__/rotary_pos_embedding.cpython-310.pyc +0 -0
  19. docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__pycache__/t5_model.cpython-310.pyc +0 -0
  20. docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__pycache__/transformer.cpython-310.pyc +0 -0
  21. docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__pycache__/utils.cpython-310.pyc +0 -0
  22. docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/biencoder_model.py +329 -0
  23. docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/classification.py +102 -0
  24. docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/distributed.py +231 -0
  25. docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/enums.py +21 -0
  26. docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/fused_bias_gelu.py +43 -0
  27. docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/fused_layer_norm.py +119 -0
  28. docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/fused_softmax.py +213 -0
  29. docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/gpt_model.py +437 -0
  30. docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/language_model.py +723 -0
  31. docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/llama_model.py +280 -0
  32. docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/module.py +206 -0
  33. docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/realm_model.py +204 -0
  34. docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/rmsnorm.py +49 -0
  35. docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/rotary_pos_embedding.py +82 -0
  36. docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/vision/__init__.py +0 -0
  37. docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/vision/__pycache__/__init__.cpython-310.pyc +0 -0
  38. docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/vision/__pycache__/knn_monitor.cpython-310.pyc +0 -0
  39. docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/vision/classification.py +86 -0
  40. docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/vision/dino.py +290 -0
  41. docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/vision/esvit_swin_backbone.py +849 -0
  42. docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/vision/inpainting.py +152 -0
  43. docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/vision/knn_monitor.py +129 -0
  44. docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/vision/mit_backbone.py +420 -0
  45. docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/vision/swin_backbone.py +625 -0
  46. docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/vision/utils.py +27 -0
  47. docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/vision/vit_backbone.py +245 -0
  48. docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/mpu/tests/__init__.py +0 -0
  49. docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/mpu/tests/commons.py +70 -0
  50. docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/mpu/tests/test_cross_entropy.py +96 -0
docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/dataset/README.md ADDED
@@ -0,0 +1,5 @@
+ # Run the scripts below to set up the dataset
+
+ bash download_books.sh
+
+ bash download_vocab.sh
docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/dataset/download_books.sh ADDED
@@ -0,0 +1,2 @@
+ wget https://the-eye.eu/public/AI/pile_neox/data/BookCorpusDataset_text_document.bin
+ wget https://the-eye.eu/public/AI/pile_neox/data/BookCorpusDataset_text_document.idx
docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/dataset/download_ckpt.sh ADDED
@@ -0,0 +1,8 @@
+ mkdir -p checkpoints/gpt2_345m
+
+ cd checkpoints/gpt2_345m
+ wget --content-disposition https://api.ngc.nvidia.com/v2/models/nvidia/megatron_lm_345m/versions/v0.0/zip -O megatron_lm_345m_v0.0.zip
+ unzip megatron_lm_345m_v0.0.zip
+ rm megatron_lm_345m_v0.0.zip
+ cd ../..
+
docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/dataset/download_vocab.sh ADDED
@@ -0,0 +1,2 @@
+ wget https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-vocab.json
+ wget https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-merges.txt
docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/fp16_deprecated/loss_scaler.py ADDED
@@ -0,0 +1,26 @@
+ # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+
+ """For backward compatibility, we need the class definitions to deserialize."""
+
+ class LossScaler:
+     def __init__(self, scale=1):
+         self.cur_scale = scale
+
+ class DynamicLossScaler:
+     def __init__(self,
+                  init_scale=2**32,
+                  scale_factor=2.,
+                  scale_window=1000,
+                  min_scale=1,
+                  delayed_shift=1,
+                  consecutive_hysteresis=False):
+         self.cur_scale = init_scale
+         self.cur_iter = 0
+         self.last_overflow_iter = -1
+         self.scale_factor = scale_factor
+         self.scale_window = scale_window
+         self.min_scale = min_scale
+         self.delayed_shift = delayed_shift
+         self.cur_hysteresis = delayed_shift
+         self.consecutive_hysteresis = consecutive_hysteresis
+
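The deprecated classes above exist only so that old pickled checkpoints referencing `fp16.loss_scaler` can still be deserialized (see the `sys.modules` aliasing in `biencoder_model.py` further down in this diff). For context, here is a minimal sketch of the dynamic loss-scaling policy that fields like `cur_scale`, `scale_factor`, `scale_window`, and `min_scale` typically parameterize; the `update_scale` helper is illustrative only and not part of this repository.

```python
# Illustrative only: the usual dynamic loss-scaling update that these fields
# parameterize (the live logic sits in the current optimizer code, not here).
def update_scale(scaler, overflow: bool):
    if overflow:
        # Gradient overflow: shrink the scale, but never below min_scale.
        scaler.cur_scale = max(scaler.cur_scale / scaler.scale_factor,
                               scaler.min_scale)
        scaler.last_overflow_iter = scaler.cur_iter
    elif (scaler.cur_iter - scaler.last_overflow_iter) % scaler.scale_window == 0:
        # A full window without overflow: grow the scale again.
        scaler.cur_scale *= scaler.scale_factor
    scaler.cur_iter += 1


scaler = DynamicLossScaler(init_scale=2**16)
update_scale(scaler, overflow=False)
```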
docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (876 Bytes).
 
docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__pycache__/bert_model.cpython-310.pyc ADDED
Binary file (6.46 kB).
 
docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__pycache__/distributed.cpython-310.pyc ADDED
Binary file (7.03 kB).
 
docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__pycache__/enums.cpython-310.pyc ADDED
Binary file (889 Bytes).
 
docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__pycache__/fused_bias_gelu.cpython-310.pyc ADDED
Binary file (1.33 kB).
 
docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__pycache__/fused_layer_norm.cpython-310.pyc ADDED
Binary file (3.09 kB).
 
docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__pycache__/fused_softmax.cpython-310.pyc ADDED
Binary file (5.72 kB).
 
docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__pycache__/gpt_model.cpython-310.pyc ADDED
Binary file (10.4 kB).
 
docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__pycache__/language_model.cpython-310.pyc ADDED
Binary file (15.6 kB).
 
docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__pycache__/llama_model.cpython-310.pyc ADDED
Binary file (7.45 kB).
 
docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__pycache__/module.cpython-310.pyc ADDED
Binary file (6.65 kB).
 
docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__pycache__/rmsnorm.cpython-310.pyc ADDED
Binary file (1.53 kB).
 
docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__pycache__/rotary_pos_embedding.cpython-310.pyc ADDED
Binary file (2.5 kB).
 
docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__pycache__/t5_model.cpython-310.pyc ADDED
Binary file (5.38 kB).
 
docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__pycache__/transformer.cpython-310.pyc ADDED
Binary file (43.4 kB).
 
docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/__pycache__/utils.cpython-310.pyc ADDED
Binary file (4.1 kB).
 
docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/biencoder_model.py ADDED
@@ -0,0 +1,329 @@
+ import os
+ import torch
+ import sys
+
+ from megatron import get_args, print_rank_0, get_tokenizer
+ from megatron.core import mpu
+ from megatron.checkpointing import fix_query_key_value_ordering
+ from megatron.checkpointing import get_checkpoint_tracker_filename
+ from megatron.checkpointing import get_checkpoint_name
+ from megatron.model.bert_model import bert_position_ids
+ from megatron.model.enums import AttnMaskType
+ from megatron.model.language_model import get_language_model
+ from megatron.model.utils import get_linear_layer
+ from megatron.model.utils import init_method_normal
+ from megatron.model.utils import scaled_init_method_normal
+ from .module import MegatronModule
+
+ def get_model_provider(only_query_model=False, only_context_model=False,
+         biencoder_shared_query_context_model=False):
+
+     def model_provider(pre_process=True, post_process=True):
+         """Build the model."""
+
+         print_rank_0('building Biencoder model ...')
+         model = biencoder_model_provider(only_query_model=only_query_model,
+             only_context_model = only_context_model,
+             biencoder_shared_query_context_model = \
+                 biencoder_shared_query_context_model,
+             pre_process=pre_process, post_process=post_process)
+
+         return model
+
+     return model_provider
+
+
+ def biencoder_model_provider(only_query_model=False,
+                              only_context_model=False,
+                              biencoder_shared_query_context_model=False,
+                              pre_process=True,
+                              post_process=True):
+     """Build the model."""
+
+     assert mpu.get_tensor_model_parallel_world_size() == 1 and \
+         mpu.get_pipeline_model_parallel_world_size() == 1, \
+         "Model parallel size > 1 not supported for ICT"
+
+     print_rank_0('building BiEncoderModel...')
+
+     # simpler to just keep using 2 tokentypes since
+     # the LM we initialize with has 2 tokentypes
+     model = BiEncoderModel(
+         num_tokentypes=2,
+         parallel_output=False,
+         only_query_model=only_query_model,
+         only_context_model=only_context_model,
+         biencoder_shared_query_context_model=\
+             biencoder_shared_query_context_model,
+         pre_process=pre_process,
+         post_process=post_process)
+
+     return model
+
+
+ class BiEncoderModel(MegatronModule):
+     """Bert-based module for Biencoder model."""
+
+     def __init__(self,
+                  num_tokentypes=1,
+                  parallel_output=True,
+                  only_query_model=False,
+                  only_context_model=False,
+                  biencoder_shared_query_context_model=False,
+                  pre_process=True,
+                  post_process=True):
+         super(BiEncoderModel, self).__init__()
+         args = get_args()
+
+         bert_kwargs = dict(
+             num_tokentypes=num_tokentypes,
+             parallel_output=parallel_output,
+             pre_process=pre_process,
+             post_process=post_process)
+
+         self.biencoder_shared_query_context_model = \
+             biencoder_shared_query_context_model
+         assert not (only_context_model and only_query_model)
+         self.use_context_model = not only_query_model
+         self.use_query_model = not only_context_model
+         self.biencoder_projection_dim = args.biencoder_projection_dim
+
+         if self.biencoder_shared_query_context_model:
+             self.model = PretrainedBertModel(**bert_kwargs)
+             self._model_key = 'shared_model'
+             self.query_model, self.context_model = self.model, self.model
+         else:
+             if self.use_query_model:
+                 # this model embeds (pseudo-)queries - Embed_input in the paper
+                 self.query_model = PretrainedBertModel(**bert_kwargs)
+                 self._query_key = 'query_model'
+
+             if self.use_context_model:
+                 # this model embeds evidence blocks - Embed_doc in the paper
+                 self.context_model = PretrainedBertModel(**bert_kwargs)
+                 self._context_key = 'context_model'
+
+     def set_input_tensor(self, input_tensor):
+         """See megatron.model.transformer.set_input_tensor()"""
+         # this is just a placeholder and will be needed when model
+         # parallelism will be used
+         # self.language_model.set_input_tensor(input_tensor)
+         return
+
+     def forward(self, query_tokens, query_attention_mask, query_types,
+                 context_tokens, context_attention_mask, context_types):
+         """Run a forward pass for each of the models and
+         return the respective embeddings."""
+
+         if self.use_query_model:
+             query_logits = self.embed_text(self.query_model,
+                                            query_tokens,
+                                            query_attention_mask,
+                                            query_types)
+         else:
+             raise ValueError("Cannot embed query without the query model.")
+         if self.use_context_model:
+             context_logits = self.embed_text(self.context_model,
+                                              context_tokens,
+                                              context_attention_mask,
+                                              context_types)
+         else:
+             raise ValueError("Cannot embed block without the block model.")
+         return query_logits, context_logits
+
+     @staticmethod
+     def embed_text(model, tokens, attention_mask, token_types):
+         """Embed a batch of tokens using the model"""
+         logits = model(tokens,
+                        attention_mask,
+                        token_types)
+         return logits
+
+     def state_dict_for_save_checkpoint(self, prefix='', keep_vars=False):
+         """Save dict with state dicts of each of the models."""
+         state_dict_ = {}
+         if self.biencoder_shared_query_context_model:
+             state_dict_[self._model_key] = \
+                 self.model.state_dict_for_save_checkpoint(
+                     prefix=prefix, keep_vars=keep_vars)
+         else:
+             if self.use_query_model:
+                 state_dict_[self._query_key] = \
+                     self.query_model.state_dict_for_save_checkpoint(
+                         prefix=prefix, keep_vars=keep_vars)
+
+             if self.use_context_model:
+                 state_dict_[self._context_key] = \
+                     self.context_model.state_dict_for_save_checkpoint(
+                         prefix=prefix, keep_vars=keep_vars)
+
+         return state_dict_
+
+     def load_state_dict(self, state_dict, strict=True):
+         """Load the state dicts of each of the models"""
+         if self.biencoder_shared_query_context_model:
+             print_rank_0("Loading shared query-context model")
+             self.model.load_state_dict(state_dict[self._model_key], \
+                 strict=strict)
+         else:
+             if self.use_query_model:
+                 print_rank_0("Loading query model")
+                 self.query_model.load_state_dict( \
+                     state_dict[self._query_key], strict=strict)
+
+             if self.use_context_model:
+                 print_rank_0("Loading context model")
+                 self.context_model.load_state_dict( \
+                     state_dict[self._context_key], strict=strict)
+
+     def init_state_dict_from_bert(self):
+         """Initialize the state from a pretrained BERT model
+         on iteration zero of ICT pretraining"""
+         args = get_args()
+
+         if args.bert_load is None:
+             print_rank_0("bert-load argument is None")
+             return
+
+         tracker_filename = get_checkpoint_tracker_filename(args.bert_load)
+         if not os.path.isfile(tracker_filename):
+             raise FileNotFoundError("Could not find BERT checkpoint")
+         with open(tracker_filename, 'r') as f:
+             iteration = int(f.read().strip())
+             assert iteration > 0
+
+         checkpoint_name = get_checkpoint_name(args.bert_load, iteration, False)
+         if mpu.get_data_parallel_rank() == 0:
+             print('global rank {} is loading BERT checkpoint {}'.format(
+                 torch.distributed.get_rank(), checkpoint_name))
+
+         # Load the checkpoint.
+         try:
+             state_dict = torch.load(checkpoint_name, map_location='cpu')
+         except ModuleNotFoundError:
+             from megatron.fp16_deprecated import loss_scaler
+             # For backward compatibility.
+             print_rank_0(' > deserializing using the old code structure ...')
+             sys.modules['fp16.loss_scaler'] = sys.modules[
+                 'megatron.fp16_deprecated.loss_scaler']
+             sys.modules['megatron.fp16.loss_scaler'] = sys.modules[
+                 'megatron.fp16_deprecated.loss_scaler']
+             state_dict = torch.load(checkpoint_name, map_location='cpu')
+             sys.modules.pop('fp16.loss_scaler', None)
+             sys.modules.pop('megatron.fp16.loss_scaler', None)
+         except BaseException:
+             print_rank_0('could not load the BERT checkpoint')
+             sys.exit()
+
+         checkpoint_version = state_dict.get('checkpoint_version', 0)
+
+         # load the LM state dict into each model
+         model_dict = state_dict['model']['language_model']
+
+         if self.biencoder_shared_query_context_model:
+             self.model.language_model.load_state_dict(model_dict)
+             fix_query_key_value_ordering(self.model, checkpoint_version)
+         else:
+             if self.use_query_model:
+                 self.query_model.language_model.load_state_dict(model_dict)
+                 # give each model the same ict_head to begin with as well
+                 if self.biencoder_projection_dim > 0:
+                     query_proj_state_dict = \
+                         self.state_dict_for_save_checkpoint()\
+                         [self._query_key]['projection_enc']
+                 fix_query_key_value_ordering(self.query_model, checkpoint_version)
+
+             if self.use_context_model:
+                 self.context_model.language_model.load_state_dict(model_dict)
+                 if self.query_model is not None and \
+                         self.biencoder_projection_dim > 0:
+                     self.context_model.projection_enc.load_state_dict\
+                         (query_proj_state_dict)
+                 fix_query_key_value_ordering(self.context_model, checkpoint_version)
+
+
+ class PretrainedBertModel(MegatronModule):
+     """BERT-based encoder for queries or contexts used for
+     learned information retrieval."""
+
+     def __init__(self, num_tokentypes=2,
+                  parallel_output=True, pre_process=True, post_process=True):
+         super(PretrainedBertModel, self).__init__()
+
+         args = get_args()
+         tokenizer = get_tokenizer()
+         self.pad_id = tokenizer.pad
+         self.biencoder_projection_dim = args.biencoder_projection_dim
+         self.parallel_output = parallel_output
+         self.pre_process = pre_process
+         self.post_process = post_process
+         init_method = init_method_normal(args.init_method_std)
+         scaled_init_method = scaled_init_method_normal(
+             args.init_method_std, args.num_layers)
+
+         self.language_model, self._language_model_key = get_language_model(
+             num_tokentypes=num_tokentypes,
+             add_pooler=False,
+             encoder_attn_mask_type=AttnMaskType.padding,
+             init_method=init_method,
+             scaled_init_method=scaled_init_method,
+             pre_process=self.pre_process,
+             post_process=self.post_process)
+
+         if args.biencoder_projection_dim > 0:
+             self.projection_enc = get_linear_layer(args.hidden_size,
+                                                    args.biencoder_projection_dim,
+                                                    init_method,
+                                                    gather_params_on_init=args.zero_stage == 3)
+             self._projection_enc_key = 'projection_enc'
+
+     def forward(self, input_ids, attention_mask, tokentype_ids=None):
+         extended_attention_mask = attention_mask.unsqueeze(1)
+         #extended_attention_mask = bert_extended_attention_mask(attention_mask)
+         position_ids = bert_position_ids(input_ids)
+
+         lm_output = self.language_model(input_ids,
+                                         position_ids,
+                                         extended_attention_mask,
+                                         tokentype_ids=tokentype_ids)
+         # This mask will be used in average-pooling and max-pooling
+         pool_mask = (input_ids == self.pad_id).unsqueeze(2)
+
+         # Taking the representation of the [CLS] token of BERT
+         pooled_output = lm_output[0, :, :]
+
+         # Converting to float16 dtype
+         pooled_output = pooled_output.to(lm_output.dtype)
+
+         # Output.
+         if self.biencoder_projection_dim:
+             pooled_output = self.projection_enc(pooled_output)
+
+         return pooled_output
+
+     def state_dict_for_save_checkpoint(self, prefix='', keep_vars=False):
+         """For easy load when model is combined with other heads,
+         add an extra key."""
+
+         state_dict_ = {}
+         state_dict_[self._language_model_key] \
+             = self.language_model.state_dict_for_save_checkpoint(
+                 prefix=prefix, keep_vars=keep_vars)
+
+         if self.biencoder_projection_dim > 0:
+             state_dict_[self._projection_enc_key] = \
+                 self.projection_enc.state_dict(prefix=prefix,
+                                                keep_vars=keep_vars)
+
+         return state_dict_
+
+     def load_state_dict(self, state_dict, strict=True):
+         """Customized load."""
+         print_rank_0("loading pretrained weights")
+         self.language_model.load_state_dict(
+             state_dict[self._language_model_key], strict=strict)
+
+         if self.biencoder_projection_dim > 0:
+             print_rank_0("loading projection head weights")
+             self.projection_enc.load_state_dict(
+                 state_dict[self._projection_enc_key], strict=strict)
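`BiEncoderModel.forward` returns one embedding per query and one per context block. Below is a minimal sketch of how such paired embeddings are typically scored for in-batch retrieval (dot products, with the diagonal as the positive pairs); the tensors and loss are placeholders for illustration, not this repository's training loop.

```python
import torch

# Hypothetical embeddings as returned by BiEncoderModel.forward():
# one row per query / context, already projected to biencoder_projection_dim.
query_logits = torch.randn(4, 128)    # [batch, dim]
context_logits = torch.randn(4, 128)  # [batch, dim]

# In-batch retrieval scores: entry (i, j) scores query i against context j,
# so the diagonal holds the positive (query, context) pairs.
retrieval_scores = query_logits @ context_logits.t()          # [batch, batch]
labels = torch.arange(retrieval_scores.size(0))
loss = torch.nn.functional.cross_entropy(retrieval_scores, labels)
```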
docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/classification.py ADDED
@@ -0,0 +1,102 @@
+ # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+
+ """Classification model."""
+
+ import torch
+
+ from megatron import get_args, print_rank_last
+ from megatron.model.enums import AttnMaskType
+ from megatron.model.bert_model import bert_extended_attention_mask, bert_position_ids
+ from megatron.model.language_model import get_language_model
+ from megatron.model.utils import get_linear_layer
+ from megatron.model.utils import init_method_normal
+ from megatron.model.utils import scaled_init_method_normal
+ from .module import MegatronModule
+
+
+ class Classification(MegatronModule):
+
+     def __init__(self,
+                  config,
+                  num_classes,
+                  num_tokentypes=2,
+                  pre_process=True,
+                  post_process=True):
+         super().__init__(config=config, share_embeddings_and_output_weights=False)
+         args = get_args()
+
+         self.num_classes = num_classes
+         self.pre_process = pre_process
+         self.post_process = post_process
+
+         self.language_model, self._language_model_key = get_language_model(
+             config=config,
+             num_tokentypes=num_tokentypes,
+             add_pooler=True,
+             encoder_attn_mask_type=AttnMaskType.padding,
+             pre_process=self.pre_process,
+             post_process=self.post_process)
+
+         # Multi-choice head.
+         if self.post_process:
+             self.classification_dropout = torch.nn.Dropout(args.hidden_dropout)
+             self.classification_head = get_linear_layer(args.hidden_size,
+                                                         self.num_classes,
+                                                         init_method,
+                                                         gather_params_on_init=args.zero_stage == 3)
+             self._classification_head_key = 'classification_head'
+
+     def set_input_tensor(self, input_tensor):
+         """See megatron.model.transformer.set_input_tensor()"""
+         self.language_model.set_input_tensor(input_tensor)
+
+     def forward(self, model_input, attention_mask, tokentype_ids=None):
+
+         extended_attention_mask = bert_extended_attention_mask(attention_mask)
+         input_ids = model_input
+         position_ids = bert_position_ids(input_ids)
+
+         lm_output = self.language_model(
+             input_ids,
+             position_ids,
+             extended_attention_mask,
+             tokentype_ids=tokentype_ids
+         )
+
+         if self.post_process:
+             _, pooled_output = lm_output[0], lm_output[1]
+             classification_output = self.classification_dropout(pooled_output)
+             classification_logits = self.classification_head(classification_output)
+
+             # Reshape back to separate choices.
+             classification_logits = classification_logits.view(-1, self.num_classes)
+
+             return classification_logits
+         return lm_output
+
+     def state_dict_for_save_checkpoint(self, prefix='', keep_vars=False):
+         """For easy load when model is combined with other heads,
+         add an extra key."""
+
+         state_dict_ = {}
+         state_dict_[self._language_model_key] \
+             = self.language_model.state_dict_for_save_checkpoint(prefix=prefix,
+                                                                  keep_vars=keep_vars)
+         if self.post_process:
+             state_dict_[self._classification_head_key] \
+                 = self.classification_head.state_dict(prefix=prefix, keep_vars=keep_vars)
+         return state_dict_
+
+     def load_state_dict(self, state_dict, strict=True):
+         """Customized load."""
+
+         self.language_model.load_state_dict(
+             state_dict[self._language_model_key], strict=strict)
+         if self.post_process:
+             if self._classification_head_key in state_dict:
+                 self.classification_head.load_state_dict(
+                     state_dict[self._classification_head_key], strict=strict)
+             else:
+                 print_rank_last('***WARNING*** could not find {} in the checkpoint, '
+                                 'initializing to random'.format(
+                                     self._classification_head_key))
docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/distributed.py ADDED
@@ -0,0 +1,231 @@
+ # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
+
+ from abc import ABC
+ from abc import abstractmethod
+ import math
+
+ import torch
+ from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
+
+ from megatron import get_args
+ from megatron.core import mpu
+ from .module import MegatronModule
+ from deepspeed.accelerator import get_accelerator
+
+ class MemoryBuffer:
+
+     def __init__(self, numel, numel_padded, dtype):
+         self.numel = numel
+         self.numel_padded = numel_padded
+         self.dtype = dtype
+         self.data = torch.zeros(self.numel_padded,
+                                 dtype=self.dtype,
+                                 device=get_accelerator().current_device_name(),
+                                 requires_grad=False)
+
+     def zero(self):
+         """Reset the buffer to zero."""
+         self.data.zero_()
+
+
+     def get(self, shape, start_index):
+         """Return a tensor with the input `shape` as a view into the
+         1-D data starting at `start_index`."""
+         end_index = start_index + shape.numel()
+         assert end_index <= self.numel, \
+             'requested tensor is out of the buffer range.'
+         buffer_tensor = self.data[start_index:end_index]
+         buffer_tensor = buffer_tensor.view(shape)
+         return buffer_tensor
+
+
+
+ class DistributedDataParallelBase(MegatronModule, ABC):
+     """Abstract class for DDP."""
+
+     def __init__(self, module):
+         super(DistributedDataParallelBase, self).__init__()
+         # Keep a pointer to the model.
+         self.module = module
+
+
+     @abstractmethod
+     def allreduce_gradients(self):
+         pass
+
+
+     def forward(self, *inputs, **kwargs):
+         return self.module(*inputs, **kwargs)
+
+
+     def state_dict(self, prefix='', keep_vars=False):
+         return self.module.state_dict(prefix=prefix, keep_vars=keep_vars)
+
+
+     def state_dict_for_save_checkpoint(self, prefix='', keep_vars=False):
+         return self.module.state_dict_for_save_checkpoint(prefix=prefix,
+                                                           keep_vars=keep_vars)
+
+
+     def load_state_dict(self, state_dict, strict=True):
+         self.module.load_state_dict(state_dict, strict=strict)
+
+
+
+ class DistributedDataParallel(DistributedDataParallelBase):
+     """DDP with contiguous buffer options to store and accumulate gradients.
+     This class:
+         - has the potential to reduce memory fragmentation.
+         - provides the option to do the gradient accumulation
+           in a type other than the params type (for example fp32)
+
+     Arguments:
+         module: input model.
+         accumulate_allreduce_grads_in_fp32: if true, do the gradient accumulation
+             and the gradient all-reduce all in float32. If this option is
+             true, we require `use_contiguous_buffers` to be true too.
+         use_contiguous_buffers: if true, use a contiguous buffer to store the
+             gradients.
+     """
+
+     def __init__(self, module,
+                  accumulate_allreduce_grads_in_fp32,
+                  use_contiguous_buffers):
+
+         super(DistributedDataParallel, self).__init__(module)
+
+         self.accumulate_allreduce_grads_in_fp32 \
+             = accumulate_allreduce_grads_in_fp32
+         self.use_contiguous_buffers = use_contiguous_buffers
+         # If we are using fp32-accumulate-allreduce explicitly
+         # this means we need main grads in a contiguous buffer.
+         if self.accumulate_allreduce_grads_in_fp32:
+             assert self.use_contiguous_buffers
+
+         # ===================================
+         # Rest of this part applies only to
+         # the case we use contiguous buffers.
+         # ===================================
+         self._grad_buffers = None
+         self._grad_buffer_param_index_map = None
+         if self.use_contiguous_buffers:
+             self._grad_buffers = {}
+             self._grad_buffer_param_index_map = {}
+             data_parallel_world_size = mpu.get_data_parallel_world_size()
+
+             # Simple function to define buffer type.
+             def _get_buffer_type(param):
+                 return torch.float if \
+                     self.accumulate_allreduce_grads_in_fp32 else param.dtype
+
+             # First calculate total number of elements per type.
+             type_num_elements = {}
+             for param in self.module.parameters():
+                 if param.requires_grad:
+                     dtype = _get_buffer_type(param)
+                     type_num_elements[dtype] = type_num_elements.get(dtype, 0) \
+                                                + param.data.nelement()
+
+             # Allocate the buffer.
+             for dtype, num_elements in type_num_elements.items():
+
+                 # If using distributed optimizer, pad memory buffer to be
+                 # multiple of data_parallel_world_size. (This padding is done
+                 # due to a constraint with the reduce_scatter op, which requires
+                 # all tensors have equal size. See: optimizer.py.)
+                 num_elements_padded = data_parallel_world_size * \
+                     int(math.ceil(num_elements / data_parallel_world_size))
+
+                 # Allocate grad buffer.
+                 self._grad_buffers[dtype] = MemoryBuffer(num_elements,
+                                                          num_elements_padded,
+                                                          dtype)
+
+             # Assume the back prop order is the reverse of the params order,
+             # store the start index for the gradients.
+             for param in self.module.parameters():
+                 if param.requires_grad:
+                     dtype = _get_buffer_type(param)
+                     type_num_elements[dtype] -= param.data.nelement()
+                     param.main_grad = self._grad_buffers[dtype].get(
+                         param.data.shape, type_num_elements[dtype])
+                     if dtype not in self._grad_buffer_param_index_map:
+                         self._grad_buffer_param_index_map[dtype] = {}
+                     self._grad_buffer_param_index_map[dtype][param] = (
+                         type_num_elements[dtype],
+                         type_num_elements[dtype] + param.data.nelement(),
+                     )
+
+             # Backward hook.
+             # Accumulation function for the gradients. We need
+             # to store them so they don't go out of scope.
+             self.grad_accs = []
+             # Loop over all the parameters in the model.
+             for param in self.module.parameters():
+                 if param.requires_grad:
+                     # Expand so we get access to grad_fn.
+                     param_tmp = param.expand_as(param)
+                     # Get the gradient accumulator function.
+                     grad_acc = param_tmp.grad_fn.next_functions[0][0]
+                     grad_acc.register_hook(self._make_param_hook(param))
+                     self.grad_accs.append(grad_acc)
+
+
+     def _make_param_hook(self, param):
+         """Create the all-reduce hook for backprop."""
+         # Hook used for back-prop.
+         def param_hook(*unused):
+             # Add the gradient to the buffer.
+             if param.grad is not None:
+                 # The gradient function of linear layers is fused with GEMMs
+                 param.main_grad.add_(param.grad.data)
+                 # Now we can deallocate grad memory.
+                 param.grad = None
+         return param_hook
+
+
+     def zero_grad_buffer(self):
+         """Set the grad buffer data to zero. Needs to be called at the
+         beginning of each iteration."""
+         assert self._grad_buffers is not None, 'buffers are not initialized.'
+         for _, buffer_ in self._grad_buffers.items():
+             buffer_.zero()
+
+
+     def broadcast_params(self):
+         for param in self.module.parameters():
+             torch.distributed.broadcast(param.data,
+                                         src=mpu.get_data_parallel_src_rank(),
+                                         group=mpu.get_data_parallel_group())
+
+
+     def allreduce_gradients(self):
+         """Reduce gradients across data parallel ranks."""
+         # If we have buffers, simply reduce the data in the buffer.
+         if self._grad_buffers is not None:
+             for _, buffer_ in self._grad_buffers.items():
+                 buffer_.data /= mpu.get_data_parallel_world_size()
+                 torch.distributed.all_reduce(
+                     buffer_.data, group=mpu.get_data_parallel_group())
+         else:
+             # Otherwise, bucketize and all-reduce
+             buckets = {}
+             # Pack the buckets.
+             for param in self.module.parameters():
+                 if param.requires_grad and param.grad is not None:
+                     tp = param.data.type()
+                     if tp not in buckets:
+                         buckets[tp] = []
+                     buckets[tp].append(param)
+
+             # For each bucket, all-reduce and copy all-reduced grads.
+             for tp in buckets:
+                 bucket = buckets[tp]
+                 grads = [param.grad.data for param in bucket]
+                 coalesced = _flatten_dense_tensors(grads)
+                 coalesced /= mpu.get_data_parallel_world_size()
+                 torch.distributed.all_reduce(
+                     coalesced, group=mpu.get_data_parallel_group())
+                 for buf, synced in zip(grads, _unflatten_dense_tensors(
+                         coalesced, grads)):
+                     buf.copy_(synced)
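The contiguous-buffer path above hands each parameter a `main_grad` view into one flat `MemoryBuffer`, so dividing or all-reducing `buffer_.data` touches every gradient at once. Here is a standalone sketch of that view mechanism using plain CPU tensors, with no `mpu` or accelerator setup assumed:

```python
import torch

# Standalone illustration of the MemoryBuffer idea: grads for all parameters
# live as views into one flat tensor, so one collective covers all of them.
flat = torch.zeros(10)                      # the contiguous grad buffer
a = flat[0:6].view(2, 3)                    # "main_grad" view for param A
b = flat[6:10].view(4)                      # "main_grad" view for param B

a.add_(torch.ones(2, 3))                    # what param_hook does each backward
b.add_(torch.full((4,), 2.0))

# Scaling (or reducing) the flat buffer is visible through every view.
flat /= 2.0
print(a)   # all 0.5
print(b)   # all 1.0
```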
docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/enums.py ADDED
@@ -0,0 +1,21 @@
+ # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
+
+ import enum
+
+ class LayerType(enum.Enum):
+     encoder = 1
+     decoder = 2
+     retro_encoder = 3
+     retro_decoder = 4
+     retro_decoder_with_retriever = 5
+
+ class AttnType(enum.Enum):
+     self_attn = 1
+     cross_attn = 2
+
+ class AttnMaskType(enum.Enum):
+     padding = 1
+     causal = 2
+
+ # For backward compatibility with old model checkpoints
+ from megatron.core.enums import ModelType
docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/fused_bias_gelu.py ADDED
@@ -0,0 +1,43 @@
+ # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+
+ import torch
+
+
+ ###### BIAS GELU FUSION/ NO AUTOGRAD ################
+ # 1/sqrt(2*pi)-> 0.3989423
+ # 1/sqrt(2)   -> 0.70710678
+ # sqrt(2/pi)  -> 0.79788456
+ # this function is tanh approximation of gelu
+ # actual gelu is:
+ # x * 0.5 * (1.0 + torch.erf(x * 0.70710678))
+
+ @torch.jit.script
+ def bias_gelu(bias, y):
+     x = bias + y
+     return x * 0.5 * (1.0 + torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x)))
+
+ # gradient of tanh approximation of gelu
+ # gradient of actual gelu is:
+ # 0.5 * (1. + torch.erf(x * 0.70710678)) + 0.3989423 * x * torch.exp(-0.5 * x * x)
+ @torch.jit.script
+ def bias_gelu_back(g, bias, y):
+     x = bias + y
+     tanh_out = torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x))
+     # sqrt(2/pi) * 3 * 0.044715 -> 0.1070322243
+     ff = 0.5 * x * ((1 - tanh_out * tanh_out) * (0.79788456 + 0.1070322243 * x * x)) + 0.5 * (1 + tanh_out)
+     return ff*g
+
+ class GeLUFunction(torch.autograd.Function):
+     @staticmethod
+     # bias is an optional argument
+     def forward(ctx, input, bias):
+         ctx.save_for_backward(input, bias)
+         return bias_gelu(bias, input)
+
+     @staticmethod
+     def backward(ctx, grad_output):
+         input, bias = ctx.saved_tensors
+         tmp = bias_gelu_back(grad_output, bias, input)
+         return tmp, tmp
+
+ bias_gelu_impl = GeLUFunction.apply
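The comments above give the tanh approximation of GELU and its analytic derivative. Below is a small numerical check against PyTorch's built-in tanh-approximate GELU, assuming `bias_gelu` and `bias_gelu_back` from this file are importable; the shapes are arbitrary.

```python
import torch
import torch.nn.functional as F

# Quick sanity check of the tanh approximation and its hand-written gradient.
y = torch.randn(8, 16, dtype=torch.double, requires_grad=True)
bias = torch.randn(16, dtype=torch.double)

# Forward: bias_gelu(bias, y) should match PyTorch's tanh-approximate GELU.
assert torch.allclose(bias_gelu(bias, y), F.gelu(y + bias, approximate='tanh'),
                      atol=1e-6)

# Backward: bias_gelu_back applies the analytic derivative to an upstream grad.
g = torch.ones_like(y)
ref, = torch.autograd.grad(F.gelu(y + bias, approximate='tanh').sum(), y)
assert torch.allclose(bias_gelu_back(g, bias, y), ref, atol=1e-6)
```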
docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/fused_layer_norm.py ADDED
@@ -0,0 +1,119 @@
+ # Copyright (C) 2024 Habana Labs, Ltd. an Intel Company.
+ # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+
+ """This code is copied from NVIDIA apex:
+    https://github.com/NVIDIA/apex
+    with some changes. """
+
+ import numbers
+ import torch
+ from torch.nn.parameter import Parameter
+ from torch.nn import init
+ import importlib
+ from torch.nn import functional as F
+ import inspect
+
+ from megatron.core.utils import make_viewless_tensor
+ from megatron import get_args
+
+ from deepspeed.accelerator.real_accelerator import get_accelerator
+
+ try:
+     from apex.contrib.layer_norm.layer_norm import FastLayerNormFN
+     HAVE_PERSIST_LAYER_NORM = True
+ except:
+     HAVE_PERSIST_LAYER_NORM = False
+
+ try:
+     from apex.normalization.fused_layer_norm import FusedLayerNormAffineFunction
+ except ModuleNotFoundError:
+     pass
+
+
+ global fused_layer_norm_cuda
+ fused_layer_norm_cuda = None
+
+
+ class MixedFusedLayerNorm(torch.nn.Module):
+
+     def __init__(self, normalized_shape, eps=1e-5,
+                  no_persist_layer_norm=True,
+                  sequence_parallel=False,
+                  apply_layernorm_1p=False,
+                  mem_efficient_ln=True):
+         super(MixedFusedLayerNorm, self).__init__()
+
+         self.apply_layernorm_1p = apply_layernorm_1p
+         self.mem_efficient_ln = mem_efficient_ln
+
+         if get_accelerator().device_name() == 'cuda':
+             global fused_layer_norm_cuda
+             fused_layer_norm_cuda = importlib.import_module("fused_layer_norm_cuda")
+
+         # List of hidden sizes supported in the persistent layer norm kernel
+         # If the hidden size is not supported, fall back to the non-persistent
+         # kernel.
+         persist_ln_hidden_sizes = [1024, 1536, 2048, 2304, 3072, 3840, 4096,
+             5120, 6144, 8192, 10240, 12288, 12800, 15360, 16384, 18432, 20480,
+             24576, 25600, 30720, 32768, 40960, 49152, 65536]
+         if normalized_shape not in persist_ln_hidden_sizes or \
+                 not HAVE_PERSIST_LAYER_NORM:
+             no_persist_layer_norm = True
+
+         if isinstance(normalized_shape, numbers.Integral):
+             normalized_shape = (normalized_shape,)
+         self.normalized_shape = torch.Size(normalized_shape)
+         self.eps = eps
+         self.weight = Parameter(torch.empty(*normalized_shape,
+                                 device=get_accelerator().current_device_name(),
+                                 dtype=get_args().params_dtype))
+         self.bias = Parameter(torch.empty(*normalized_shape,
+                               device=get_accelerator().current_device_name(),
+                               dtype=get_args().params_dtype))
+         self.reset_parameters()
+         self.no_persist_layer_norm = no_persist_layer_norm
+         self.sequence_parallel = sequence_parallel
+
+         # set sequence parallelism flag on weight and bias parameters
+         setattr(self.weight, 'sequence_parallel', self.sequence_parallel)
+         setattr(self.bias, 'sequence_parallel', self.sequence_parallel)
+
+
+     def reset_parameters(self):
+
+         if self.apply_layernorm_1p:
+             init.zeros_(self.weight)
+             init.zeros_(self.bias)
+         else:
+             init.ones_(self.weight)
+             init.zeros_(self.bias)
+
+     def forward(self, input):
+
+         weight = self.weight + 1 if self.apply_layernorm_1p else self.weight
+         # CPU path is here for unittest sake.
+         if not input.is_cuda:
+             if get_accelerator().device_name() == 'cuda':
+                 print("WARNING! The input of FusedLayerNorm should be on the GPU."
+                       "This warning should only be triggered in the FusedLayerNorm unit tests.")
+             return F.layer_norm(input, self.normalized_shape, weight, self.bias, self.eps)
+
+         if self.no_persist_layer_norm:
+             # Apex does not have versions yet (https://github.com/NVIDIA/apex/pull/1648), so we need to inspect
+             # the function manually on whether the extra arg introduced in https://github.com/NVIDIA/apex/pull/1715 exists yet
+             if 'memory_efficient' in inspect.getfullargspec(FusedLayerNormAffineFunction.forward).args:
+                 return FusedLayerNormAffineFunction.apply(input, weight, self.bias, self.normalized_shape, self.eps, self.mem_efficient_ln)
+             else:
+                 return FusedLayerNormAffineFunction.apply(input, weight, self.bias, self.normalized_shape, self.eps)
+         else:
+             output = FastLayerNormFN.apply(input, weight, self.bias, self.eps)
+
+             # Apex's fast layer norm function outputs a 'view' tensor (i.e., has
+             # a populated '_base' field). This will result in schedule.py's
+             # deallocate_output_tensor() throwing an error, so a viewless tensor is
+             # created to prevent this.
+             output = make_viewless_tensor(inp = output,
+                                           requires_grad = input.requires_grad,
+                                           keep_graph = True)
+
+             return output
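With `apply_layernorm_1p`, the stored weight is zero-initialized and 1 is added at forward time, which matches a standard LayerNorm whose effective weight is `stored_weight + 1`. A minimal sketch of that equivalence on the CPU fallback path (plain `F.layer_norm`, no fused kernels assumed):

```python
import torch
import torch.nn.functional as F

# Illustration of the apply_layernorm_1p trick: the stored weight starts at
# zero and 1 is added at forward time, which is numerically identical to a
# standard LayerNorm whose weight is (stored_weight + 1).
hidden = 32
x = torch.randn(4, hidden)
stored_weight = torch.zeros(hidden)   # what reset_parameters() produces for 1p
bias = torch.zeros(hidden)

out_1p = F.layer_norm(x, (hidden,), stored_weight + 1, bias, 1e-5)
out_std = F.layer_norm(x, (hidden,), torch.ones(hidden), bias, 1e-5)
assert torch.allclose(out_1p, out_std)
```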
docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/fused_softmax.py ADDED
@@ -0,0 +1,213 @@
+ # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+
+
+ import torch
+ import torch.nn as nn
+ from megatron.model.enums import AttnMaskType
+
+
+ class ScaledUpperTriangMaskedSoftmax(torch.autograd.Function):
+     """
+     Fused operation which performs following three operations in sequence
+     1. Scale the tensor.
+     2. Apply upper triangular mask (typically used in gpt models).
+     3. Perform softmax.
+     """
+
+     @staticmethod
+     def forward(ctx, inputs, scale):
+         import scaled_upper_triang_masked_softmax_cuda
+
+         scale_t = torch.tensor([scale])
+         softmax_results = scaled_upper_triang_masked_softmax_cuda.forward(
+             inputs, scale_t[0]
+         )
+
+         ctx.save_for_backward(softmax_results, scale_t)
+         return softmax_results
+
+     @staticmethod
+     def backward(ctx, output_grads):
+         import scaled_upper_triang_masked_softmax_cuda
+
+         softmax_results, scale_t = ctx.saved_tensors
+         input_grads = scaled_upper_triang_masked_softmax_cuda.backward(
+             output_grads, softmax_results, scale_t[0]
+         )
+
+         return input_grads, None
+
+
+ class ScaledMaskedSoftmax(torch.autograd.Function):
+     """
+     Fused operation which performs following three operations in sequence
+     1. Scale the tensor.
+     2. Apply the mask.
+     3. Perform softmax.
+     """
+
+     @staticmethod
+     def forward(ctx, inputs, mask, scale):
+         import scaled_masked_softmax_cuda
+
+         scale_t = torch.tensor([scale])
+
+         softmax_results = scaled_masked_softmax_cuda.forward(inputs, mask, scale_t[0])
+         ctx.save_for_backward(softmax_results, scale_t)
+         return softmax_results
+
+     @staticmethod
+     def backward(ctx, output_grads):
+         import scaled_masked_softmax_cuda
+
+         softmax_results, scale_t = ctx.saved_tensors
+
+         input_grads = scaled_masked_softmax_cuda.backward(
+             output_grads, softmax_results, scale_t[0]
+         )
+         return input_grads, None, None
+
+
+ class ScaledSoftmax(torch.autograd.Function):
+     """
+     Fused operation which performs following two operations in sequence
+     1. Scale the tensor.
+     2. Perform softmax.
+     """
+
+     @staticmethod
+     def forward(ctx, inputs, scale):
+         import scaled_softmax_cuda
+
+         scale_t = torch.tensor([scale])
+
+         softmax_results = scaled_softmax_cuda.forward(
+             inputs, scale_t[0]
+         )
+         ctx.save_for_backward(softmax_results, scale_t)
+         return softmax_results
+
+     @staticmethod
+     def backward(ctx, output_grads):
+         import scaled_softmax_cuda
+
+         softmax_results, scale_t = ctx.saved_tensors
+
+         input_grads = scaled_softmax_cuda.backward(
+             output_grads, softmax_results, scale_t[0]
+         )
+         return input_grads, None, None
+
+
+ class FusedScaleMaskSoftmax(nn.Module):
+     """
+     fused operation: scaling + mask + softmax
+
+     Arguments:
+         input_in_fp16: flag to indicate if input is in fp16 data format.
+         input_in_bf16: flag to indicate if input is in bf16 data format.
+         attn_mask_type: attention mask type (pad or causal)
+         scaled_masked_softmax_fusion: flag to indicate the user wants to use softmax fusion
+         mask_func: mask function to be applied.
+         softmax_in_fp32: if true, softmax is performed at fp32 precision.
+         scale: scaling factor used in input tensor scaling.
+     """
+
+     def __init__(
+         self,
+         input_in_fp16,
+         input_in_bf16,
+         attn_mask_type,
+         scaled_masked_softmax_fusion,
+         mask_func,
+         softmax_in_fp32,
+         scale,
+     ):
+         super(FusedScaleMaskSoftmax, self).__init__()
+         self.input_in_fp16 = input_in_fp16
+         self.input_in_bf16 = input_in_bf16
+         assert not (
+             self.input_in_fp16 and self.input_in_bf16
+         ), "both fp16 and bf16 flags cannot be active at the same time."
+         self.input_in_float16 = self.input_in_fp16 or self.input_in_bf16
+         self.attn_mask_type = attn_mask_type
+         self.scaled_masked_softmax_fusion = scaled_masked_softmax_fusion
+         self.mask_func = mask_func
+         self.softmax_in_fp32 = softmax_in_fp32
+         self.scale = scale
+
+         assert (
+             self.scale is None or softmax_in_fp32
+         ), "softmax should be in fp32 when scaled"
+
+     def forward(self, input, mask):
+         # [b, np, sq, sk]
+         assert input.dim() == 4
+
+         if self.is_kernel_available(mask, *input.size()):
+             return self.forward_fused_softmax(input, mask)
+         else:
+             return self.forward_torch_softmax(input, mask)
+
+     def is_kernel_available(self, mask, b, np, sq, sk):
+         attn_batches = b * np
+
+         if (
+             self.scaled_masked_softmax_fusion  # user wants to fuse
+             and self.input_in_float16  # input must be fp16
+             and 16 < sk <= 4096  # sk must be 16 ~ 4096
+             and sq % 4 == 0  # sq must be divisible by 4
+             and sk % 4 == 0  # sk must be divisible by 4
+             and attn_batches % 4 == 0  # np * b must be divisible by 4
+         ):
+             if 0 <= sk <= 4096:
+                 batch_per_block = self.get_batch_per_block(sq, sk, b, np)
+
+                 if self.attn_mask_type.value == AttnMaskType.causal.value:
+                     if attn_batches % batch_per_block == 0:
+                         return True
+                 else:
+                     if sq % batch_per_block == 0:
+                         return True
+         return False
+
+     def forward_fused_softmax(self, input, mask):
+         b, np, sq, sk = input.size()
+         scale = self.scale if self.scale is not None else 1.0
+
+         if self.attn_mask_type.value == AttnMaskType.causal.value:
+             assert sq == sk, "causal mask is only for self attention"
+
+             # input is 3D tensor (attn_batches, sq, sk)
+             input = input.view(-1, sq, sk)
+             probs = ScaledUpperTriangMaskedSoftmax.apply(input, scale)
+             return probs.view(b, np, sq, sk)
+         else:
+             # input is 4D tensor (b, np, sq, sk)
+             if mask is not None:
+                 return ScaledMaskedSoftmax.apply(input, mask, scale)
+             else:
+                 return ScaledSoftmax.apply(input, scale)
+
+     def forward_torch_softmax(self, input, mask):
+         if self.input_in_float16 and self.softmax_in_fp32:
+             input = input.float()
+
+         if self.scale is not None:
+             input = input * self.scale
+         mask_output = self.mask_func(input, mask) if mask is not None else input
+         probs = torch.nn.Softmax(dim=-1)(mask_output)
+
+         if self.input_in_float16 and self.softmax_in_fp32:
+             if self.input_in_fp16:
+                 probs = probs.half()
+             else:
+                 probs = probs.bfloat16()
+
+         return probs
+
+     @staticmethod
+     def get_batch_per_block(sq, sk, b, np):
+         import scaled_masked_softmax_cuda
+
+         return scaled_masked_softmax_cuda.get_batch_per_block(sq, sk, b, np)
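When the fused CUDA kernels are unavailable, `forward_torch_softmax` falls back to the same scale-mask-softmax pipeline computed eagerly. Below is a minimal unfused reference with a causal mask; `mask_func` here is a stand-in for the mask function Megatron passes in, which fills masked positions with a large negative value.

```python
import torch

# Unfused reference for what the kernels above compute: scale, mask, softmax.
def mask_func(scores, mask):
    # Stand-in for Megatron's attention mask function.
    return scores.masked_fill(mask, -10000.0)

b, heads, sq, sk = 2, 4, 8, 8
scores = torch.randn(b, heads, sq, sk, dtype=torch.float16)
causal_mask = torch.triu(torch.ones(sq, sk, dtype=torch.bool), diagonal=1)
causal_mask = causal_mask.view(1, 1, sq, sk)

scale = 0.125
# Scale, mask, softmax in fp32, then cast back, mirroring forward_torch_softmax.
probs = torch.softmax(mask_func(scores.float() * scale, causal_mask), dim=-1)
probs = probs.half()
```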
docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/gpt_model.py ADDED
@@ -0,0 +1,437 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (C) 2024 Habana Labs, Ltd. an Intel Company.
2
+ # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
3
+
4
+ """GPT-2 model."""
5
+
6
+ import torch
7
+
8
+ from megatron import get_args
9
+ from megatron.core import mpu, tensor_parallel, sequence_parallel
10
+ from .module import MegatronModule, fp32_to_float16, float16_to_fp32
11
+
12
+ from .enums import AttnMaskType
13
+ from .language_model import parallel_lm_logits
14
+ from .language_model import get_language_model
15
+ from .utils import init_method_normal
16
+ from .utils import scaled_init_method_normal
17
+
18
+ from megatron.model import LayerNorm
19
+ from .language_model import EmbeddingPipe
20
+ from .rmsnorm import RMSNorm
21
+ from .transformer import ParallelTransformerLayerPipe, LMHeadPipe
22
+ from deepspeed.pipe import PipelineModule, LayerSpec, TiedLayerSpec
23
+
24
+ try:
25
+ from apex.normalization import MixedFusedRMSNorm
26
+ except ImportError:
27
+ MixedFusedRMSNorm = RMSNorm
28
+
29
+ try:
30
+ from deepspeed.checkpoint import (
31
+ VOCABULARY_PARAMETER_PATTERNS,
32
+ PIPELINE_REPLICATED_PARAMETER_PATTERNS,
33
+ TP_REPLICATED_PARAMETER_PATTERNS,
34
+ PARAMETER_WITH_ROW_PARALLELISM_PATTERNS,
35
+ PARAMETER_WITH_2_SUB_PARAMS_CAT_DIM_0,
36
+ )
37
+ DS_UNIVERSAL_CHECKPOINT_INFO = True
38
+ except ImportError:
39
+ DS_UNIVERSAL_CHECKPOINT_INFO = False
40
+
41
+
42
+ def post_language_model_processing(lm_output, labels, logit_weights,
43
+ parallel_output,
44
+ fp16_lm_cross_entropy):
45
+
46
+ # Output. Format [s b h]
47
+ output = parallel_lm_logits(
48
+ lm_output,
49
+ logit_weights,
50
+ parallel_output)
51
+
52
+ if labels is None:
53
+ # [s b h] => [b s h]
54
+ return output.transpose(0,1).contiguous()
55
+ else:
56
+ # [b s] => [s b]
57
+ labels = labels.transpose(0,1).contiguous()
58
+ cross_entropy = sequence_parallel.vocab_sequence_parallel_cross_entropy if mpu.get_sequence_parallel_world_size() > 1 \
59
+ else tensor_parallel.vocab_parallel_cross_entropy
60
+ if fp16_lm_cross_entropy:
61
+ assert output.dtype == torch.half
62
+ loss = cross_entropy(output, labels)
63
+ else:
64
+ loss = cross_entropy(output.float(), labels)
65
+
66
+ # [s b] => [b, s]
67
+ loss = loss.transpose(0,1).contiguous()
68
+ return loss
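As a single-process illustration of the layout handling above (logits in [s, b, vocab], labels in [b, s], loss returned as [b, s]), the sketch below substitutes torch.nn.functional.cross_entropy for the vocab-parallel and sequence-parallel implementations; it is a shape reference only, not the parallel code path.

import torch
import torch.nn.functional as F

def reference_lm_loss(logits_sbv: torch.Tensor, labels_bs: torch.Tensor) -> torch.Tensor:
    # logits_sbv: [s, b, vocab]; labels_bs: [b, s]; returns per-token loss as [b, s].
    labels_sb = labels_bs.transpose(0, 1).contiguous()          # [b, s] => [s, b]
    flat_loss = F.cross_entropy(logits_sbv.float().reshape(-1, logits_sbv.size(-1)),
                                labels_sb.reshape(-1),
                                reduction='none')
    loss_sb = flat_loss.view_as(labels_sb)                      # [s, b]
    return loss_sb.transpose(0, 1).contiguous()                 # [s, b] => [b, s]

# toy shapes: s=4, b=2, vocab=11
loss = reference_lm_loss(torch.randn(4, 2, 11), torch.randint(0, 11, (2, 4)))
assert loss.shape == (2, 4)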
69
+
70
+
71
+ class GPTModel(MegatronModule):
72
+ """GPT-2 Language model."""
73
+
74
+ def __init__(self,
75
+ config,
76
+ num_tokentypes=0,
77
+ parallel_output=True,
78
+ pre_process=True,
79
+ post_process=True,
80
+ return_moe_loss=True):
81
+ args = get_args()
82
+ super().__init__(config=config, share_embeddings_and_output_weights=not args.untie_embeddings_and_output_weights)
83
+
84
+ self.parallel_output = parallel_output
85
+ self.pre_process = pre_process
86
+ self.post_process = post_process
87
+ self.fp16_lm_cross_entropy = args.fp16_lm_cross_entropy
88
+ self.return_moe_loss = return_moe_loss
89
+ self.untie_embeddings_and_output_weights = args.untie_embeddings_and_output_weights
90
+
91
+ self.language_model, self._language_model_key = get_language_model(
92
+ config=config,
93
+ num_tokentypes=num_tokentypes,
94
+ add_pooler=False,
95
+ encoder_attn_mask_type=AttnMaskType.causal,
96
+ pre_process=self.pre_process,
97
+ post_process=self.post_process,
98
+ num_experts=args.num_experts)
99
+
100
+ if not args.untie_embeddings_and_output_weights:
101
+ self.initialize_word_embeddings()
102
+
103
+ def set_input_tensor(self, input_tensor):
104
+ """See megatron.model.transformer.set_input_tensor()"""
105
+ self.language_model.set_input_tensor(input_tensor)
106
+
107
+ def forward(self, input_ids, position_ids, attention_mask,
108
+ retriever_input_ids=None,
109
+ retriever_position_ids=None,
110
+ retriever_attn_mask=None,
111
+ labels=None, tokentype_ids=None, inference_params=None,
112
+ curriculum_seqlen=None):
113
+ args = get_args()
114
+ if curriculum_seqlen is not None:
115
+ args.curriculum_seqlen = curriculum_seqlen
116
+ if curriculum_seqlen < input_ids.size()[1]:
117
+ # seqlen-based curriculum learning
118
+ # input_ids, position_ids, labels have size [batch size, seqlen]
119
+ input_ids = input_ids[:, :curriculum_seqlen].contiguous()
120
+ position_ids = position_ids[:, :curriculum_seqlen].contiguous()
121
+ if labels is not None:
122
+ labels = labels[:, :curriculum_seqlen].contiguous()
123
+
124
+ # attention_mask has size [1, 1, seqlen, seqlen]
125
+ attention_mask = attention_mask[:, :, :curriculum_seqlen, :curriculum_seqlen].contiguous()
126
+ else:
127
+ if args.curriculum_learning_legacy:
128
+ # If got a None input, need to reset curriculum_seqlen on user side
129
+ args.curriculum_seqlen = args.seq_length
130
+
131
+ lm_output, moe_losses = self.language_model(
132
+ input_ids,
133
+ position_ids,
134
+ attention_mask,
135
+ retriever_input_ids=retriever_input_ids,
136
+ retriever_position_ids=retriever_position_ids,
137
+ retriever_attn_mask=retriever_attn_mask,
138
+ inference_params=inference_params)
139
+
140
+ if self.post_process:
141
+ lm_output = post_language_model_processing(
142
+ lm_output, labels,
143
+ self.language_model.output_layer.weight if self.untie_embeddings_and_output_weights else self.shared_embedding_or_output_weight(),
144
+ self.parallel_output,
145
+ self.fp16_lm_cross_entropy)
146
+
147
+ return (lm_output, moe_losses) if self.return_moe_loss else lm_output
148
+
149
+ def state_dict_for_save_checkpoint(self, prefix='', keep_vars=False):
150
+
151
+ state_dict_ = {}
152
+ language_model_state_dict = self.language_model.state_dict_for_save_checkpoint(
153
+ prefix=prefix, keep_vars=keep_vars)
154
+ # MoE states need to be handled separately by DeepSpeed engine, thus
155
+ # moving them to the top level dictionary
156
+ if "moe_state_dict" in language_model_state_dict:
157
+ for key in list(language_model_state_dict["moe_state_dict"].keys()):
158
+ state_dict_[key] = language_model_state_dict["moe_state_dict"].pop(key)
159
+ del language_model_state_dict["moe_state_dict"]
160
+ state_dict_[self._language_model_key] = language_model_state_dict
161
+ # Save word_embeddings.
162
+ if self.post_process and not self.pre_process and not self.untie_embeddings_and_output_weights:
163
+ state_dict_[self._word_embeddings_for_head_key] \
164
+ = self.word_embeddings.state_dict(prefix=prefix,
165
+ keep_vars=keep_vars)
166
+ return state_dict_
167
+
168
+ def load_state_dict(self, state_dict, strict=True):
169
+ """Customized load."""
170
+
171
+ # Load word_embeddings.
172
+ if self.post_process and not self.pre_process and not self.untie_embeddings_and_output_weights:
173
+ self.word_embeddings.load_state_dict(
174
+ state_dict[self._word_embeddings_for_head_key], strict=strict)
175
+ # Gather MoE states and move under language model
176
+ moe_state_dict = {}
177
+ for key in list(state_dict.keys()):
178
+ if 'expert' in key and 'moe.gate.wg.weight' not in key:
179
+ moe_state_dict[key] = state_dict.pop(key)
180
+ if self._language_model_key in state_dict:
181
+ state_dict = state_dict[self._language_model_key]
182
+ if len(moe_state_dict) > 0:
183
+ state_dict["moe_state_dict"] = moe_state_dict
184
+ self.language_model.load_state_dict(state_dict, strict=strict)
185
+
186
+ def universal_checkpoint_info(self):
187
+ info = dict()
188
+ if DS_UNIVERSAL_CHECKPOINT_INFO:
189
+ # Vocabulary parameters (embeddings) that require special handling due to padding.
190
+ info[VOCABULARY_PARAMETER_PATTERNS] = [
191
+ r"tied_modules.embed.word_embeddings.weight"
192
+ ]
193
+
194
+ # Parameter slices that should be averaged not concatenated.
195
+ info[TP_REPLICATED_PARAMETER_PATTERNS] = [
196
+ r"tied_modules.embed.position_embeddings.weight",
197
+ r"\d+.input_layernorm.weight",
198
+ r"\d+.input_layernorm.bias",
199
+ r"\d+.post_attention_layernorm.weight",
200
+ r"\d+.post_attention_layernorm.bias",
201
+ r"\d+.self_attention.dense.bias",
202
+ r"\d+.mlp.dense_4h_to_h.bias",
203
+ r"\d+.weight",
204
+ r"\d+.bias",
205
+ ]
206
+
207
+ # Parameter that are sliced on the row dimension
208
+ info[PARAMETER_WITH_ROW_PARALLELISM_PATTERNS] = [
209
+ r"\d+.mlp.dense_4h_to_h.weight",
210
+ r"\d+.self_attention.dense.weight",
211
+ ]
212
+
213
+ return info
214
+
215
+ def CrossEntropy(output, labels):
216
+ labels, loss_mask = labels[0], labels[1]
217
+
218
+ args = get_args()
219
+
220
+ # [b s] => [s b]
221
+ labels = labels.transpose(0, 1).contiguous()
222
+ losses = tensor_parallel.vocab_parallel_cross_entropy(output.contiguous().float(), labels)
223
+ # [s b] => [b, s]
224
+ losses = losses.transpose(0, 1).contiguous()
225
+ loss_mask = loss_mask.view(-1)
226
+ loss = torch.sum(losses.view(-1) * loss_mask) / loss_mask.sum()
227
+ return loss
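The pipeline loss function above averages the per-token loss over unmasked tokens only. A minimal sketch of that reduction on a plain per-token loss tensor (the vocab-parallel cross entropy itself is not reproduced here):

import torch

def masked_token_mean(per_token_loss: torch.Tensor, loss_mask: torch.Tensor) -> torch.Tensor:
    # per_token_loss, loss_mask: both [b, s]; mask is 1.0 for tokens that count.
    loss_mask = loss_mask.view(-1).float()
    return torch.sum(per_token_loss.view(-1) * loss_mask) / loss_mask.sum()

# only the two unmasked tokens contribute: (1.0 + 3.0) / 2 == 2.0
losses = torch.tensor([[1.0, 2.0], [3.0, 4.0]])
mask = torch.tensor([[1.0, 0.0], [1.0, 0.0]])
assert masked_token_mean(losses, mask).item() == 2.0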
228
+
229
+
230
+ class GPTModelPipe(PipelineModule,MegatronModule):
231
+ """GPT-2 Language model."""
232
+
233
+ def __init__(self,
234
+ config,
235
+ num_tokentypes=0,
236
+ parallel_output=True):
237
+ args = get_args()
238
+ self.parallel_output = parallel_output
239
+
240
+ if config.init_method is None:
241
+ config.init_method = init_method_normal(config.init_method_std)
242
+
243
+ if config.output_layer_init_method is None:
244
+ config.output_layer_init_method = scaled_init_method_normal(config.init_method_std,
245
+ config.num_layers)
246
+
247
+ self.specs = []
248
+
249
+ def _to_float16(inputs):
250
+ if args.fp16:
251
+ return fp32_to_float16(inputs, lambda v: v.half())
252
+ elif args.bf16:
253
+ return fp32_to_float16(inputs, lambda v: v.bfloat16())
254
+ else:
255
+ return inputs
256
+
257
+ self.specs.append(_to_float16)
258
+
259
+ # Embedding layer
260
+ if args.untie_embeddings_and_output_weights:
261
+ self.specs.append(LayerSpec(EmbeddingPipe,
262
+ args.hidden_size,
263
+ args.padded_vocab_size,
264
+ args.max_position_embeddings,
265
+ args.hidden_dropout,
266
+ config,
267
+ add_position_embedding=args.add_position_embedding,
268
+ num_tokentypes=num_tokentypes,
269
+ embedding_weights_in_fp32=args.embedding_weights_in_fp32,))
270
+ else:
271
+ self.specs.append(TiedLayerSpec('embed',
272
+ EmbeddingPipe,
273
+ args.hidden_size,
274
+ args.padded_vocab_size,
275
+ args.max_position_embeddings,
276
+ args.hidden_dropout,
277
+ config,
278
+ add_position_embedding=args.add_position_embedding,
279
+ num_tokentypes=num_tokentypes,
280
+ embedding_weights_in_fp32=args.embedding_weights_in_fp32,
281
+ tied_weight_attr='word_embeddings_weight'))
282
+
283
+ for layer_idx in range(args.num_layers):
284
+ self.specs.append(
285
+ LayerSpec(ParallelTransformerLayerPipe,
286
+ config,
287
+ layer_number=layer_idx,
288
+ self_attn_mask_type=AttnMaskType.causal))
289
+
290
+ # Final layernorm after transformer layers
291
+ if args.normalization == 'layernorm':
292
+ self.specs.append(LayerSpec(LayerNorm,
293
+ args.hidden_size,
294
+ eps=args.layernorm_epsilon,
295
+ sequence_parallel=args.sequence_parallel))
296
+ else:
297
+ self.specs.append(LayerSpec(MixedFusedRMSNorm, args.hidden_size,
298
+ args.layernorm_epsilon,
299
+ sequence_parallel=args.sequence_parallel))
300
+
301
+ def _logits_helper(embedding, lm_output):
302
+ """A wrapper to massage inputs/outputs from pipeline. """
303
+ return parallel_lm_logits(
304
+ lm_output,
305
+ embedding.word_embeddings_weight,
306
+ self.parallel_output)
307
+ if args.untie_embeddings_and_output_weights:
308
+ self.specs.append(
309
+ LayerSpec(LMHeadPipe, args.hidden_size, args.padded_vocab_size, config)
310
+ )
311
+ else:
312
+ self.specs.append(
313
+ TiedLayerSpec('embed',
314
+ EmbeddingPipe,
315
+ args.hidden_size,
316
+ args.padded_vocab_size,
317
+ args.max_position_embeddings,
318
+ args.hidden_dropout,
319
+ config,
320
+ add_position_embedding=(args.add_position_embedding and (not args.fix_position_emb_redundant_alloc)),
321
+ num_tokentypes=num_tokentypes,
322
+ embedding_weights_in_fp32=args.embedding_weights_in_fp32,
323
+ forward_fn=_logits_helper,
324
+ tied_weight_attr='word_embeddings_weight')
325
+ )
326
+
327
+ # Convert to fp32 if needed
328
+ if args.fp16 or args.bf16:
329
+ self.specs.append(float16_to_fp32)
330
+
331
+ # for selective, use --recompute-activations or --recompute-granularity='selective'
332
+ # for full, use --recompute-granularity='full' --recompute-method='uniform' or
333
+ # --checkpoint-activations
334
+ if args.checkpoint_activations:
335
+ interval = args.checkpoint_num_layers
336
+ elif args.recompute_granularity == "full" and args.recompute_method == 'uniform':
337
+ # deepspeed's pipeline doesn't support the block recompute method
338
+ interval = args.recompute_num_layers
339
+ else:
340
+ interval = 0
341
+
342
+ from deepspeed.runtime.pipe.topology import PipeModelDataParallelTopology
343
+ topo = PipeModelDataParallelTopology(num_pp=mpu.get_pipeline_model_parallel_world_size(),
344
+ num_mp=mpu.get_tensor_model_parallel_world_size(),
345
+ num_dp=mpu.get_data_parallel_world_size())
346
+
347
+ super().__init__(layers=self.specs,
348
+ loss_fn=CrossEntropy,
349
+ topology=topo,
350
+ activation_checkpoint_interval=interval,
351
+ partition_method='type:transformer')
352
+
353
+ @staticmethod
354
+ def _get_vocab_param_patterns():
355
+ args = get_args()
356
+ if args.untie_embeddings_and_output_weights:
357
+ patterns = [
358
+ r"\d+.word_embeddings.weight",
359
+ r"\d+.lm_head.weight"
360
+ ]
361
+ else:
362
+ patterns = [
363
+ r"tied_modules.embed.word_embeddings.weight"
364
+ ]
365
+ return patterns
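These pattern lists are plain regular expressions intended to be matched against the flattened pipeline parameter names by DeepSpeed's universal-checkpoint tooling; exactly how the matching is applied is not shown in this file, so the use of re.match below is an assumption, and the example parameter names are made up for illustration.

import re

patterns = [
    r"tied_modules.embed.word_embeddings.weight",
    r"\d+.input_layernorm.weight",
    r"\d+.mlp.dense_4h_to_h.weight",
]

names = [
    "tied_modules.embed.word_embeddings.weight",   # vocabulary parameter
    "3.input_layernorm.weight",                    # per-layer, TP-replicated
    "3.mlp.dense_4h_to_h.weight",                  # row-parallel weight
    "3.self_attention.query_key_value.weight",     # matches none of the above
]

for name in names:
    matched = any(re.match(p, name) for p in patterns)
    print(f"{name}: {'matched' if matched else 'no match'}")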
366
+
367
+ def _get_pp_replicated_param_patterns(self):
368
+ args = get_args()
369
+ if args.untie_embeddings_and_output_weights:
370
+ return []
371
+ patterns = self._get_vocab_param_patterns()
372
+ if args.add_position_embedding:
373
+ patterns.append(r"tied_modules.embed.position_embeddings.weight")
374
+ return patterns
375
+
376
+ @staticmethod
377
+ def _get_tp_replicated_param_patterns():
378
+ args = get_args()
379
+ patterns = [
380
+ r"\d+.input_layernorm.weight",
381
+ r"\d+.post_attention_layernorm.weight",
382
+ r"\d+.weight",
383
+ ]
384
+ if args.add_position_embedding:
385
+ patterns.append(r"tied_modules.embed.position_embeddings.weight")
386
+ if args.add_bias_linear:
387
+ patterns.extend([
388
+ r"\d+.self_attention.dense.bias",
389
+ r"\d+.mlp.dense_4h_to_h.bias",
390
+ ])
391
+ if args.normalization == 'layernorm':
392
+ patterns.extend([
393
+ r"\d+.input_layernorm.bias",
394
+ r"\d+.post_attention_layernorm.bias",
395
+ r"\d+.bias",
396
+ ])
397
+ return patterns
398
+
399
+ @staticmethod
400
+ def _get_row_parallel_param_patterns():
401
+ return [
402
+ r"\d+.mlp.dense_4h_to_h.weight",
403
+ r"\d+.self_attention.dense.weight",
404
+ ]
405
+
406
+ @staticmethod
407
+ def _get_swiglu_col_parallel_param_patterns():
408
+ args = get_args()
409
+ if not args.swiglu:
410
+ return []
411
+ patterns = [
412
+ r"\d+.mlp.dense_h_to_4h.weight",
413
+ ]
414
+ if args.add_bias_linear:
415
+ patterns.append(r"\d+.mlp.dense_h_to_4h.bias")
416
+ return patterns
417
+
418
+
419
+ def universal_checkpoint_info(self):
420
+ info = dict()
421
+ if DS_UNIVERSAL_CHECKPOINT_INFO:
422
+ # Vocabulary parameters (embeddings) that require special handling due to padding.
423
+ info[VOCABULARY_PARAMETER_PATTERNS] = self._get_vocab_param_patterns()
424
+
425
+ # Replicated (shared) parameters on the pipeline dimension
426
+ info[PIPELINE_REPLICATED_PARAMETER_PATTERNS] = self._get_pp_replicated_param_patterns()
427
+
428
+ # Parameter slices that should be averaged not concatenated.
429
+ info[TP_REPLICATED_PARAMETER_PATTERNS] = self._get_tp_replicated_param_patterns()
430
+
431
+ # Parameter that are sliced on the row dimension
432
+ info[PARAMETER_WITH_ROW_PARALLELISM_PATTERNS] = self._get_row_parallel_param_patterns()
433
+
434
+ # SWIGLU parameters are first sliced on dim=0 to tp slices
435
+ # Then, each tp slice is chunked into 2 to create the linear layers L1, L2 used for silu(L1(x)) * L2(x)
436
+ info[PARAMETER_WITH_2_SUB_PARAMS_CAT_DIM_0] = self._get_swiglu_col_parallel_param_patterns()
437
+ return info
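The PARAMETER_WITH_2_SUB_PARAMS_CAT_DIM_0 comment above refers to the SwiGLU MLP, where the single dense_h_to_4h weight stores two stacked linear layers L1 and L2 and the activation is silu(L1(x)) * L2(x). A minimal single-process sketch of that chunking (sizes are illustrative, and no tensor parallelism is simulated here):

import torch
import torch.nn.functional as F

hidden, ffn = 8, 16
x = torch.randn(4, hidden)

# One fused weight of shape [2 * ffn, hidden], as stored in dense_h_to_4h.
fused_weight = torch.randn(2 * ffn, hidden)

# Chunk along dim 0 into the two sub-parameters L1 and L2.
w1, w2 = torch.chunk(fused_weight, 2, dim=0)

# SwiGLU: silu(L1(x)) * L2(x), giving [4, ffn].
out = F.silu(x @ w1.t()) * (x @ w2.t())
assert out.shape == (4, ffn)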
docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/language_model.py ADDED
@@ -0,0 +1,723 @@
1
+ # Copyright (C) 2024 Habana Labs, Ltd. an Intel Company.
2
+ # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
3
+
4
+ """Transformer based language model."""
5
+
6
+ import torch
7
+ import torch.nn.functional as F
8
+
9
+ from megatron import get_args
10
+ from megatron.core import mpu, tensor_parallel
11
+ from megatron.core.enums import ModelType
12
+
13
+ from .enums import AttnMaskType, LayerType
14
+ from .module import MegatronModule
15
+ from .rotary_pos_embedding import apply_rotary_pos_emb, RotaryEmbedding
16
+ from .transformer import ParallelTransformer
17
+ from .utils import get_linear_layer
18
+ from .utils import init_method_normal, scaled_init_method_normal, gather_and_init
19
+
20
+
21
+ def parallel_lm_logits(input_, word_embeddings_weight, parallel_output,
22
+ bias=None):
23
+ """LM logits using word embedding weights."""
24
+ args = get_args()
25
+ # Parallel logits.
26
+ if args.async_tensor_model_parallel_allreduce or\
27
+ args.sequence_parallel:
28
+ input_parallel = input_
29
+ model_parallel = mpu.get_tensor_model_parallel_world_size() > 1
30
+ async_grad_allreduce = args.async_tensor_model_parallel_allreduce and \
31
+ model_parallel and not args.sequence_parallel
32
+ else:
33
+ input_parallel = tensor_parallel.copy_to_tensor_model_parallel_region(input_)
34
+ async_grad_allreduce = False
35
+
36
+ # Matrix multiply.
37
+ logits_parallel = tensor_parallel.linear_with_grad_accumulation_and_async_allreduce(
38
+ input=input_parallel,
39
+ weight=word_embeddings_weight,
40
+ bias=bias,
41
+ gradient_accumulation_fusion=args.gradient_accumulation_fusion,
42
+ async_grad_allreduce=async_grad_allreduce,
43
+ sequence_parallel=args.sequence_parallel)
44
+ # Gather if needed.
45
+
46
+ if parallel_output:
47
+ return logits_parallel
48
+
49
+ return tensor_parallel.gather_from_tensor_model_parallel_region(logits_parallel)
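Conceptually, the vocab logits above come from a column-parallel matmul against the word-embedding weight: each tensor-parallel rank holds a shard of the vocabulary rows, and gathering the partial logits along the last dimension reproduces the full result. A single-process sketch of that equivalence (two simulated ranks, no distributed communication):

import torch

seq, batch, hidden, vocab = 5, 2, 8, 12
hidden_states = torch.randn(seq, batch, hidden)
word_embeddings_weight = torch.randn(vocab, hidden)

# Full (unsharded) logits: [s, b, vocab].
full_logits = hidden_states @ word_embeddings_weight.t()

# Simulate two tensor-parallel ranks, each holding half the vocabulary rows.
shards = torch.chunk(word_embeddings_weight, 2, dim=0)
partial = [hidden_states @ w.t() for w in shards]          # each [s, b, vocab/2]

# "Gather from the tensor model parallel region" == concatenate along the vocab dim.
gathered = torch.cat(partial, dim=-1)
assert torch.allclose(full_logits, gathered)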
50
+
51
+
52
+ def get_language_model(config, num_tokentypes, add_pooler,
53
+ encoder_attn_mask_type,
54
+ add_encoder=True,
55
+ add_decoder=False,
56
+ decoder_attn_mask_type=AttnMaskType.causal,
57
+ pre_process=True, post_process=True, num_experts=[1]):
58
+ """Build language model and return along with the key to save."""
59
+ args = get_args()
60
+ if config.init_method is None:
61
+ config.init_method = init_method_normal(config.init_method_std)
62
+
63
+ if config.output_layer_init_method is None:
64
+ config.output_layer_init_method = scaled_init_method_normal(config.init_method_std,
65
+ config.num_layers)
66
+
67
+ # Language model.
68
+ language_model = TransformerLanguageModel(
69
+ config,
70
+ encoder_attn_mask_type,
71
+ num_tokentypes=num_tokentypes,
72
+ add_encoder=add_encoder,
73
+ add_decoder=add_decoder,
74
+ decoder_attn_mask_type=decoder_attn_mask_type,
75
+ add_pooler=add_pooler,
76
+ pre_process=pre_process,
77
+ post_process=post_process,
78
+ num_experts=num_experts)
79
+ # key used for checkpoints.
80
+ language_model_key = 'language_model'
81
+
82
+ return language_model, language_model_key
83
+
84
+
85
+ class Pooler(MegatronModule):
86
+ """Pooler layer.
87
+
88
+ Pool hidden states of a specific token (for example start of the
89
+ sequence) and add a linear transformation followed by a tanh.
90
+
91
+ Arguments:
92
+ hidden_size: hidden size
93
+ init_method: weight initialization method for the linear layer.
94
+ bias is set to zero.
95
+ """
96
+
97
+ def __init__(self, hidden_size, init_method):
98
+ super(Pooler, self).__init__()
99
+ args = get_args()
100
+ self.dense = get_linear_layer(hidden_size, hidden_size, init_method, gather_params_on_init=args.zero_stage == 3)
101
+ self.sequence_parallel = args.sequence_parallel
102
+
103
+
104
+ def forward(self, hidden_states, sequence_index=0):
105
+ # hidden_states: [s, b, h]
106
+ # sequence_index: index of the token to pool.
107
+
108
+ # gather data along sequence dimensions
109
+ # same pooler is run on all tensor parallel nodes
110
+ if self.sequence_parallel:
111
+ hidden_states = tensor_parallel.gather_from_sequence_parallel_region(
112
+ hidden_states,
113
+ tensor_parallel_output_grad=False)
114
+
115
+ pooled = hidden_states[sequence_index, :, :]
116
+ pooled = self.dense(pooled)
117
+ pooled = torch.tanh(pooled)
118
+ return pooled
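A minimal stand-alone version of the pooler (select one token position from [s, b, h], then linear + tanh), leaving out the sequence-parallel gather:

import torch

class TinyPooler(torch.nn.Module):
    def __init__(self, hidden_size: int):
        super().__init__()
        self.dense = torch.nn.Linear(hidden_size, hidden_size)

    def forward(self, hidden_states: torch.Tensor, sequence_index: int = 0) -> torch.Tensor:
        # hidden_states: [s, b, h]; pick one token, project, squash with tanh.
        pooled = hidden_states[sequence_index, :, :]
        return torch.tanh(self.dense(pooled))

pooled = TinyPooler(16)(torch.randn(10, 4, 16))   # -> [4, 16]
assert pooled.shape == (4, 16)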
119
+
120
+
121
+ class Embedding(MegatronModule):
122
+ """Language model embeddings.
123
+
124
+ Arguments:
125
+ hidden_size: hidden size
126
+ vocab_size: vocabulary size
127
+ max_sequence_length: maximum size of sequence. This
128
+ is used for positional embedding
129
+ embedding_dropout_prob: dropout probability for embeddings
130
+ init_method: weight initialization method
131
+ num_tokentypes: size of the token-type embeddings. 0 value
132
+ will ignore this embedding
133
+ embedding_weights_in_fp32: casts word embedding weights to
134
+ fp32 before sampling. Required to
135
+ maintain reproducibility when
136
+ training in bf16.
137
+ """
138
+
139
+ def __init__(self,
140
+ hidden_size,
141
+ vocab_size,
142
+ max_sequence_length,
143
+ embedding_dropout_prob,
144
+ config,
145
+ add_position_embedding=True,
146
+ num_tokentypes=0,
147
+ embedding_weights_in_fp32=False):
148
+ super(Embedding, self).__init__()
149
+
150
+ self.hidden_size = hidden_size
151
+ self.init_method = config.init_method
152
+ self.num_tokentypes = num_tokentypes
153
+
154
+ args = get_args()
155
+
156
+ # Word embeddings (parallel).
157
+ self.embedding_weights_in_fp32 = embedding_weights_in_fp32
158
+ self.params_dtype = args.params_dtype
159
+ self.word_embeddings = tensor_parallel.VocabParallelEmbedding(
160
+ vocab_size, self.hidden_size, config=config, init_method=config.init_method)
161
+ self._word_embeddings_key = 'word_embeddings'
162
+
163
+ # Position embedding (serial).
164
+ self.add_position_embedding = add_position_embedding
165
+ if self.add_position_embedding:
166
+ self._position_embeddings_key = 'position_embeddings'
167
+ if args.sequence_parallel:
168
+ self.position_embeddings = tensor_parallel.layers.SequenceParallelPositionEmbedding(
169
+ max_sequence_length, self.hidden_size)
170
+ # Initialize the position embeddings.
171
+ self.init_method(self.position_embeddings.local_embeddings.weight)
172
+ else:
173
+ self.position_embeddings = torch.nn.Embedding(
174
+ max_sequence_length, self.hidden_size)
175
+ # Initialize the position embeddings.
176
+ if args.perform_initialization:
177
+ if args.zero_stage == 3:
178
+ gather_and_init(self.position_embeddings.weight, self.init_method)
179
+ else:
180
+ self.init_method(self.position_embeddings.weight)
181
+
182
+ # Token type embedding.
183
+ # Add this as an optional field that can be added through
184
+ # method call so we can load a pretrain model without
185
+ # token types and add them as needed.
186
+ self._tokentype_embeddings_key = 'tokentype_embeddings'
187
+ if self.num_tokentypes > 0:
188
+ self.tokentype_embeddings = torch.nn.Embedding(self.num_tokentypes,
189
+ self.hidden_size)
190
+ # Initialize the token-type embeddings.
191
+ if args.perform_initialization:
192
+ if args.zero_stage == 3:
193
+ gather_and_init(self.tokentype_embeddings.weight, self.init_method)
194
+ else:
195
+ self.init_method(self.tokentype_embeddings.weight)
196
+ else:
197
+ self.tokentype_embeddings = None
198
+
199
+ self.fp32_residual_connection = args.fp32_residual_connection
200
+ self.sequence_parallel = args.sequence_parallel
201
+ # Embeddings dropout
202
+ self.embedding_dropout = torch.nn.Dropout(embedding_dropout_prob)
203
+
204
+ def zero_parameters(self):
205
+ """Zero out all parameters in embedding."""
206
+ self.word_embeddings.weight.data.fill_(0)
207
+ self.word_embeddings.weight.shared = True
208
+ if self.add_position_embedding:
209
+ self.position_embeddings.weight.data.fill_(0)
210
+ self.position_embeddings.weight.shared = True
211
+ if self.num_tokentypes > 0:
212
+ self.tokentype_embeddings.weight.data.fill_(0)
213
+ self.tokentype_embeddings.weight.shared = True
214
+
215
+ def add_tokentype_embeddings(self, num_tokentypes):
216
+ """Add token-type embedding. This function is provided so we can add
217
+ token-type embeddings in case the pretrained model does not have it.
218
+ This allows us to load the model normally and then add this embedding.
219
+ """
220
+ if self.tokentype_embeddings is not None:
221
+ raise Exception('tokentype embeddings is already initialized')
222
+ if torch.distributed.get_rank() == 0:
223
+ print('adding embedding for {} tokentypes'.format(num_tokentypes),
224
+ flush=True)
225
+ self.num_tokentypes = num_tokentypes
226
+ self.tokentype_embeddings = torch.nn.Embedding(num_tokentypes,
227
+ self.hidden_size)
228
+ # Initialize the token-type embeddings.
229
+ args = get_args()
230
+ self.init_method(self.tokentype_embeddings.weight)
231
+
232
+ def forward(self, input_ids, position_ids, tokentype_ids=None):
233
+ # Embeddings.
234
+ if self.embedding_weights_in_fp32:
235
+ self.word_embeddings = self.word_embeddings.to(torch.float32)
236
+ words_embeddings = self.word_embeddings(input_ids)
237
+ if self.embedding_weights_in_fp32:
238
+ words_embeddings = words_embeddings.to(self.params_dtype)
239
+ self.word_embeddings = self.word_embeddings.to(self.params_dtype)
240
+ if self.add_position_embedding:
241
+ position_embeddings = self.position_embeddings(position_ids)
242
+ embeddings = words_embeddings + position_embeddings
243
+ else:
244
+ embeddings = words_embeddings
245
+
246
+ if tokentype_ids is not None:
247
+ assert self.tokentype_embeddings is not None
248
+ embeddings = embeddings + self.tokentype_embeddings(tokentype_ids)
249
+ else:
250
+ assert self.tokentype_embeddings is None
251
+
252
+ # Data format change to avoid explicit transposes: [b s h] --> [s b h].
253
+ embeddings = embeddings.transpose(0, 1).contiguous()
254
+
255
+ # If the input flag for fp32 residual connection is set, convert to float.
256
+ if self.fp32_residual_connection:
257
+ embeddings = embeddings.float()
258
+
259
+ # Dropout.
260
+ if self.sequence_parallel:
261
+ # the sequence is already partitioned here; is scatter_to_sequence_parallel_region still needed?
262
+ embeddings = tensor_parallel.scatter_to_sequence_parallel_region(embeddings)
263
+ with tensor_parallel.get_cuda_rng_tracker().fork():
264
+ embeddings = self.embedding_dropout(embeddings)
265
+ else:
266
+ embeddings = self.embedding_dropout(embeddings)
267
+
268
+ return embeddings
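The embedding forward above boils down to: look up word embeddings, optionally add position (and token-type) embeddings, switch the layout from [b, s, h] to [s, b, h], and apply dropout. A compact single-process sketch that leaves out the fp32-weight and sequence-parallel branches:

import torch

class TinyEmbedding(torch.nn.Module):
    def __init__(self, vocab_size: int, max_seq_len: int, hidden: int, dropout: float = 0.1):
        super().__init__()
        self.word_embeddings = torch.nn.Embedding(vocab_size, hidden)
        self.position_embeddings = torch.nn.Embedding(max_seq_len, hidden)
        self.dropout = torch.nn.Dropout(dropout)

    def forward(self, input_ids: torch.Tensor, position_ids: torch.Tensor) -> torch.Tensor:
        # input_ids, position_ids: [b, s]
        embeddings = self.word_embeddings(input_ids) + self.position_embeddings(position_ids)
        embeddings = embeddings.transpose(0, 1).contiguous()   # [b, s, h] -> [s, b, h]
        return self.dropout(embeddings)

b, s = 2, 6
ids = torch.randint(0, 100, (b, s))
pos = torch.arange(s).unsqueeze(0).expand(b, s)
out = TinyEmbedding(100, 32, 16)(ids, pos)                     # -> [6, 2, 16]
assert out.shape == (s, b, 16)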
269
+
270
+ def state_dict_for_save_checkpoint(self, prefix='', keep_vars=False):
271
+ """For easy load."""
272
+
273
+ state_dict_ = {}
274
+ state_dict_[self._word_embeddings_key] \
275
+ = self.word_embeddings.state_dict(prefix=prefix,
276
+ keep_vars=keep_vars)
277
+ if self.add_position_embedding:
278
+ state_dict_[self._position_embeddings_key] \
279
+ = self.position_embeddings.state_dict(prefix=prefix,
280
+ keep_vars=keep_vars)
281
+ if self.num_tokentypes > 0:
282
+ state_dict_[self._tokentype_embeddings_key] \
283
+ = self.tokentype_embeddings.state_dict(prefix=prefix,
284
+ keep_vars=keep_vars)
285
+
286
+ return state_dict_
287
+
288
+ def load_state_dict(self, state_dict, strict=True):
289
+ """Customized load."""
290
+
291
+ # Word embedding.
292
+ if self._word_embeddings_key in state_dict:
293
+ state_dict_ = state_dict[self._word_embeddings_key]
294
+ else:
295
+ # for backward compatibility.
296
+ state_dict_ = {}
297
+ for key in state_dict.keys():
298
+ if 'word_embeddings' in key:
299
+ state_dict_[key.split('word_embeddings.')[1]] \
300
+ = state_dict[key]
301
+ self.word_embeddings.load_state_dict(state_dict_, strict=strict)
302
+
303
+ # Position embedding.
304
+ if self.add_position_embedding:
305
+ if self._position_embeddings_key in state_dict:
306
+ state_dict_ = state_dict[self._position_embeddings_key]
307
+ else:
308
+ # for backward compatibility.
309
+ state_dict_ = {}
310
+ for key in state_dict.keys():
311
+ if 'position_embeddings' in key:
312
+ state_dict_[key.split('position_embeddings.')[1]] \
313
+ = state_dict[key]
314
+ self.position_embeddings.load_state_dict(state_dict_, strict=strict)
315
+
316
+ # Tokentype embedding.
317
+ if self.num_tokentypes > 0:
318
+ state_dict_ = {}
319
+ if self._tokentype_embeddings_key in state_dict:
320
+ state_dict_ = state_dict[self._tokentype_embeddings_key]
321
+ else:
322
+ # for backward compatibility.
323
+ for key in state_dict.keys():
324
+ if 'tokentype_embeddings' in key:
325
+ state_dict_[key.split('tokentype_embeddings.')[1]] \
326
+ = state_dict[key]
327
+ if len(state_dict_.keys()) > 0:
328
+ self.tokentype_embeddings.load_state_dict(state_dict_,
329
+ strict=strict)
330
+ else:
331
+ print('***WARNING*** expected tokentype embeddings in the '
332
+ 'checkpoint but could not find it', flush=True)
333
+
334
+
335
+ class EmbeddingPipe(Embedding):
336
+
337
+ def forward(self, inputs, **kwargs):
338
+ if not hasattr(self, '_args'):
339
+ self._args = get_args()
340
+
341
+ input_ids = inputs[0]
342
+ position_ids = inputs[1]
343
+ if hasattr(self._args, 'attn_mask'):
344
+ attention_mask = None
345
+ else:
346
+ attention_mask = inputs[2]
347
+
348
+ if len(inputs) == 4:
349
+ tokentype_ids = inputs[3]
350
+ else:
351
+ tokentype_ids = None
352
+
353
+ embeddings = super().forward(input_ids, position_ids, tokentype_ids=tokentype_ids)
354
+
355
+ # If cmd args has attn_mask, we don't forward it as an activation.
356
+ if hasattr(self._args, 'attn_mask'):
357
+ return embeddings
358
+ else:
359
+ assert False
360
+ return embeddings, attention_mask
361
+
362
+
363
+ @property
364
+ def word_embeddings_weight(self):
365
+ """Easy accessor for the DeepSpeed pipeline engine to tie embeddings across stages."""
366
+ return self.word_embeddings.weight
367
+
368
+
369
+ class TransformerLanguageModel(MegatronModule):
370
+ """Transformer language model.
371
+
372
+ Arguments:
373
+ transformer_hparams: transformer hyperparameters
374
+ vocab_size: vocabulary size
375
+ max_sequence_length: maximum size of sequence. This
376
+ is used for positional embedding
377
+ embedding_dropout_prob: dropout probability for embeddings
378
+ num_tokentypes: size of the token-type embeddings. 0 value
379
+ will ignore this embedding
380
+ """
381
+
382
+ def __init__(self,
383
+ config,
384
+ encoder_attn_mask_type,
385
+ num_tokentypes=0,
386
+ add_encoder=True,
387
+ add_decoder=False,
388
+ decoder_attn_mask_type=AttnMaskType.causal,
389
+ add_pooler=False,
390
+ pre_process=True,
391
+ post_process=True,
392
+ num_experts=[1]):
393
+ args = get_args()
394
+ # TODO: passing share_embeddings_and_output_weights=False will not work correctly for T5 and embeddings will not be synced. Fix later for T5.
395
+ if args.untie_embeddings_and_output_weights: assert not add_decoder
396
+ super(TransformerLanguageModel, self).__init__(share_embeddings_and_output_weights=not args.untie_embeddings_and_output_weights)
397
+
398
+ self.pre_process = pre_process
399
+ self.post_process = post_process
400
+ self.hidden_size = config.hidden_size
401
+ self.num_tokentypes = num_tokentypes
402
+ self.init_method = config.init_method
403
+ self.add_encoder = add_encoder
404
+ self.encoder_attn_mask_type = encoder_attn_mask_type
405
+ self.add_decoder = add_decoder
406
+ self.decoder_attn_mask_type = decoder_attn_mask_type
407
+ self.add_pooler = add_pooler
408
+ self.encoder_hidden_state = None
409
+ self.add_retriever = args.retro_add_retriever
410
+ self.untie_embeddings_and_output_weights = args.untie_embeddings_and_output_weights
411
+ self.num_experts = num_experts
412
+
413
+ # Embeddings.
414
+ if self.pre_process:
415
+ self.embedding = Embedding(self.hidden_size,
416
+ args.padded_vocab_size,
417
+ args.max_position_embeddings,
418
+ args.hidden_dropout,
419
+ config,
420
+ args.add_position_embedding,
421
+ self.num_tokentypes,
422
+ args.embedding_weights_in_fp32)
423
+ self._embedding_key = 'embedding'
424
+
425
+ # Rotary positional embeddings
426
+ self.use_rotary_position_embeddings = \
427
+ args.use_rotary_position_embeddings
428
+ if args.use_rotary_position_embeddings:
429
+ self.seq_length = args.seq_length
430
+ rotary_dim = args.hidden_size // args.num_attention_heads \
431
+ if args.kv_channels is None else args.kv_channels
432
+
433
+ if args.rotary_percent < 1.0:
434
+ rotary_dim = int(rotary_dim * args.rotary_percent)
435
+
436
+ # partial rotary embeddings, which is better than full rotary
437
+ # Wang and Komatsuzaki et al
438
+ # https://github.com/kingoflolz/mesh-transformer-jax/
439
+ self.rotary_pos_emb = RotaryEmbedding(rotary_dim)
440
+
441
+ # Encoder (usually set to True, False if part of an encoder-decoder
442
+ # architecture and in encoder-only stage).
443
+ if self.add_encoder:
444
+ self.encoder = ParallelTransformer(
445
+ config,
446
+ model_type=args.model_type if not args.retro_add_retriever \
447
+ else ModelType.retro_decoder,
448
+ self_attn_mask_type=self.encoder_attn_mask_type,
449
+ pre_process=self.pre_process,
450
+ post_process=self.post_process,
451
+ num_experts=self.num_experts
452
+ )
453
+ self._encoder_key = 'encoder'
454
+ else:
455
+ self.encoder = None
456
+
457
+ # Decoder (usually set to False, True if part of an encoder-decoder
458
+ # architecture and in decoder-only stage).
459
+ if self.add_decoder:
460
+ self.decoder = ParallelTransformer(
461
+ config,
462
+ model_type=args.model_type,
463
+ layer_type=LayerType.decoder,
464
+ self_attn_mask_type=self.decoder_attn_mask_type,
465
+ pre_process=self.pre_process,
466
+ post_process=self.post_process,
467
+ num_experts=self.num_experts)
468
+ self._decoder_key = 'decoder'
469
+ else:
470
+ self.decoder = None
471
+
472
+ if self.post_process:
473
+ # Pooler.
474
+ if self.add_pooler:
475
+ self.pooler = Pooler(self.hidden_size, self.init_method)
476
+ self._pooler_key = 'pooler'
477
+
478
+ if self.untie_embeddings_and_output_weights:
479
+ self.output_layer = tensor_parallel.ColumnParallelLinear(
480
+ args.hidden_size,
481
+ args.padded_vocab_size,
482
+ config=config,
483
+ init_method=self.init_method,
484
+ bias=False) # Setting bias to False always to keep it consistent with embedding tying that also does not have a bias.
485
+ self._output_layer_key = 'output_layer'
486
+
487
+ def set_input_tensor(self, input_tensor):
488
+ """ See megatron.model.transformer.set_input_tensor()"""
489
+
490
+ # This is usually handled in schedules.py but some inference code still
491
+ # gives us non-lists or None
492
+ if not isinstance(input_tensor, list):
493
+ input_tensor = [input_tensor]
494
+
495
+ if self.add_encoder and self.add_decoder:
496
+ assert len(input_tensor) == 1, \
497
+ 'input_tensor should only be length 1 for stage with both encoder and decoder'
498
+ self.encoder.set_input_tensor(input_tensor[0])
499
+ elif self.add_encoder:
500
+ assert len(input_tensor) == 1, \
501
+ 'input_tensor should only be length 1 for stage with only encoder'
502
+ self.encoder.set_input_tensor(input_tensor[0])
503
+ elif self.add_decoder:
504
+ if len(input_tensor) == 2:
505
+ self.decoder.set_input_tensor(input_tensor[0])
506
+ self.encoder_hidden_state = input_tensor[1]
507
+ elif len(input_tensor) == 1:
508
+ self.decoder.set_input_tensor(None)
509
+ self.encoder_hidden_state = input_tensor[0]
510
+ else:
511
+ raise Exception('input_tensor must have either length 1 or 2')
512
+ else:
513
+ raise Exception('Stage must have at least either encoder or decoder')
514
+
515
+ def forward(self, enc_input_ids, enc_position_ids, enc_attn_mask,
516
+ dec_input_ids=None, dec_position_ids=None, dec_attn_mask=None,
517
+ retriever_input_ids=None,
518
+ retriever_position_ids=None,
519
+ retriever_attn_mask=None,
520
+ enc_dec_attn_mask=None, tokentype_ids=None,
521
+ inference_params=None,
522
+ pooling_sequence_index=0,
523
+ enc_hidden_states=None, output_enc_hidden=False):
524
+ args = get_args()
525
+ # Encoder embedding.
526
+ if self.pre_process:
527
+ encoder_input = self.embedding(enc_input_ids, enc_position_ids,
528
+ tokentype_ids=tokentype_ids)
529
+ else:
530
+ encoder_input = None
531
+
532
+ # Retriever embedding.
533
+ if self.add_retriever and self.pre_process:
534
+ retriever_input = self.embedding(retriever_input_ids,
535
+ retriever_position_ids,
536
+ tokentype_ids=tokentype_ids)
537
+ else:
538
+ retriever_input = None
539
+
540
+ # Rotary positional embeddings
541
+ rotary_pos_emb = None
542
+ if self.use_rotary_position_embeddings:
543
+ if inference_params is not None:
544
+ rotary_pos_emb = \
545
+ self.rotary_pos_emb(inference_params.max_sequence_len)
546
+ else:
547
+ if args.curriculum_learning_legacy or args.data_efficiency_curriculum_learning:
548
+ rotary_pos_emb = self.rotary_pos_emb(args.curriculum_seqlen)
549
+ else:
550
+ rotary_pos_emb = self.rotary_pos_emb(self.seq_length)
551
+
552
+ # Run encoder.
553
+ if enc_hidden_states is None:
554
+ if self.encoder is not None:
555
+ encoder_output, *encoder_moe_losses = self.encoder(
556
+ encoder_input,
557
+ enc_attn_mask,
558
+ retriever_input=retriever_input,
559
+ retriever_attn_mask=retriever_attn_mask,
560
+ inference_params=inference_params,
561
+ rotary_pos_emb=rotary_pos_emb)
562
+ else:
563
+ encoder_output = self.encoder_hidden_state
564
+ else:
565
+ encoder_output, encoder_moe_losses = enc_hidden_states.to(encoder_input.dtype), []
566
+
567
+ if self.post_process:
568
+ if self.add_pooler:
569
+ pooled_output = self.pooler(encoder_output,
570
+ pooling_sequence_index)
571
+
572
+ # output_enc_hidden refers to when we just need the encoder's
573
+ # output. For example, it is helpful to compute
574
+ # similarity between two sequences by average pooling
575
+ if not self.add_decoder or output_enc_hidden:
576
+ if self.add_pooler and self.post_process:
577
+ return encoder_output, pooled_output, encoder_moe_losses
578
+ else:
579
+ return encoder_output, encoder_moe_losses
580
+
581
+ # Decoder embedding.
582
+ if self.pre_process:
583
+ decoder_input = self.embedding(dec_input_ids,
584
+ dec_position_ids)
585
+ else:
586
+ decoder_input = None
587
+
588
+ # Run decoder.
589
+ decoder_output, *decoder_moe_losses = self.decoder(
590
+ decoder_input,
591
+ dec_attn_mask,
592
+ encoder_output=encoder_output,
593
+ enc_dec_attn_mask=enc_dec_attn_mask,
594
+ inference_params=inference_params,
595
+ rotary_pos_emb=rotary_pos_emb)
596
+
597
+ if self.add_pooler and self.post_process:
598
+ return decoder_output, encoder_output, pooled_output, decoder_moe_losses, encoder_moe_losses
599
+ else:
600
+ return decoder_output, encoder_output, decoder_moe_losses, encoder_moe_losses
601
+
602
+ def state_dict_for_save_checkpoint(self, prefix='', keep_vars=False):
603
+ """For easy load."""
604
+ args = get_args()
605
+ state_dict_ = {}
606
+ moe_state_dict = {}
607
+ if self.pre_process:
608
+ state_dict_[self._embedding_key] \
609
+ = self.embedding.state_dict_for_save_checkpoint(prefix=prefix,
610
+ keep_vars=keep_vars)
611
+ if self.add_encoder:
612
+ encoder_state_dict = self.encoder.state_dict_for_save_checkpoint(
613
+ prefix=prefix, keep_vars=keep_vars)
614
+ if args.random_ltd:
615
+ # When using random-LTD, it is required to call remove_random_ltd_state_dict
616
+ # during model checkpoint saving to transfer the random-LTD-wrapped
617
+ # layers back to original layers. This will help to remove the dependency
618
+ # to random-LTD inside the checkpoint, so that during evaluation or
619
+ # finetuning of the checkpoint there is no need to depend on random-LTD
620
+ # again.
621
+ from deepspeed.runtime.data_pipeline.data_routing.helper import remove_random_ltd_state_dict
622
+ encoder_state_dict = remove_random_ltd_state_dict(encoder_state_dict)
623
+ # MoE states need to be handled separately by DeepSpeed engine, thus
624
+ # moving them to the top level dictionary
625
+ # If components other than encoder may contain MoE states, need to add
626
+ # the same logic
627
+ for key in list(encoder_state_dict.keys()):
628
+ if 'expert' in key and 'moe.gate.wg.weight' not in key:
629
+ moe_state_dict[self._encoder_key+key] = encoder_state_dict.pop(key)
630
+ state_dict_[self._encoder_key] = encoder_state_dict
631
+
632
+ if self.post_process:
633
+ if self.add_pooler:
634
+ state_dict_[self._pooler_key] \
635
+ = self.pooler.state_dict_for_save_checkpoint(prefix=prefix,
636
+ keep_vars=keep_vars)
637
+ if self.untie_embeddings_and_output_weights:
638
+ state_dict_[self._output_layer_key] \
639
+ = self.output_layer.state_dict(prefix=prefix, keep_vars=keep_vars)
640
+
641
+ if self.add_decoder:
642
+ state_dict_[self._decoder_key] \
643
+ = self.decoder.state_dict_for_save_checkpoint(prefix=prefix,
644
+ keep_vars=keep_vars)
645
+
646
+ state_dict_["moe_state_dict"] = moe_state_dict
647
+ return state_dict_
648
+
649
+ def load_state_dict(self, state_dict, strict=True):
650
+ """Customized load."""
651
+
652
+ # Embedding.
653
+ if self.pre_process:
654
+ if self._embedding_key in state_dict:
655
+ state_dict_ = state_dict[self._embedding_key]
656
+ else:
657
+ # for backward compatibility.
658
+ state_dict_ = {}
659
+ for key in state_dict.keys():
660
+ if '_embeddings' in key:
661
+ state_dict_[key] = state_dict[key]
662
+ self.embedding.load_state_dict(state_dict_, strict=strict)
663
+
664
+ # Encoder.
665
+ if self.add_encoder:
666
+ if self._encoder_key in state_dict:
667
+ state_dict_ = state_dict[self._encoder_key]
668
+ # For backward compatibility.
669
+ elif 'transformer' in state_dict:
670
+ state_dict_ = state_dict['transformer']
671
+ else:
672
+ # For backward compatibility.
673
+ state_dict_ = {}
674
+ for key in state_dict.keys():
675
+ if 'transformer.' in key:
676
+ state_dict_[key.split('transformer.')[1]] = state_dict[key]
677
+
678
+ # For backward compatibility.
679
+ # This backward-compatibility remapping can be wrong: sometimes
680
+ # '.attention.' is the actual key and should not be replaced, so the
681
+ # remapping is only applied when the key does not already exist in the encoder's state dict.
682
+ state_dict_self_attention = {}
683
+ encoder_state_dict_keys = list(self.encoder.state_dict().keys())
684
+ for key in state_dict_.keys():
685
+ if '.attention.' in key and key not in encoder_state_dict_keys:
686
+ state_dict_self_attention[key.replace(".attention.",
687
+ ".self_attention.")] = state_dict_[key]
688
+ else:
689
+ state_dict_self_attention[key] = state_dict_[key]
690
+ state_dict_ = state_dict_self_attention
691
+
692
+ # Gather encoder MoE states
693
+ if "moe_state_dict" in state_dict:
694
+ for key in list(state_dict["moe_state_dict"].keys()):
695
+ if self._encoder_key in key:
696
+ key_list = key.split('.')
697
+ while key_list[0] != 'encoder':
698
+ key_list.pop(0)
699
+ key_list.pop(0)
700
+ actual_key = '.'.join(key_list)
701
+ state_dict_[actual_key] = state_dict["moe_state_dict"].pop(key)
702
+ if len(state_dict["moe_state_dict"]) == 0:
703
+ del state_dict["moe_state_dict"]
704
+ self.encoder.load_state_dict(state_dict_, strict=strict)
705
+
706
+ # Pooler.
707
+ if self.post_process:
708
+ if self.add_pooler:
709
+ assert 'pooler' in state_dict, \
710
+ 'could not find data for pooler in the checkpoint'
711
+ self.pooler.load_state_dict(state_dict[self._pooler_key],
712
+ strict=strict)
713
+ if self.untie_embeddings_and_output_weights:
714
+ assert 'output_layer' in state_dict, \
715
+ 'could not find data for output_layer in the checkpoint'
716
+ self.output_layer.load_state_dict(state_dict[self._output_layer_key],
717
+ strict=strict)
718
+ # Decoder.
719
+ if self.add_decoder:
720
+ assert 'decoder' in state_dict, \
721
+ 'could not find data for decoder in the checkpoint'
722
+ self.decoder.load_state_dict(state_dict[self._decoder_key],
723
+ strict=strict)
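The backward-compatibility handling above renames legacy '.attention.' keys to '.self_attention.' only when the legacy key is not already a valid key of the current module. A small sketch of that rename on a plain dict (the example keys are hypothetical):

def remap_attention_keys(state_dict: dict, current_keys: set) -> dict:
    remapped = {}
    for key, value in state_dict.items():
        if '.attention.' in key and key not in current_keys:
            remapped[key.replace('.attention.', '.self_attention.')] = value
        else:
            remapped[key] = value
    return remapped

old = {'layers.0.attention.dense.weight': 1, 'layers.0.mlp.dense_h_to_4h.weight': 2}
new = remap_attention_keys(old, current_keys={'layers.0.self_attention.dense.weight',
                                              'layers.0.mlp.dense_h_to_4h.weight'})
assert 'layers.0.self_attention.dense.weight' in new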
docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/llama_model.py ADDED
@@ -0,0 +1,280 @@
1
+ # coding=utf-8
2
+ # Copyright (c) 2023 Habana Labs, Ltd. an Intel Company.
3
+ # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ """LLaMA model."""
18
+
19
+ import torch
20
+
21
+ from megatron import get_args
22
+ from megatron import mpu
23
+ from .module import MegatronModule, fp32_to_float16
24
+
25
+ from .enums import AttnMaskType
26
+ from megatron.enums import PositionEmbeddingType
27
+ from .language_model import parallel_lm_logits
28
+ from .language_model import get_language_model
29
+ from .utils import init_method_normal, scaled_init_method_normal, WrapName
30
+
31
+ from deepspeed.pipe import PipelineModule, LayerSpec, TiedLayerSpec
32
+ from megatron.model import RMSNorm, LayerNorm, CrossEntropy
33
+ from megatron.model.module import float16_to_fp32
34
+ from .language_model import EmbeddingPipe
35
+ from .transformer import ParallelTransformerLayerPipe
36
+
37
+
38
+ def logits_loss(lm_output, labels, fp16_lm_cross_entropy):
39
+
40
+ if labels is None:
41
+ return lm_output
42
+ else:
43
+ if fp16_lm_cross_entropy:
44
+ assert lm_output.dtype == torch.half
45
+ loss = mpu.vocab_parallel_cross_entropy(lm_output, labels)
46
+ else:
47
+ loss = mpu.vocab_parallel_cross_entropy(lm_output.float(), labels)
48
+ return loss
49
+
50
+
51
+ class LLaMAModel(MegatronModule):
52
+ """LLaMA Language model."""
53
+
54
+ def __init__(self,
55
+ num_tokentypes=0,
56
+ parallel_output=True,
57
+ pre_process=True,
58
+ post_process=True,
59
+ return_moe_loss=True):
60
+ super(LLaMAModel, self).__init__()
61
+ args = get_args()
62
+
63
+ self.parallel_output = parallel_output
64
+ self.pre_process = pre_process
65
+ self.post_process = post_process
66
+ self.fp16_lm_cross_entropy = args.fp16_lm_cross_entropy
67
+ self.return_moe_loss = return_moe_loss
68
+ if args.no_scaled_init:
69
+ scaled_init_method = init_method_normal(args.init_method_std)
70
+ else:
71
+ scaled_init_method = scaled_init_method_normal(args.init_method_std, args.num_layers)
72
+ assert args.position_embedding_type == PositionEmbeddingType.rotary, 'LLaMA should use rotary positional embeddings'
73
+ self.language_model, self._language_model_key = get_language_model(
74
+ num_tokentypes=num_tokentypes,
75
+ add_pooler=False,
76
+ encoder_attn_mask_type=AttnMaskType.causal,
77
+ init_method=init_method_normal(args.init_method_std),
78
+ scaled_init_method=scaled_init_method,
79
+ num_experts=args.num_experts,
80
+ pre_process=self.pre_process,
81
+ post_process=self.post_process,
82
+ use_position=False)
83
+
84
+ self.vocab_projection = mpu.layers.VocabParallelProjection(
85
+ args.padded_vocab_size,
86
+ args.hidden_size,
87
+ parallel_output=True,
88
+ init_method=init_method_normal(args.init_method_std)
89
+ )
90
+
91
+ def set_input_tensor(self, input_tensor):
92
+ """See megatron.model.transformer.set_input_tensor()"""
93
+ self.language_model.set_input_tensor(input_tensor)
94
+
95
+ def forward(self, input_ids, position_ids, attention_mask, labels=None,
96
+ tokentype_ids=None, layer_past=None, get_key_value=False,
97
+ forward_method_parallel_output=None, curriculum_seqlen=None):
98
+ args = get_args()
99
+ if curriculum_seqlen is not None:
100
+ args.curriculum_seqlen = curriculum_seqlen
101
+ if curriculum_seqlen < input_ids.size()[1]:
102
+ # seqlen-based curriculum learning
103
+ # input_ids, position_ids, labels have size [batch size, seqlen]
104
+ input_ids = input_ids[:, :curriculum_seqlen].contiguous()
105
+ position_ids = position_ids[:, :curriculum_seqlen].contiguous()
106
+ if labels is not None:
107
+ labels = labels[:, :curriculum_seqlen].contiguous()
108
+
109
+ # attention_mask has size [1, 1, seqlen, seqlen]
110
+ attention_mask = attention_mask[:, :, :curriculum_seqlen, :curriculum_seqlen].contiguous()
111
+ else:
112
+ if args.curriculum_learning:
113
+ # If got a None input, need to reset curriculum_seqlen on user side
114
+ args.curriculum_seqlen = args.seq_length
115
+
116
+ lm_output, *moe_losses = self.language_model(
117
+ input_ids,
118
+ position_ids,
119
+ attention_mask,
120
+ layer_past=layer_past,
121
+ get_key_value=get_key_value)
122
+
123
+ if self.post_process:
124
+ if get_key_value:
125
+ lm_output, presents = lm_output
126
+ lm_output = self.vocab_projection(lm_output)
127
+ lm_output = logits_loss(lm_output, labels, self.fp16_lm_cross_entropy)
128
+ if get_key_value:
129
+ lm_output = [lm_output, presents]
130
+
131
+ if self.return_moe_loss:
132
+ return (lm_output, *moe_losses)
133
+ else:
134
+ return lm_output
135
+
136
+ def state_dict_for_save_checkpoint(self, destination=None, prefix='',
137
+ keep_vars=False):
138
+
139
+ state_dict_ = {}
140
+ language_model_state_dict = self.language_model.state_dict_for_save_checkpoint(
141
+ destination, prefix, keep_vars)
142
+ # MoE states need to be handled separately by DeepSpeed engine, thus
143
+ # moving them to the top level dictionary
144
+ if "moe_state_dict" in language_model_state_dict:
145
+ for key in list(language_model_state_dict["moe_state_dict"].keys()):
146
+ state_dict_[key] = language_model_state_dict["moe_state_dict"].pop(key)
147
+ del language_model_state_dict["moe_state_dict"]
148
+ state_dict_[self._language_model_key] = language_model_state_dict
149
+ # Save word_embeddings.
150
+ if self.post_process and not self.pre_process:
151
+ state_dict_[self._word_embeddings_for_head_key] \
152
+ = self.word_embeddings.state_dict(destination, prefix, keep_vars)
153
+ return state_dict_
154
+
155
+ def load_state_dict(self, state_dict, strict=True):
156
+ """Customized load."""
157
+
158
+ # Load word_embeddings.
159
+ if self.post_process and not self.pre_process:
160
+ self.word_embeddings.load_state_dict(
161
+ state_dict[self._word_embeddings_for_head_key], strict=strict)
162
+ # Gather MoE states and move under language model
163
+ moe_state_dict = {}
164
+ for key in list(state_dict.keys()):
165
+ if 'expert' in key and 'moe.gate.wg.weight' not in key:
166
+ moe_state_dict[key] = state_dict.pop(key)
167
+ if self._language_model_key in state_dict:
168
+ state_dict = state_dict[self._language_model_key]
169
+ if len(moe_state_dict) > 0:
170
+ state_dict["moe_state_dict"] = moe_state_dict
171
+ self.language_model.load_state_dict(state_dict, strict=strict)
172
+
173
+
174
+ class LLaMAModelPipe(PipelineModule,MegatronModule):
175
+ """LLaMA Language model."""
176
+
177
+ def __init__(self,
178
+ num_tokentypes=0,
179
+ parallel_output=True):
180
+ args = get_args()
181
+ self.parallel_output = parallel_output
182
+
183
+ init_method = init_method_normal(args.init_method_std)
184
+
185
+ if args.no_scaled_init:
186
+ scaled_init_method = init_method_normal(args.init_method_std)
187
+ else:
188
+ scaled_init_method = scaled_init_method_normal(args.init_method_std, args.num_layers)
189
+
190
+ self.specs = []
191
+
192
+ def _to_float16(inputs):
193
+ if args.fp16:
194
+ return fp32_to_float16(inputs, lambda v: v.half())
195
+ elif args.bf16:
196
+ return fp32_to_float16(inputs, lambda v: v.bfloat16())
197
+ else:
198
+ return inputs
199
+
200
+ self.specs.append(_to_float16)
201
+
202
+ # Embedding layer
203
+ # assert args.position_embedding_type == PositionEmbeddingType.rotary, 'LLaMA should use rotary positional embeddings'
204
+ self.specs.append(LayerSpec(EmbeddingPipe,
205
+ args.hidden_size,
206
+ args.padded_vocab_size,
207
+ args.max_position_embeddings,
208
+ args.hidden_dropout,
209
+ init_method=init_method,
210
+ num_tokentypes=num_tokentypes,
211
+ use_position=False))
212
+
213
+ if args.fp32_residual_connection:
214
+ if args.sequence_parallel:
215
+ self.specs.append(lambda x: x.float())
216
+ else:
217
+ self.specs.append(lambda x: x.transpose(0, 1).contiguous().float())
218
+ else:
219
+ if args.sequence_parallel:
220
+ self.specs.append(lambda x: x)
221
+ else:
222
+ self.specs.append(lambda x: x.transpose(0, 1).contiguous())
223
+
224
+ for layer_idx in range(args.num_layers):
225
+ self.specs.append(
226
+ LayerSpec(ParallelTransformerLayerPipe,
227
+ init_method=init_method,
228
+ output_layer_init_method=scaled_init_method,
229
+ layer_number=layer_idx,
230
+ self_attn_mask_type=AttnMaskType.causal))
231
+
232
+
233
+ # Final layernorm after transformer layers
234
+ if args.sequence_parallel:
235
+ self.specs.append(lambda x: x)
236
+ else:
237
+ # Undo data format change
238
+ self.specs.append(lambda x: x.transpose(0, 1).contiguous())
239
+
240
+ # Final RMSNorm after transformer layers
241
+ assert args.layernorm_type=='rmsnorm', 'LLaMA model should use RMSNorm'
242
+ self.specs.append(
243
+ LayerSpec(WrapName, 'final_rmsnorm',
244
+ RMSNorm,
245
+ args.hidden_size,
246
+ eps=args.layernorm_epsilon,
247
+ sequence_parallel=args.sequence_parallel))
248
+
249
+ self.specs.append(
250
+ LayerSpec(WrapName, 'vocab_parallel_projection',
251
+ mpu.layers.VocabParallelProjection,
252
+ args.padded_vocab_size,
253
+ args.hidden_size,
254
+ init_method=init_method)
255
+ )
256
+
257
+ if args.sequence_parallel:
258
+ self.specs.append(lambda x: x.transpose(0, 1).contiguous())
259
+ else:
260
+ self.specs.append(lambda x: x)
261
+
262
+ # Convert to fp32 if needed
263
+ if args.fp16 or args.bf16:
264
+ self.specs.append(float16_to_fp32)
265
+
266
+ if args.checkpoint_activations and args.checkpoint_activations_granularity == "full":
267
+ interval = args.checkpoint_num_layers
268
+ else:
269
+ interval = 0
270
+
271
+ from deepspeed.runtime.pipe.topology import PipeModelDataParallelTopology
272
+ topo = PipeModelDataParallelTopology(num_pp=mpu.get_pipeline_model_parallel_world_size(),
273
+ num_mp=mpu.get_tensor_model_parallel_world_size(),
274
+ num_dp=mpu.get_data_parallel_world_size())
275
+
276
+ super().__init__(layers=self.specs,
277
+ loss_fn=CrossEntropy,
278
+ topology=topo,
279
+ activation_checkpoint_interval=interval,
280
+ partition_method='type:transformer')
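LLaMAModelPipe asserts layernorm_type == 'rmsnorm'; the RMSNorm used as the final normalization is, in essence, the input scaled by the reciprocal root-mean-square of its last dimension and a learned weight. A minimal reference implementation, independent of the RMSNorm class imported above and without the sequence-parallel flag:

import torch

class ReferenceRMSNorm(torch.nn.Module):
    def __init__(self, hidden_size: int, eps: float = 1e-6):
        super().__init__()
        self.eps = eps
        self.weight = torch.nn.Parameter(torch.ones(hidden_size))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Normalize by the RMS of the last dimension; no mean subtraction, no bias.
        variance = x.float().pow(2).mean(-1, keepdim=True)
        x_normed = x.float() * torch.rsqrt(variance + self.eps)
        return (self.weight * x_normed).to(x.dtype)

out = ReferenceRMSNorm(16)(torch.randn(4, 2, 16))
assert out.shape == (4, 2, 16)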
docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/module.py ADDED
@@ -0,0 +1,206 @@
1
+ # Copyright (C) 2024 Habana Labs, Ltd. an Intel Company.
2
+ # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
3
+
4
+ """Megatron Module"""
5
+
6
+ import torch
7
+ from torch.autograd import Variable
8
+ from torch.nn.parameter import Parameter
9
+ from deepspeed.accelerator import get_accelerator
10
+ from megatron import get_args
11
+ from megatron.core import mpu, tensor_parallel
12
+
13
+
14
+ if get_accelerator().device_name() == "hpu":
15
+ # revert this once [SW-160732] is fixed
16
+ _FLOAT_TYPES = (torch.FloatTensor, )
17
+ _HALF_TYPES = (torch.HalfTensor, )
18
+ _BF16_TYPES = (torch.BFloat16Tensor, )
19
+ else:
20
+ _FLOAT_TYPES = (torch.FloatTensor, get_accelerator().FloatTensor)
21
+ _HALF_TYPES = (torch.HalfTensor, get_accelerator().HalfTensor)
22
+ _BF16_TYPES = (torch.BFloat16Tensor, get_accelerator().BFloat16Tensor)
23
+
24
+
25
+
26
+ def param_is_not_shared(param):
27
+ return not hasattr(param, 'shared') or not param.shared
28
+
29
+
30
+
31
+ class MegatronModule(torch.nn.Module):
32
+ """Megatron specific extensions of torch Module with support
33
+ for pipelining."""
34
+
35
+ def __init__(self, config=None, share_embeddings_and_output_weights=True):
36
+ super(MegatronModule, self).__init__()
37
+ self.config = config
38
+ self.share_embeddings_and_output_weights = share_embeddings_and_output_weights
39
+
40
+
41
+ def state_dict_for_save_checkpoint(self, prefix='', keep_vars=False):
42
+ """Use this function to override the state dict for
43
+ saving checkpoints."""
44
+ return self.state_dict(prefix=prefix, keep_vars=keep_vars)
45
+
46
+
47
+ def shared_embedding_or_output_weight(self):
48
+ if self.pre_process:
49
+ return self.language_model.embedding.word_embeddings.weight
50
+ else:
51
+ if not self.share_embeddings_and_output_weights:
52
+ raise Exception('shared_embedding_or_output_weight() called for last '
53
+ 'stage, but share_embeddings_and_output_weights is false')
54
+ return self.word_embeddings.weight
55
+
56
+
57
+ def initialize_word_embeddings(self):
58
+ args = get_args()
59
+ if not self.share_embeddings_and_output_weights:
60
+ raise Exception('initialize_word_embeddings() was called but '
61
+ 'share_embeddings_and_output_weights is false')
62
+
63
+ # This function just initializes the word embeddings in the final stage
64
+ # when we are using pipeline parallelism. Nothing to do if we aren't
65
+ # using pipeline parallelism.
66
+ if args.pipeline_model_parallel_size == 1:
67
+ return
68
+
69
+ # Parameters are shared between the word embeddings layers, and the
70
+ # heads at the end of the model. In a pipelined setup with more than
71
+ # one stage, the initial embedding layer and the head are on different
72
+ # workers, so we do the following:
73
+ # 1. Create a second copy of word_embeddings on the last stage, with
74
+ # initial parameters of 0.0.
75
+ # 2. Do an all-reduce between the first and last stage to ensure that
76
+ # the two copies of word_embeddings start off with the same
77
+ # parameter values.
78
+ # 3. In the training loop, do an all-reduce between the grads of
79
+ # the two word_embeddings layers to ensure that every applied weight
80
+ # update is the same on both stages.
81
+ if mpu.is_pipeline_last_stage() and not self.pre_process:
82
+ assert not mpu.is_pipeline_first_stage()
83
+ self._word_embeddings_for_head_key = 'word_embeddings_for_head'
84
+ # set word_embeddings weights to 0 here, then copy first
85
+ # stage's weights using all_reduce below.
86
+ self.word_embeddings = tensor_parallel.VocabParallelEmbedding(
87
+ args.padded_vocab_size, self.config.hidden_size,
88
+ config=self.config, init_method=self.config.init_method)
89
+ self.word_embeddings.weight.data.fill_(0)
90
+ self.word_embeddings.weight.shared = True
91
+
92
+ # Zero out initial weights for decoder embedding.
93
+ # NOTE: We don't currently support T5 with the interleaved schedule.
94
+ if not mpu.is_pipeline_first_stage(ignore_virtual=True) and \
95
+ self.pre_process:
96
+ self.language_model.embedding.zero_parameters()
97
+
98
+ if not torch.distributed.is_initialized():
99
+ if not getattr(MegatronModule, "embedding_warning_printed", False):
100
+ print("WARNING! Distributed processes aren't initialized, so "
101
+ "word embeddings in the last layer are not initialized. "
102
+ "If you are just manipulating a model this is fine, but "
103
+ "this needs to be handled manually. If you are training "
104
+ "something is definitely wrong.")
105
+ MegatronModule.embedding_warning_printed = True
106
+ return
107
+
108
+ # Ensure that first and last stages have the same initial parameter
109
+ # values.
110
+ if mpu.is_rank_in_embedding_group():
111
+ torch.distributed.all_reduce(self.shared_embedding_or_output_weight().data,
112
+ group=mpu.get_embedding_group())
113
+
114
+ # Ensure that encoder(first stage) and decoder(split stage) position
115
+ # embeddings have the same initial parameter values
116
+ # NOTE: We don't currently support T5 with the interleaved schedule.
117
+ if mpu.is_rank_in_position_embedding_group() and \
118
+ args.pipeline_model_parallel_split_rank is not None:
119
+ # TODO: Support tokentype embedding.
120
+ self.language_model.embedding.cuda()
121
+ position_embeddings = self.language_model.embedding.position_embeddings
122
+ torch.distributed.all_reduce(position_embeddings.weight.data,
123
+ group=mpu.get_position_embedding_group())
124
+
125
+ def universal_checkpoint_info(self):
126
+ return {}
127
+
128
+ def conversion_helper(val, conversion):
129
+ """Apply conversion to val. Recursively apply conversion if `val`
130
+ is a nested tuple/list structure."""
131
+ if not isinstance(val, (tuple, list)):
132
+ return conversion(val)
133
+ rtn = [conversion_helper(v, conversion) for v in val]
134
+ if isinstance(val, tuple):
135
+ rtn = tuple(rtn)
136
+ return rtn
137
+
138
+
139
+ def fp32_to_float16(val, float16_convertor):
140
+ """Convert fp32 `val` to fp16/bf16"""
141
+ def half_conversion(val):
142
+ val_typecheck = val
143
+ if isinstance(val_typecheck, (Parameter, Variable)):
144
+ val_typecheck = val.data
145
+ if isinstance(val_typecheck, _FLOAT_TYPES):
146
+ val = float16_convertor(val)
147
+ return val
148
+ return conversion_helper(val, half_conversion)
149
+
150
+
151
+ def float16_to_fp32(val):
152
+ """Convert fp16/bf16 `val` to fp32"""
153
+ def float_conversion(val):
154
+ val_typecheck = val
155
+ if isinstance(val_typecheck, (Parameter, Variable)):
156
+ val_typecheck = val.data
157
+ if isinstance(val_typecheck, (_BF16_TYPES, _HALF_TYPES)):
158
+ val = val.float()
159
+ return val
160
+ return conversion_helper(val, float_conversion)
161
+
162
+
163
+
164
+ class Float16Module(MegatronModule):
165
+
166
+ def __init__(self, module, args):
167
+ super(Float16Module, self).__init__()
168
+
169
+ if args.fp16:
170
+ self.add_module('module', module.half())
171
+ def float16_convertor(val):
172
+ return val.half()
173
+ elif args.bf16:
174
+ self.add_module('module', module.bfloat16())
175
+ def float16_convertor(val):
176
+ return val.bfloat16()
177
+ else:
178
+ raise Exception('should not be here')
179
+
180
+ self.float16_convertor = float16_convertor
181
+
182
+
183
+ def set_input_tensor(self, input_tensor):
184
+ return self.module.set_input_tensor(input_tensor)
185
+
186
+
187
+ def forward(self, *inputs, **kwargs):
188
+ if mpu.is_pipeline_first_stage():
189
+ inputs = fp32_to_float16(inputs, self.float16_convertor)
190
+ outputs = self.module(*inputs, **kwargs)
191
+ if mpu.is_pipeline_last_stage():
192
+ outputs = float16_to_fp32(outputs)
193
+ return outputs
194
+
195
+
196
+ def state_dict(self, prefix='', keep_vars=False):
197
+ return self.module.state_dict(prefix=prefix, keep_vars=keep_vars)
198
+
199
+
200
+ def state_dict_for_save_checkpoint(self, prefix='', keep_vars=False):
201
+ return self.module.state_dict_for_save_checkpoint(prefix=prefix,
202
+ keep_vars=keep_vars)
203
+
204
+
205
+ def load_state_dict(self, state_dict, strict=True):
206
+ self.module.load_state_dict(state_dict, strict=strict)
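For reference, a minimal self-contained sketch (not part of the files in this commit) of the recursive dtype-conversion idiom that conversion_helper, fp32_to_float16 and Float16Module rely on above; it assumes only plain PyTorch and uses made-up tensors.

# Walk nested tuples/lists and convert only floating-point tensors to half
# precision, leaving integer tensors untouched.
import torch

def convert_nested(val, convert):
    if not isinstance(val, (tuple, list)):
        return convert(val)
    out = [convert_nested(v, convert) for v in val]
    return tuple(out) if isinstance(val, tuple) else out

def to_half(x):
    return x.half() if torch.is_tensor(x) and x.is_floating_point() else x

batch = (torch.randn(2, 4), [torch.arange(3), torch.ones(2)])
half_batch = convert_nested(batch, to_half)
print(half_batch[0].dtype, half_batch[1][0].dtype, half_batch[1][1].dtype)
# torch.float16 torch.int64 torch.float16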
docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/realm_model.py ADDED
@@ -0,0 +1,204 @@
1
+ import os
2
+ import torch
3
+
4
+ from megatron import get_args, print_rank_0
5
+ from megatron.checkpointing import get_checkpoint_tracker_filename, get_checkpoint_name
6
+ from megatron.model import BertModel
7
+ from .module import MegatronModule
8
+ from megatron.core import mpu
9
+ from megatron.model.enums import AttnMaskType
10
+ from megatron.model.utils import get_linear_layer
11
+ from megatron.model.utils import init_method_normal
12
+ from megatron.model.language_model import get_language_model
13
+ from megatron.model.utils import scaled_init_method_normal
14
+ from megatron.model.bert_model import bert_extended_attention_mask, bert_position_ids
15
+ from deepspeed.accelerator import get_accelerator
16
+
17
+ def general_ict_model_provider(only_query_model=False, only_block_model=False):
18
+ """Build the model."""
19
+ args = get_args()
20
+ assert args.ict_head_size is not None, \
21
+ "Need to specify --ict-head-size to provide an ICTBertModel"
22
+ assert mpu.get_tensor_model_parallel_world_size() == 1 and mpu.get_pipeline_model_parallel_world_size() == 1, \
23
+ "Model parallel size > 1 not supported for ICT"
24
+
25
+ print_rank_0('building ICTBertModel...')
26
+
27
+ # simpler to just keep using 2 tokentypes since the LM we initialize with has 2 tokentypes
28
+ model = ICTBertModel(
29
+ ict_head_size=args.ict_head_size,
30
+ num_tokentypes=2,
31
+ parallel_output=True,
32
+ only_query_model=only_query_model,
33
+ only_block_model=only_block_model)
34
+
35
+ return model
36
+
37
+
38
+ class ICTBertModel(MegatronModule):
39
+ """Bert-based module for Inverse Cloze task."""
40
+ def __init__(self,
41
+ ict_head_size,
42
+ num_tokentypes=1,
43
+ parallel_output=True,
44
+ only_query_model=False,
45
+ only_block_model=False):
46
+ super(ICTBertModel, self).__init__()
47
+ bert_kwargs = dict(
48
+ ict_head_size=ict_head_size,
49
+ num_tokentypes=num_tokentypes,
50
+ parallel_output=parallel_output
51
+ )
52
+ assert not (only_block_model and only_query_model)
53
+ self.use_block_model = not only_query_model
54
+ self.use_query_model = not only_block_model
55
+
56
+ if self.use_query_model:
57
+ # this model embeds (pseudo-)queries - Embed_input in the paper
58
+ self.query_model = IREncoderBertModel(**bert_kwargs)
59
+ self._query_key = 'question_model'
60
+
61
+ if self.use_block_model:
62
+ # this model embeds evidence blocks - Embed_doc in the paper
63
+ self.block_model = IREncoderBertModel(**bert_kwargs)
64
+ self._block_key = 'context_model'
65
+
66
+ def forward(self, query_tokens, query_attention_mask, block_tokens, block_attention_mask):
67
+ """Run a forward pass for each of the models and return the respective embeddings."""
68
+ query_logits = self.embed_query(query_tokens, query_attention_mask)
69
+ block_logits = self.embed_block(block_tokens, block_attention_mask)
70
+ return query_logits, block_logits
71
+
72
+ def embed_query(self, query_tokens, query_attention_mask):
73
+ """Embed a batch of tokens using the query model"""
74
+ if self.use_query_model:
75
+ query_types = get_accelerator().LongTensor(*query_tokens.shape).fill_(0)
76
+ query_ict_logits, _ = self.query_model.forward(query_tokens, query_attention_mask, query_types)
77
+ return query_ict_logits
78
+ else:
79
+ raise ValueError("Cannot embed query without query model.")
80
+
81
+ def embed_block(self, block_tokens, block_attention_mask):
82
+ """Embed a batch of tokens using the block model"""
83
+ if self.use_block_model:
84
+ block_types = get_accelerator().LongTensor(*block_tokens.shape).fill_(0)
85
+ block_ict_logits, _ = self.block_model.forward(block_tokens, block_attention_mask, block_types)
86
+ return block_ict_logits
87
+ else:
88
+ raise ValueError("Cannot embed block without block model.")
89
+
90
+ def state_dict_for_save_checkpoint(self, prefix='', keep_vars=False):
91
+ """Save dict with state dicts of each of the models."""
92
+ state_dict_ = {}
93
+ if self.use_query_model:
94
+ state_dict_[self._query_key] \
95
+ = self.query_model.state_dict_for_save_checkpoint(
96
+ prefix=prefix, keep_vars=keep_vars)
97
+
98
+ if self.use_block_model:
99
+ state_dict_[self._block_key] \
100
+ = self.block_model.state_dict_for_save_checkpoint(
101
+ prefix=prefix, keep_vars=keep_vars)
102
+
103
+ return state_dict_
104
+
105
+ def load_state_dict(self, state_dict, strict=True):
106
+ """Load the state dicts of each of the models"""
107
+ if self.use_query_model:
108
+ print("Loading ICT query model", flush=True)
109
+ self.query_model.load_state_dict(
110
+ state_dict[self._query_key], strict=strict)
111
+
112
+ if self.use_block_model:
113
+ print("Loading ICT block model", flush=True)
114
+ self.block_model.load_state_dict(
115
+ state_dict[self._block_key], strict=strict)
116
+
117
+ def init_state_dict_from_bert(self):
118
+ """Initialize the state from a pretrained BERT model on iteration zero of ICT pretraining"""
119
+ args = get_args()
120
+ tracker_filename = get_checkpoint_tracker_filename(args.bert_load)
121
+ if not os.path.isfile(tracker_filename):
122
+ raise FileNotFoundError("Could not find BERT load for ICT")
123
+ with open(tracker_filename, 'r') as f:
124
+ iteration = int(f.read().strip())
125
+ assert iteration > 0
126
+
127
+ checkpoint_name = get_checkpoint_name(args.bert_load, iteration, False)
128
+ if mpu.get_data_parallel_rank() == 0:
129
+ print('global rank {} is loading checkpoint {}'.format(
130
+ torch.distributed.get_rank(), checkpoint_name))
131
+
132
+ try:
133
+ state_dict = torch.load(checkpoint_name, map_location='cpu')
134
+ except BaseException:
135
+ raise ValueError("Could not load checkpoint")
136
+
137
+ # load the LM state dict into each model
138
+ model_dict = state_dict['model']['language_model']
139
+ self.query_model.language_model.load_state_dict(model_dict)
140
+ self.block_model.language_model.load_state_dict(model_dict)
141
+
142
+ # give each model the same ict_head to begin with as well
143
+ query_ict_head_state_dict = self.state_dict_for_save_checkpoint()[self._query_key]['ict_head']
144
+ self.block_model.ict_head.load_state_dict(query_ict_head_state_dict)
145
+
146
+
147
+ class IREncoderBertModel(MegatronModule):
148
+ """BERT-based encoder for queries or blocks used for learned information retrieval."""
149
+ def __init__(self, ict_head_size, num_tokentypes=2, parallel_output=True):
150
+ super(IREncoderBertModel, self).__init__()
151
+ args = get_args()
152
+
153
+ self.ict_head_size = ict_head_size
154
+ self.parallel_output = parallel_output
155
+ init_method = init_method_normal(args.init_method_std)
156
+ scaled_init_method = scaled_init_method_normal(args.init_method_std,
157
+ args.num_layers)
158
+
159
+ self.language_model, self._language_model_key = get_language_model(
160
+ num_tokentypes=num_tokentypes,
161
+ add_pooler=True,
162
+ encoder_attn_mask_type=AttnMaskType.padding,
163
+ init_method=init_method,
164
+ scaled_init_method=scaled_init_method)
165
+
166
+ self.ict_head = get_linear_layer(args.hidden_size, ict_head_size, init_method, gather_params_on_init=args.zero_stage == 3)
167
+ self._ict_head_key = 'ict_head'
168
+
169
+ def forward(self, input_ids, attention_mask, tokentype_ids=None):
170
+ extended_attention_mask = bert_extended_attention_mask(
171
+ attention_mask, next(self.language_model.parameters()).dtype)
172
+ position_ids = bert_position_ids(input_ids)
173
+
174
+ lm_output, pooled_output = self.language_model(
175
+ input_ids,
176
+ position_ids,
177
+ extended_attention_mask,
178
+ tokentype_ids=tokentype_ids)
179
+
180
+ # Output.
181
+ ict_logits = self.ict_head(pooled_output)
182
+ return ict_logits, None
183
+
184
+ def state_dict_for_save_checkpoint(self, prefix='', keep_vars=False):
185
+ """For easy load when model is combined with other heads,
186
+ add an extra key."""
187
+
188
+ state_dict_ = {}
189
+ state_dict_[self._language_model_key] \
190
+ = self.language_model.state_dict_for_save_checkpoint(prefix=prefix,
191
+ keep_vars=keep_vars)
192
+ state_dict_[self._ict_head_key] \
193
+ = self.ict_head.state_dict(prefix=prefix,
194
+ keep_vars=keep_vars)
195
+ return state_dict_
196
+
197
+ def load_state_dict(self, state_dict, strict=True):
198
+ """Customized load."""
199
+ self.language_model.load_state_dict(
200
+ state_dict[self._language_model_key], strict=strict)
201
+ self.ict_head.load_state_dict(
202
+ state_dict[self._ict_head_key], strict=strict)
203
+
204
+
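A small standalone sketch (not part of the files in this commit) of how the query and block embeddings returned by ICTBertModel.forward are typically compared; the dot-product scoring below is an illustrative assumption and is not defined anywhere in this file.

# Hypothetical retrieval scoring over made-up embeddings: rank evidence blocks
# for each query by the inner product of their ICT-head outputs.
import torch

query_logits = torch.randn(4, 128)   # [num_queries, ict_head_size]
block_logits = torch.randn(32, 128)  # [num_blocks,  ict_head_size]

scores = query_logits @ block_logits.t()         # [num_queries, num_blocks]
top_blocks = scores.topk(k=5, dim=-1).indices    # 5 highest-scoring blocks per query
print(top_blocks.shape)                          # torch.Size([4, 5])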
docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/rmsnorm.py ADDED
@@ -0,0 +1,49 @@
1
+ # Copyright (C) 2024 Habana Labs, Ltd. an Intel Company.
2
+ # coding=utf-8
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from deepspeed.accelerator import get_accelerator
16
+ from megatron import get_args
17
+
18
+ import torch
19
+ from torch.nn import init
20
+ from torch.nn.parameter import Parameter
21
+
22
+ try:
23
+ from habana_frameworks.torch.hpex.normalization import FusedRMSNorm
24
+ except:
25
+ FusedRMSNorm = None
26
+
27
+
28
+ class RMSNorm(torch.nn.Module):
29
+ def __init__(self, dim, eps=1e-5, sequence_parallel=False):
30
+ super().__init__()
31
+ self.epsilon = eps
32
+ self.weight = Parameter(torch.empty(dim,
33
+ device=get_accelerator().current_device_name(),
34
+ dtype=get_args().params_dtype))
35
+ init.ones_(self.weight)
36
+ self.use_fused_rmsnorm = get_args().use_fused_rmsnorm
37
+
38
+ if sequence_parallel:
39
+ setattr(self.weight, 'sequence_parallel', sequence_parallel)
40
+
41
+ def forward(self, x):
42
+ if self.use_fused_rmsnorm and x.device.type == "hpu":
43
+ assert FusedRMSNorm is not None, "failed to import FusedRMSNorm"
44
+ return FusedRMSNorm.apply(x, self.weight, self.epsilon)
45
+ dtype = x.dtype
46
+ x = x.float()
47
+ norm = torch.mean(x**2, -1, keepdim=True)
48
+ norm = x.mul(norm.add_(self.epsilon).rsqrt_())
49
+ return self.weight * norm.to(dtype)
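For reference, a minimal standalone sketch (not part of the files in this commit) of the RMSNorm math used by the non-fused branch above, i.e. weight * x / sqrt(mean(x^2) + eps) computed in fp32; it assumes only plain PyTorch.

# Reference RMSNorm matching the fallback path above.
import torch

def rms_norm(x, weight, eps=1e-5):
    orig_dtype = x.dtype
    x = x.float()
    variance = x.pow(2).mean(dim=-1, keepdim=True)
    x = x * torch.rsqrt(variance + eps)
    return weight * x.to(orig_dtype)

x = torch.randn(2, 8, dtype=torch.bfloat16)
w = torch.ones(8, dtype=torch.bfloat16)
print(rms_norm(x, w).shape)  # torch.Size([2, 8])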
docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/rotary_pos_embedding.py ADDED
@@ -0,0 +1,82 @@
1
+ # Copyright (C) 2024 Habana Labs, Ltd. an Intel Company.
2
+ # coding=utf-8
3
+
4
+ # The following code has been taken from https://github.com/NVIDIA/NeMo/blob/ \
5
+ # 782b4e1652aaa43c8be390d9db0dc89544afa080/nemo/collections/nlp/modules/ \
6
+ # common/megatron/rotary_pos_embedding.py
7
+
8
+ import importlib.util
9
+ import torch
10
+
11
+ from torch import einsum, nn
12
+
13
+ __all__ = ['RotaryEmbedding', 'apply_rotary_pos_emb']
14
+
15
+ try:
16
+ from habana_frameworks.torch.hpex.kernels import RotaryPosEmbeddingHelperV1
17
+ except ImportError:
18
+ RotaryPosEmbeddingHelperV1 = None
19
+
20
+ # sin, cos tensors cached for all devices
21
+ cos_cached = None
22
+ sin_cached = None
23
+
24
+
25
+ class RotaryEmbedding(nn.Module):
26
+ def __init__(self, dim):
27
+ super().__init__()
28
+ inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim))
29
+ self.register_buffer('inv_freq', inv_freq)
30
+ if importlib.util.find_spec('einops') is None:
31
+ raise RuntimeError("einops is required for Rotary Embedding")
32
+
33
+ def forward(self, max_seq_len, offset=0):
34
+ seq = torch.arange(max_seq_len, device=self.inv_freq.device) + offset
35
+ freqs = einsum('i , j -> i j', seq.type_as(self.inv_freq), self.inv_freq)
36
+ # first part even vector components, second part odd vector components,
37
+ # 2 * dim in dimension size
38
+ emb = torch.cat((freqs, freqs), dim=-1)
39
+ # emb [seq_length, .., dim]
40
+ from einops import rearrange
41
+ return rearrange(emb, 'n d -> n 1 1 d')
42
+
43
+
44
+ def _rotate_half(x):
45
+ """
46
+ swap the two halves of the last dimension and negate the new first half: [x1, x2] -> [-x2, x1]
47
+ """
48
+ from einops import rearrange
49
+ x = rearrange(x, '... (j d) -> ... j d', j=2)
50
+ x1, x2 = x.unbind(dim=-2)
51
+ return torch.cat((-x2, x1), dim=-1)
52
+
53
+
54
+ def apply_rotary_pos_emb(t, freqs):
55
+ """
56
+ input tensor t is of shape [seq_length, ..., dim]
57
+ rotary positional embedding tensor freqs is of shape [seq_length, ..., dim]
58
+ check https://kexue.fm/archives/8265 for detailed formulas
59
+ """
60
+ rot_dim = freqs.shape[-1]
61
+ t_pass = None
62
+ # a zero-sized t_pass tensor would trigger an H2D DMA of an empty tensor,
63
+ # which hurts performance, so only split out t_pass when it is needed
64
+ if t.shape[-1] != rot_dim:
65
+ # ideally t_pass is empty so rotary pos embedding is applied to all tensor t
66
+ t, t_pass = t[..., :rot_dim], t[..., rot_dim:]
67
+
68
+ global cos_cached, sin_cached
69
+ if cos_cached is None or sin_cached is None:
70
+ cos_cached = freqs.cos().to(t.dtype)
71
+ sin_cached = freqs.sin().to(t.dtype)
72
+
73
+ if t.device.type == "hpu":
74
+ assert RotaryPosEmbeddingHelperV1 is not None, "failed to import RotaryPosEmbeddingHelperV1"
75
+ t = RotaryPosEmbeddingHelperV1.apply(t, cos_cached, sin_cached, 0) # offset already used in RotaryEmbedding.forward
76
+ else:
77
+ # first part is cosine component
78
+ # second part is sine component, need to change signs with _rotate_half method
79
+ t = (t * cos_cached) + (_rotate_half(t) * sin_cached)
80
+ if t_pass is None:
81
+ return t
82
+ return torch.cat((t, t_pass), dim=-1)
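For reference, a minimal standalone sketch (not part of the files in this commit) of the non-HPU rotary path above: build cos/sin tables from inverse frequencies and rotate the input with the rotate-half rule; only plain PyTorch is assumed and all shapes are made up.

# Apply rotary position embeddings to a [seq, batch, heads, head_dim] tensor.
import torch

def rotate_half(x):
    x1, x2 = x.chunk(2, dim=-1)
    return torch.cat((-x2, x1), dim=-1)

seq_len, dim = 16, 8
inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim))
freqs = torch.einsum('i,j->ij', torch.arange(seq_len).float(), inv_freq)
emb = torch.cat((freqs, freqs), dim=-1).view(seq_len, 1, 1, dim)

t = torch.randn(seq_len, 2, 4, dim)               # [seq, batch, heads, head_dim]
t_rot = t * emb.cos() + rotate_half(t) * emb.sin()
print(t_rot.shape)                                # torch.Size([16, 2, 4, 8])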
docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/vision/__init__.py ADDED
File without changes
docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/vision/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (193 Bytes). View file
 
docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/vision/__pycache__/knn_monitor.cpython-310.pyc ADDED
Binary file (3.43 kB). View file
 
docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/vision/classification.py ADDED
@@ -0,0 +1,86 @@
1
+ # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
2
+
3
+ """Vision Transformer(VIT) model."""
4
+
5
+ import torch
6
+ from torch.nn.init import trunc_normal_
7
+ from megatron import get_args
8
+ from megatron.model.utils import get_linear_layer
9
+ from megatron.model.vision.vit_backbone import VitBackbone, VitMlpHead
10
+ from megatron.model.vision.mit_backbone import mit_b3_avg
11
+ from megatron.model.module import MegatronModule
12
+
13
+ class VitClassificationModel(MegatronModule):
14
+ """Vision Transformer Model."""
15
+
16
+ def __init__(self, config, num_classes, finetune=False,
17
+ pre_process=True, post_process=True):
18
+ super(VitClassificationModel, self).__init__()
19
+ args = get_args()
20
+
21
+ self.hidden_size = args.hidden_size
22
+ self.num_classes = num_classes
23
+ self.finetune = finetune
24
+ self.pre_process = pre_process
25
+ self.post_process = post_process
26
+ self.backbone = VitBackbone(
27
+ config=config,
28
+ pre_process=self.pre_process,
29
+ post_process=self.post_process,
30
+ single_token_output=True
31
+ )
32
+
33
+ if self.post_process:
34
+ if not self.finetune:
35
+ self.head = VitMlpHead(self.hidden_size, self.num_classes)
36
+ else:
37
+ self.head = get_linear_layer(
38
+ self.hidden_size,
39
+ self.num_classes,
40
+ torch.nn.init.zeros_,
41
+ gather_params_on_init=args.zero_stage == 3
42
+ )
43
+
44
+ def set_input_tensor(self, input_tensor):
45
+ """See megatron.model.transformer.set_input_tensor()"""
46
+ self.backbone.set_input_tensor(input_tensor)
47
+
48
+ def forward(self, input):
49
+ hidden_states = self.backbone(input)
50
+
51
+ if self.post_process:
52
+ hidden_states = self.head(hidden_states)
53
+
54
+ return hidden_states
55
+
56
+
57
+ class MitClassificationModel(MegatronModule):
58
+ """Mix vision Transformer Model."""
59
+
60
+ def __init__(self, num_classes,
61
+ pre_process=True, post_process=True):
62
+ super(MitClassificationModel, self).__init__()
63
+ args = get_args()
64
+
65
+ self.hidden_size = args.hidden_size
66
+ self.num_classes = num_classes
67
+
68
+ self.backbone = mit_b3_avg()
69
+ self.head = torch.nn.Linear(512, num_classes)
70
+ self.apply(self._init_weights)
71
+
72
+ def _init_weights(self, m):
73
+ if isinstance(m, torch.nn.Linear):
74
+ trunc_normal_(m.weight, std=.02)
75
+ if isinstance(m, torch.nn.Linear) and m.bias is not None:
76
+ torch.nn.init.constant_(m.bias, 0)
77
+
78
+ def set_input_tensor(self, input_tensor):
79
+ """See megatron.model.transformer.set_input_tensor()"""
80
+ pass
81
+
82
+ def forward(self, input):
83
+ hidden_states = self.backbone(input)
84
+ hidden_states = self.head(hidden_states)
85
+
86
+ return hidden_states
docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/vision/dino.py ADDED
@@ -0,0 +1,290 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ #
3
+ # This source code is licensed under the Apache license found in the
4
+ # LICENSE file in the root directory of this source tree.
5
+
6
+ # copied from https://github.com/facebookresearch/dino/blob/main/main_dino.py
7
+ # reworked/refactored some parts to make it run in Megatron.
8
+ import math
9
+ import apex
10
+ import einops
11
+ import torch
12
+ import numpy as np
13
+ import torch.nn.functional as F
14
+ from torch.nn.init import trunc_normal_
15
+ from megatron import get_args, print_rank_0
16
+ from megatron.model.utils import get_linear_layer
17
+ from megatron.model.vision.vit_backbone import VitBackbone
18
+ from megatron.model.module import MegatronModule
19
+ from megatron.model.vision.mit_backbone import mit_b5_avg
20
+ from megatron.model.vision.esvit_swin_backbone import get_swin
21
+
22
+
23
+ class DINOLoss(torch.nn.Module):
24
+ def __init__(self, out_dim, ncrops, warmup_teacher_temp, teacher_temp,
25
+ warmup_teacher_temp_epochs, nepochs, student_temp=0.1,
26
+ center_momentum=0.9):
27
+ super().__init__()
28
+ self.student_temp = student_temp
29
+ self.center_momentum = center_momentum
30
+ self.ncrops = ncrops
31
+ self.register_buffer("center", torch.zeros(1, out_dim))
32
+ # we apply a warm up for the teacher temperature because
33
+ # too high a temperature makes the training unstable at the beginning
34
+ self.teacher_temp_schedule = np.concatenate((
35
+ np.linspace(warmup_teacher_temp,
36
+ teacher_temp, warmup_teacher_temp_epochs),
37
+ np.ones(nepochs - warmup_teacher_temp_epochs) * teacher_temp
38
+ ))
39
+ self.teacher_temp = teacher_temp
40
+
41
+ def forward(self, student_output, teacher_output, iteration):
42
+ """
43
+ Cross-entropy between softmax outputs of the teacher
44
+ and student network.
45
+ """
46
+ args = get_args()
47
+ student_out = student_output / self.student_temp
48
+ student_out = student_out.chunk(self.ncrops)
49
+
50
+ epoch = iteration // args.iter_per_epoch
51
+
52
+ # teacher centering and sharpening
53
+ temp = self.teacher_temp_schedule[epoch]
54
+ teacher_out = F.softmax((teacher_output - self.center) / temp, dim=-1)
55
+
56
+ teacher_out = teacher_out.detach().chunk(2)
57
+
58
+ total_loss = 0
59
+ n_loss_terms = 0
60
+ for iq, q in enumerate(teacher_out):
61
+ for v in range(len(student_out)):
62
+ if v == iq:
63
+ # we skip cases where student and teacher operate on the same view
64
+ continue
65
+ loss = torch.sum(-q * F.log_softmax(student_out[v], dim=-1), dim=-1)
66
+ total_loss += loss.mean()
67
+ n_loss_terms += 1
68
+ total_loss /= n_loss_terms
69
+ self.update_center(teacher_output)
70
+ return total_loss
71
+
72
+ @torch.no_grad()
73
+ def update_center(self, teacher_output):
74
+ """
75
+ Update center used for teacher output.
76
+ """
77
+ batch_center = torch.sum(teacher_output, dim=0, keepdim=True)
78
+ torch.distributed.all_reduce(batch_center)
79
+ batch_center = batch_center / (len(teacher_output) * torch.distributed.get_world_size())
80
+ self.center = self.center * self.center_momentum + batch_center * (1 - self.center_momentum)
81
+
82
+ class DINOHead(torch.nn.Module):
83
+ def __init__(self, in_dim, out_dim, norm_last_layer=True, nlayers=3):
84
+ super().__init__()
85
+ args = get_args()
86
+ hidden_dim = args.dino_head_hidden_size
87
+ bottleneck_dim = args.dino_bottleneck_size
88
+ nlayers = max(nlayers, 1)
89
+ if nlayers == 1:
90
+ self.mlp = torch.nn.Linear(in_dim, bottleneck_dim)
91
+ else:
92
+ layers = [torch.nn.Linear(in_dim, hidden_dim)]
93
+ layers.append(torch.nn.GELU())
94
+ for _ in range(nlayers - 2):
95
+ layers.append(torch.nn.Linear(hidden_dim, hidden_dim))
96
+ layers.append(torch.nn.GELU())
97
+ layers.append(torch.nn.Linear(hidden_dim, bottleneck_dim))
98
+ self.mlp = torch.nn.Sequential(*layers)
99
+ self.apply(self._init_weights)
100
+ self.last_layer = torch.nn.utils.weight_norm(torch.nn.Linear(bottleneck_dim, out_dim, bias=False))
101
+ self.last_layer.weight_g.data.fill_(1)
102
+ if norm_last_layer:
103
+ self.last_layer.weight_g.requires_grad = False
104
+
105
+ def _init_weights(self, m):
106
+ if isinstance(m, torch.nn.Linear):
107
+ trunc_normal_(m.weight, std=.02)
108
+ if isinstance(m, torch.nn.Linear) and m.bias is not None:
109
+ torch.nn.init.constant_(m.bias, 0)
110
+
111
+ def forward(self, x):
112
+ x = self.mlp(x)
113
+ x = torch.nn.functional.normalize(x, dim=-1, p=2)
114
+ x = self.last_layer(x)
115
+ return x
116
+
117
+
118
+ class MultiCropWrapper(MegatronModule):
119
+
120
+ """
121
+ Perform forward pass separately on each resolution input.
122
+ Inputs that share a resolution are grouped so that a single forward
123
+ pass is run per resolution; the number of forward passes therefore
124
+ equals the number of distinct resolutions used. We then concatenate
125
+ all the output features and run the head forward on these
126
+ concatenated features.
127
+ """
128
+ def __init__(self, backbone, head):
129
+ super(MultiCropWrapper, self).__init__()
130
+ # disable layers dedicated to ImageNet labels classification
131
+ #backbone.fc, backbone.head = torch.nn.Identity(), torch.nn.Identity()
132
+ self.backbone = backbone
133
+ self.head = head
134
+
135
+ def forward(self, x):
136
+ # convert to list
137
+ if not isinstance(x, list):
138
+ x = [x]
139
+ idx_crops = torch.cumsum(torch.unique_consecutive(
140
+ torch.tensor([inp.shape[-1] for inp in x]),
141
+ return_counts=True,
142
+ )[1], 0)
143
+
144
+ start_idx = 0
145
+ for end_idx in idx_crops:
146
+ _out = self.backbone(torch.cat(x[start_idx: end_idx]))
147
+ if start_idx == 0:
148
+ output = _out
149
+ else:
150
+ output = torch.cat((output, _out))
151
+ start_idx = end_idx
152
+ # Run the head forward on the concatenated features.
153
+ if self.training:
154
+ return self.head(output)
155
+ else:
156
+ return output
157
+
158
+
159
+ def cosine_scheduler(base_value, final_value, epochs, niter_per_ep,
160
+ warmup_epochs=0, start_warmup_value=0):
161
+ warmup_schedule = np.array([])
162
+ warmup_iters = warmup_epochs * niter_per_ep
163
+ if warmup_epochs > 0:
164
+ warmup_schedule = \
165
+ np.linspace(start_warmup_value, base_value, warmup_iters)
166
+
167
+ iters = np.arange(epochs * niter_per_ep - warmup_iters)
168
+ schedule = final_value + 0.5 * (base_value - final_value) \
169
+ * (1 + np.cos(np.pi * iters / len(iters)))
170
+
171
+ schedule = np.concatenate((warmup_schedule, schedule))
172
+ assert len(schedule) == epochs * niter_per_ep
173
+ return schedule
174
+
175
+
176
+ def get_student_backbone_and_num_features(config, pre_process=True, post_process=True):
177
+ args = get_args()
178
+
179
+ if args.vision_backbone_type == 'vit':
180
+ student = VitBackbone(config,
181
+ pre_process=pre_process,
182
+ post_process=post_process,
183
+ drop_path_rate=0.1,
184
+ single_token_output=True)
185
+ num_features = args.hidden_size
186
+ elif args.vision_backbone_type == 'mit':
187
+ student = mit_b5_avg(drop_path_rate=0.1)
188
+ num_features = 512
189
+ elif args.vision_backbone_type == 'swin':
190
+ student = get_swin()
191
+ num_features = student.num_features
192
+ else:
193
+ raise Exception('{} vision backbone is not supported.'.format(
194
+ args.vision_backbone_type))
195
+
196
+ return student, num_features
197
+
198
+ def get_teacher_backbone_and_num_features(config, pre_process=True, post_process=True):
199
+ args = get_args()
200
+
201
+ if args.vision_backbone_type == 'vit':
202
+ teacher = VitBackbone(config,
203
+ pre_process=pre_process,
204
+ post_process=post_process,
205
+ single_token_output=True)
206
+ num_features = args.hidden_size
207
+ elif args.vision_backbone_type == 'mit':
208
+ teacher = mit_b5_avg(drop_path_rate=0.0)
209
+ num_features = 512
210
+ elif args.vision_backbone_type == 'swin':
211
+ teacher = get_swin(is_teacher=True)
212
+ num_features = teacher.num_features
213
+ else:
214
+ raise Exception('{} vision backbone is not supported.'.format(
215
+ args.vision_backbone_type))
216
+ return teacher, num_features
217
+
218
+
219
+ class DINOPretrainModel(MegatronModule):
220
+ def __init__(self, config, pre_process=True, post_process=True):
221
+ super(DINOPretrainModel, self).__init__()
222
+ args = get_args()
223
+ self.out_dim = 65536
224
+
225
+ self.dino_loss = DINOLoss(
226
+ self.out_dim,
227
+ args.dino_local_crops_number + 2,
228
+ args.dino_warmup_teacher_temp,
229
+ args.dino_teacher_temp,
230
+ args.dino_warmup_teacher_temp_epochs,
231
+ 300,
232
+ )
233
+
234
+ self.pre_process = pre_process
235
+ self.post_process = post_process
236
+ self.momentum_teacher = 0.996
237
+
238
+ student_backbone, num_features = \
239
+ get_student_backbone_and_num_features(config, pre_process, post_process)
240
+
241
+ self.student = MultiCropWrapper(
242
+ student_backbone,
243
+ DINOHead(num_features, self.out_dim,
244
+ norm_last_layer=args.dino_norm_last_layer)
245
+ )
246
+
247
+ self.momentum_schedule = cosine_scheduler(
248
+ self.momentum_teacher, 1,
249
+ args.train_iters // args.iter_per_epoch,
250
+ args.iter_per_epoch
251
+ )
252
+
253
+ teacher_backbone, num_features = \
254
+ get_teacher_backbone_and_num_features(config, pre_process, post_process)
255
+ self.teacher = MultiCropWrapper(
256
+ teacher_backbone,
257
+ DINOHead(num_features, self.out_dim)
258
+ )
259
+ self.teacher.load_state_dict(self.student.state_dict())
260
+
261
+ for p in self.teacher.parameters():
262
+ if hasattr(p, "requires_grad") and p.requires_grad is not None:
263
+ p.requires_grad = False
264
+
265
+ def set_input_tensor(self, tensor):
266
+ pass
267
+
268
+ def forward(self, input):
269
+ student_output = None
270
+ if self.training:
271
+ student_output = self.student(input)
272
+ teacher_output = self.teacher(input[:2])
273
+ else:
274
+ teacher_output = self.teacher(input)
275
+ return student_output, teacher_output
276
+
277
+ def cancel_gradients_last_layer(self, iteration):
278
+ args = get_args()
279
+ epoch = iteration // args.iter_per_epoch
280
+ if epoch < args.dino_freeze_last_layer:
281
+ for n, p in self.student.named_parameters():
282
+ if "last_layer" in n:
283
+ p.grad = None
284
+
285
+ def update_momentum(self, iteration):
286
+ with torch.no_grad():
287
+ m = self.momentum_schedule[iteration]
288
+ for param_q, param_k in zip(self.student.parameters(), self.teacher.parameters()):
289
+ param_k.data.mul_(m).add_((1 - m) * param_q.detach().data)
290
+
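For reference, a small standalone sketch (not part of the files in this commit) of the cosine momentum schedule and the teacher EMA update used by DINOPretrainModel.update_momentum above; only NumPy and PyTorch are assumed, and the layer sizes and iteration counts are made up.

# Cosine schedule from the base momentum to 1.0, then one EMA step
# teacher <- m * teacher + (1 - m) * student.
import numpy as np
import torch

def cosine_schedule(base, final, total_iters):
    iters = np.arange(total_iters)
    return final + 0.5 * (base - final) * (1 + np.cos(np.pi * iters / total_iters))

momentum = cosine_schedule(base=0.996, final=1.0, total_iters=1000)

student = torch.nn.Linear(4, 4)
teacher = torch.nn.Linear(4, 4)
teacher.load_state_dict(student.state_dict())

m = float(momentum[0])
with torch.no_grad():
    for p_s, p_t in zip(student.parameters(), teacher.parameters()):
        p_t.mul_(m).add_((1 - m) * p_s.detach())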
docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/vision/esvit_swin_backbone.py ADDED
@@ -0,0 +1,849 @@
1
+ # Copyright (c) 2021 Microsoft
2
+ #
3
+ # This source code is licensed under the MIT license found in the
4
+ # LICENSE file in the root directory of this source tree.
5
+ # --------------------------------------------------------
6
+ # Modified by Chunyuan Li ([email protected])
7
+ # Swin Transformer
8
+ # --------------------------------------------------------
9
+
10
+ import os
11
+ import logging
12
+ import torch
13
+ import torch.nn as nn
14
+ import torch.nn.functional as F
15
+ from functools import partial
16
+ import torch.distributed as dist
17
+ from torch.nn.init import trunc_normal_
18
+ from megatron.model.transformer import DropPath
19
+ from megatron import get_args
20
+ from megatron.model import LayerNorm
21
+ import numpy as np
22
+ from math import sqrt
23
+
24
+
25
+ class Mlp(nn.Module):
26
+ def __init__(self, in_features, hidden_features=None,
27
+ out_features=None, act_layer=nn.GELU, drop=0.):
28
+ super(Mlp, self).__init__()
29
+ out_features = out_features or in_features
30
+ hidden_features = hidden_features or in_features
31
+ self.fc1 = nn.Linear(in_features, hidden_features)
32
+ self.act = act_layer()
33
+ self.fc2 = nn.Linear(hidden_features, out_features)
34
+ self.drop = nn.Dropout(drop)
35
+
36
+ def forward(self, x):
37
+ x = self.fc1(x)
38
+ x = self.act(x)
39
+ x = self.drop(x)
40
+ x = self.fc2(x)
41
+ x = self.drop(x)
42
+ return x
43
+
44
+
45
+ def window_partition(x, window_size):
46
+ """
47
+ Args:
48
+ x: (B, H, W, C)
49
+ window_size (int): window size
50
+ Returns:
51
+ windows: (num_windows*B, window_size, window_size, C)
52
+ """
53
+ B, H, W, C = x.shape
54
+ x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
55
+ windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
56
+ return windows
57
+
58
+
59
+ def window_reverse(windows, window_size, H, W):
60
+ """
61
+ Args:
62
+ windows: (num_windows*B, window_size, window_size, C)
63
+ window_size (int): Window size
64
+ H (int): Height of image
65
+ W (int): Width of image
66
+ Returns:
67
+ x: (B, H, W, C)
68
+ """
69
+ B = int(windows.shape[0] / (H * W / window_size / window_size))
70
+ x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
71
+ x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
72
+ return x
73
+
74
+
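For reference, a quick standalone check (not part of the files in this commit) that the window_partition / window_reverse pair above are inverses of each other when H and W are multiples of the window size; only plain PyTorch is assumed and the shapes are made up.

# Round-trip a feature map through window partitioning and back.
import torch

def window_partition(x, ws):
    B, H, W, C = x.shape
    x = x.view(B, H // ws, ws, W // ws, ws, C)
    return x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, ws, ws, C)

def window_reverse(windows, ws, H, W):
    B = int(windows.shape[0] / (H * W / ws / ws))
    x = windows.view(B, H // ws, W // ws, ws, ws, -1)
    return x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)

x = torch.randn(2, 14, 14, 32)                    # B, H, W, C with H, W % 7 == 0
windows = window_partition(x, ws=7)
print(windows.shape)                              # torch.Size([8, 7, 7, 32])
print(torch.equal(window_reverse(windows, 7, 14, 14), x))  # True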
75
+ class WindowAttention(nn.Module):
76
+ r"""Window based multi-head self attention (W-MSA) module with relative position bias.
77
+ It supports both shifted and non-shifted windows.
78
+ Args:
79
+ dim (int): Number of input channels.
80
+ window_size (tuple[int]): The height and width of the window.
81
+ num_heads (int): Number of attention heads.
82
+ qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
83
+ qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
84
+ attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
85
+ proj_drop (float, optional): Dropout ratio of output. Default: 0.0
86
+ """
87
+
88
+ def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):
89
+
90
+ super(WindowAttention, self).__init__()
91
+ self.dim = dim
92
+ self.window_size = window_size # Wh, Ww
93
+ self.num_heads = num_heads
94
+ head_dim = dim // num_heads
95
+ self.scale = qk_scale or head_dim ** -0.5
96
+
97
+ # define a parameter table of relative position bias
98
+ self.relative_position_bias_table = nn.Parameter(
99
+ torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH
100
+
101
+ # get pair-wise relative position index for each token inside the window
102
+ coords_h = torch.arange(self.window_size[0])
103
+ coords_w = torch.arange(self.window_size[1])
104
+ coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
105
+ coords_flatten = torch.flatten(coords, 1) # 2 Wh*Ww
106
+ relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
107
+ relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
108
+ relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
109
+ relative_coords[:, :, 1] += self.window_size[1] - 1
110
+ relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
111
+ relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
112
+ self.register_buffer("relative_position_index", relative_position_index)
113
+
114
+ self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
115
+ self.attn_drop = nn.Dropout(attn_drop)
116
+ self.proj = nn.Linear(dim, dim)
117
+ self.proj_drop = nn.Dropout(proj_drop)
118
+
119
+ trunc_normal_(self.relative_position_bias_table, std=.02)
120
+ self.softmax = nn.Softmax(dim=-1)
121
+
122
+ def forward(self, x, mask=None):
123
+ """
124
+ Args:
125
+ x: input features with shape of (num_windows*B, N, C)
126
+ mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
127
+ """
128
+ B_, N, C = x.shape
129
+ qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
130
+ q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
131
+
132
+ q = q * self.scale
133
+ attn = (q @ k.transpose(-2, -1))
134
+
135
+ relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
136
+ self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH
137
+ relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
138
+ attn = attn + relative_position_bias.unsqueeze(0)
139
+
140
+ if mask is not None:
141
+ nW = mask.shape[0]
142
+ attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0).type(attn.type())
143
+ attn = attn.view(-1, self.num_heads, N, N)
144
+ attn = self.softmax(attn)
145
+ else:
146
+ attn = self.softmax(attn)
147
+
148
+ attn_out = attn
149
+ attn = self.attn_drop(attn)
150
+
151
+ x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
152
+ x = self.proj(x)
153
+ x = self.proj_drop(x)
154
+ return x, attn_out
155
+
156
+ def extra_repr(self) -> str:
157
+ return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}'
158
+
159
+ def flops(self, N):
160
+ # calculate flops for 1 window with token length of N
161
+ flops = 0
162
+ # qkv = self.qkv(x)
163
+ flops += N * self.dim * 3 * self.dim
164
+ # attn = (q @ k.transpose(-2, -1))
165
+ flops += self.num_heads * N * (self.dim // self.num_heads) * N
166
+ # x = (attn @ v)
167
+ flops += self.num_heads * N * N * (self.dim // self.num_heads)
168
+ # x = self.proj(x)
169
+ flops += N * self.dim * self.dim
170
+ return flops
171
+
172
+ @staticmethod
173
+ def compute_macs(module, input, output):
174
+ B, N, C = input[0].shape
175
+
176
+ module.__flops__ += module.flops(N) * B
177
+
178
+
179
+ class SwinTransformerBlock(nn.Module):
180
+ r"""Swin Transformer Block.
181
+ Args:
182
+ dim (int): Number of input channels.
183
+ input_resolution (tuple[int]): Input resolution.
184
+ num_heads (int): Number of attention heads.
185
+ window_size (int): Window size.
186
+ shift_size (int): Shift size for SW-MSA.
187
+ mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
188
+ qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
189
+ qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
190
+ drop (float, optional): Dropout rate. Default: 0.0
191
+ attn_drop (float, optional): Attention dropout rate. Default: 0.0
192
+ drop_path (float, optional): Stochastic depth rate. Default: 0.0
193
+ act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
194
+ norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
195
+ """
196
+
197
+ def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,
198
+ mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,
199
+ act_layer=nn.GELU, norm_layer=nn.LayerNorm):
200
+ super().__init__()
201
+ self.dim = dim
202
+ self.input_resolution = input_resolution
203
+ self.num_heads = num_heads
204
+ self.window_size = window_size
205
+ self.shift_size = shift_size
206
+ self.mlp_ratio = mlp_ratio
207
+ if min(self.input_resolution) <= self.window_size:
208
+ # if window size is larger than input resolution, we don't partition windows
209
+ self.shift_size = 0
210
+ self.window_size = min(self.input_resolution)
211
+ assert 0 <= self.shift_size < self.window_size, "shift_size must be in [0, window_size)"
212
+
213
+ self.norm1 = norm_layer(dim)
214
+ self.attn = WindowAttention(
215
+ dim, window_size=(self.window_size, self.window_size), num_heads=num_heads,
216
+ qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
217
+
218
+ self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
219
+ self.norm2 = norm_layer(dim)
220
+ mlp_hidden_dim = int(dim * mlp_ratio)
221
+ self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
222
+
223
+ self.H = input_resolution[0]
224
+ self.W = input_resolution[1]
225
+
226
+ self.attn_mask_dict = {}
227
+
228
+
229
+ def create_attn_mask(self, H, W):
230
+ # calculate attention mask for SW-MSA
231
+
232
+ Hp = int(np.ceil(H / self.window_size)) * self.window_size
233
+ Wp = int(np.ceil(W / self.window_size)) * self.window_size
234
+ img_mask = torch.zeros((1, Hp, Wp, 1)) # 1 Hp Wp 1
235
+ h_slices = (slice(0, -self.window_size),
236
+ slice(-self.window_size, -self.shift_size),
237
+ slice(-self.shift_size, None))
238
+ w_slices = (slice(0, -self.window_size),
239
+ slice(-self.window_size, -self.shift_size),
240
+ slice(-self.shift_size, None))
241
+ cnt = 0
242
+ for h in h_slices:
243
+ for w in w_slices:
244
+ img_mask[:, h, w, :] = cnt
245
+ cnt += 1
246
+
247
+ mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1
248
+ mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
249
+ attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
250
+ attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
251
+
252
+ return attn_mask
253
+
254
+
255
+ def forward(self, x):
256
+ B, L, C = x.shape
257
+ H = int(sqrt(L))
258
+ W = H
259
+
260
+ shortcut = x
261
+ x = self.norm1(x)
262
+ x = x.view(B, H, W, C)
263
+
264
+ # pad feature maps to multiples of window size
265
+ pad_l = pad_t = 0
266
+ pad_r = (self.window_size - W % self.window_size) % self.window_size
267
+ pad_b = (self.window_size - H % self.window_size) % self.window_size
268
+ x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))
269
+ _, Hp, Wp, _ = x.shape
270
+
271
+ # cyclic shift
272
+ if self.shift_size > 0:
273
+ shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
274
+
275
+ if H in self.attn_mask_dict.keys():
276
+ attn_mask = self.attn_mask_dict[H]
277
+ else:
278
+ self.attn_mask_dict[H] = self.create_attn_mask(self.H, self.W).to(x.device)
279
+ attn_mask = self.attn_mask_dict[H]
280
+
281
+ else:
282
+ shifted_x = x
283
+ attn_mask = None
284
+
285
+ # partition windows
286
+ x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C
287
+ x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C
288
+
289
+ # W-MSA/SW-MSA
290
+ attn_windows, attn = self.attn(x_windows, attn_mask) # nW*B, window_size*window_size, C
291
+
292
+ # merge windows
293
+ attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
294
+ shifted_x = window_reverse(attn_windows, self.window_size, Hp, Wp) # B H' W' C
295
+
296
+ # reverse cyclic shift
297
+ if self.shift_size > 0:
298
+ x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
299
+ else:
300
+ x = shifted_x
301
+
302
+ if pad_r > 0 or pad_b > 0:
303
+ x = x[:, :H, :W, :].contiguous()
304
+
305
+ x = x.view(B, H * W, C)
306
+
307
+ # FFN
308
+ x = shortcut + self.drop_path(x)
309
+ x = x + self.drop_path(self.mlp(self.norm2(x)))
310
+
311
+ return x, attn
312
+
313
+ def extra_repr(self) -> str:
314
+ return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \
315
+ f"window_size={self.window_size}, shift_size={self.shift_size} mlp_ratio={self.mlp_ratio}"
316
+
317
+ def flops(self):
318
+ flops = 0
319
+ H, W = self.input_resolution
320
+ # norm1
321
+ flops += self.dim * H * W
322
+ # W-MSA/SW-MSA
323
+ nW = H * W / self.window_size / self.window_size
324
+ flops += nW * self.attn.flops(self.window_size * self.window_size)
325
+ # mlp
326
+ flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio
327
+ # norm2
328
+ flops += self.dim * H * W
329
+ return flops
330
+
331
+
332
+ class PatchMerging(nn.Module):
333
+ r"""Patch Merging Layer.
334
+ Args:
335
+ input_resolution (tuple[int]): Resolution of input feature.
336
+ dim (int): Number of input channels.
337
+ norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
338
+ """
339
+
340
+ def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
341
+ super().__init__()
342
+ self.input_resolution = input_resolution
343
+ self.dim = dim
344
+ self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
345
+ self.norm = norm_layer(4 * dim)
346
+
347
+ def forward(self, x):
348
+ """ Forward function.
349
+ Args:
350
+ x: Input feature, tensor size (B, H*W, C).
351
+ H, W: Spatial resolution of the input feature.
352
+ """
353
+ B, L, C = x.shape
354
+ H = int(sqrt(L))
355
+ W = H
356
+
357
+ x = x.view(B, H, W, C)
358
+
359
+ # padding
360
+ pad_input = (H % 2 == 1) or (W % 2 == 1)
361
+ if pad_input:
362
+ x = F.pad(x, (0, 0, 0, W % 2, 0, H % 2))
363
+
364
+ x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C
365
+ x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C
366
+ x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C
367
+ x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C
368
+ x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C
369
+ x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C
370
+
371
+ x = self.norm(x)
372
+ x = self.reduction(x)
373
+
374
+ return x
375
+
376
+
377
+ def extra_repr(self) -> str:
378
+ return f"input_resolution={self.input_resolution}, dim={self.dim}"
379
+
380
+ def flops(self):
381
+ H, W = self.input_resolution
382
+ flops = H * W * self.dim
383
+ flops += (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim
384
+ return flops
385
+
386
+
387
+ class BasicLayer(nn.Module):
388
+ """A basic Swin Transformer layer for one stage.
389
+ Args:
390
+ dim (int): Number of input channels.
391
+ input_resolution (tuple[int]): Input resolution.
392
+ depth (int): Number of blocks.
393
+ num_heads (int): Number of attention heads.
394
+ window_size (int): Window size.
395
+ mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
396
+ qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
397
+ qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
398
+ drop (float, optional): Dropout rate. Default: 0.0
399
+ attn_drop (float, optional): Attention dropout rate. Default: 0.0
400
+ drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
401
+ norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
402
+ downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
403
+ """
404
+
405
+ def __init__(self, dim, input_resolution, depth, num_heads, window_size,
406
+ mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
407
+ drop_path=0., norm_layer=nn.LayerNorm, downsample=None):
408
+
409
+ super().__init__()
410
+ self.dim = dim
411
+ self.input_resolution = input_resolution
412
+ self.depth = depth
413
+
414
+ self.blocks = nn.ModuleList([
415
+ SwinTransformerBlock(dim=dim, input_resolution=input_resolution,
416
+ num_heads=num_heads, window_size=window_size,
417
+ shift_size=0 if (i % 2 == 0) else window_size // 2,
418
+ mlp_ratio=mlp_ratio,
419
+ qkv_bias=qkv_bias, qk_scale=qk_scale,
420
+ drop=drop, attn_drop=attn_drop,
421
+ drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
422
+ norm_layer=norm_layer)
423
+ for i in range(depth)])
424
+ if downsample is not None:
425
+ self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)
426
+ else:
427
+ self.downsample = None
428
+
429
+ def forward(self, x):
430
+ for blk in self.blocks:
431
+ x, _ = blk(x)
432
+ if self.downsample is not None:
433
+ x = self.downsample(x)
434
+ return x
435
+
436
+ def forward_with_features(self, x):
437
+ fea = []
438
+ for blk in self.blocks:
439
+ x, _ = blk(x)
440
+ fea.append(x)
441
+ if self.downsample is not None:
442
+ x = self.downsample(x)
443
+ return x, fea
444
+
445
+ def forward_with_attention(self, x):
446
+ attns = []
447
+ for blk in self.blocks:
448
+ x, attn = blk(x)
449
+ attns.append(attn)
450
+ if self.downsample is not None:
451
+ x = self.downsample(x)
452
+ return x, attns
453
+
454
+
455
+ def extra_repr(self) -> str:
456
+ return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"
457
+
458
+ def flops(self):
459
+ flops = 0
460
+ for blk in self.blocks:
461
+ flops += blk.flops()
462
+ if self.downsample is not None:
463
+ flops += self.downsample.flops()
464
+ return flops
465
+
466
+
467
+ class PatchEmbed(nn.Module):
468
+ """ Image to Patch Embedding
469
+ """
470
+
471
+ def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, norm_layer=None):
472
+ super().__init__()
473
+ img_size = (img_size, img_size)
474
+ patch_size = (patch_size, patch_size)
475
+ patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
476
+ self.img_size = img_size
477
+ self.patch_size = patch_size
478
+ self.patches_resolution = patches_resolution
479
+ self.num_patches = patches_resolution[0] * patches_resolution[1]
480
+
481
+ self.in_chans = in_chans
482
+ self.embed_dim = embed_dim
483
+
484
+ self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
485
+ if norm_layer is not None:
486
+ self.norm = norm_layer(embed_dim)
487
+ else:
488
+ self.norm = None
489
+
490
+ def forward(self, x):
491
+ B, C, H, W = x.shape
492
+
493
+ x = self.proj(x).flatten(2).transpose(1, 2) # B Ph*Pw C
494
+ if self.norm is not None:
495
+ x = self.norm(x)
496
+ return x
497
+
498
+
499
+ def flops(self):
500
+ Ho, Wo = self.patches_resolution
501
+ flops = Ho * Wo * self.embed_dim * self.in_chans * (self.patch_size[0] * self.patch_size[1])
502
+ if self.norm is not None:
503
+ flops += Ho * Wo * self.embed_dim
504
+ return flops
505
+
506
+ class SwinTransformer(nn.Module):
507
+ r""" Swin Transformer
508
+ A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -
509
+ https://arxiv.org/pdf/2103.14030
510
+ Args:
511
+ img_size (int | tuple(int)): Input image size.
512
+ patch_size (int | tuple(int)): Patch size.
513
+ in_chans (int): Number of input channels.
514
+ num_classes (int): Number of classes for classification head.
515
+ embed_dim (int): Embedding dimension.
516
+ depths (tuple(int)): Depth of Swin Transformer layers.
517
+ num_heads (tuple(int)): Number of attention heads in different layers.
518
+ window_size (int): Window size.
519
+ mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
520
+ qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
521
+ qk_scale (float): Override default qk scale of head_dim ** -0.5 if set.
522
+ drop_rate (float): Dropout rate.
523
+ attn_drop_rate (float): Attention dropout rate.
524
+ drop_path_rate (float): Stochastic depth rate.
525
+ norm_layer (nn.Module): normalization layer.
526
+ ape (bool): If True, add absolute position embedding to the patch embedding.
527
+ patch_norm (bool): If True, add normalization after patch embedding.
528
+ """
529
+
530
+ def __init__(self, img_size=224, patch_size=4, in_chans=3, num_classes=1000,
531
+ embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24],
532
+ window_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None,
533
+ drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
534
+ norm_layer=nn.LayerNorm, ape=False, patch_norm=True, **kwargs):
535
+ super().__init__()
536
+
537
+ self.num_classes = num_classes
538
+ self.num_layers = len(depths)
539
+ self.embed_dim = embed_dim
540
+ self.ape = ape
541
+ self.patch_norm = patch_norm
542
+ self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))
543
+ self.mlp_ratio = mlp_ratio
544
+
545
+ self.patch_embed = PatchEmbed(
546
+ img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,
547
+ norm_layer=norm_layer if self.patch_norm else None)
548
+ num_patches = self.patch_embed.num_patches
549
+ patches_resolution = self.patch_embed.patches_resolution
550
+ self.patches_resolution = patches_resolution
551
+
552
+ if self.ape:
553
+ self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
554
+ trunc_normal_(self.absolute_pos_embed, std=.02)
555
+
556
+ self.pos_drop = nn.Dropout(p=drop_rate)
557
+
558
+ dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
559
+ self.layers = nn.ModuleList()
560
+ for i_layer in range(self.num_layers):
561
+ layer = BasicLayer(dim=int(embed_dim * 2 ** i_layer),
562
+ input_resolution=(patches_resolution[0] // (2 ** i_layer),
563
+ patches_resolution[1] // (2 ** i_layer)),
564
+ depth=depths[i_layer],
565
+ num_heads=num_heads[i_layer],
566
+ window_size=window_size,
567
+ mlp_ratio=self.mlp_ratio,
568
+ qkv_bias=qkv_bias, qk_scale=qk_scale,
569
+ drop=drop_rate, attn_drop=attn_drop_rate,
570
+ drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
571
+ norm_layer=norm_layer,
572
+ downsample=PatchMerging if (i_layer < self.num_layers - 1) else None)
573
+ self.layers.append(layer)
574
+
575
+ self.norm = norm_layer(self.num_features)
576
+ self.avgpool = nn.AdaptiveAvgPool1d(1)
577
+
578
+ self.apply(self._init_weights)
579
+
580
+ def _init_weights(self, m):
581
+ if isinstance(m, nn.Linear):
582
+ trunc_normal_(m.weight, std=.02)
583
+ if isinstance(m, nn.Linear) and m.bias is not None:
584
+ nn.init.constant_(m.bias, 0)
585
+ elif isinstance(m, nn.LayerNorm):
586
+ nn.init.constant_(m.bias, 0)
587
+ nn.init.constant_(m.weight, 1.0)
588
+
589
+ @torch.jit.ignore
590
+ def no_weight_decay(self):
591
+ return {'absolute_pos_embed'}
592
+
593
+ @torch.jit.ignore
594
+ def no_weight_decay_keywords(self):
595
+ # todo: to be implemented
596
+ return {'relative_position_bias_table'}
597
+
598
+ def forward(self, x):
599
+ x = self.patch_embed(x)
600
+ if self.ape:
601
+ x = x + self.absolute_pos_embed
602
+ x = self.pos_drop(x)
603
+
604
+ for layer in self.layers:
605
+ x = layer(x)
606
+
607
+ x_region = self.norm(x) # B L C
608
+ x = self.avgpool(x_region.transpose(1, 2)) # B C 1
609
+ x = torch.flatten(x, 1)
610
+
611
+ return x
612
+
613
+
614
+ def forward_feature_maps(self, x):
615
+ x = self.patch_embed(x)
616
+ if self.ape:
617
+ x = x + self.absolute_pos_embed
618
+ x = self.pos_drop(x)
619
+
620
+ for layer in self.layers:
621
+ x = layer(x)
622
+
623
+ x_grid = self.norm(x) # B L C
624
+ x = self.avgpool(x_grid.transpose(1, 2)) # B C 1
625
+ x = torch.flatten(x, 1)
626
+
627
+ return x, x_grid
628
+
629
+
630
+ def forward_selfattention(self, x, n=1):
631
+ # n=1 return the last layer attn map; otherwise return attn maps in all layers
632
+
633
+
634
+ x = self.patch_embed(x)
635
+ if self.ape:
636
+ x = x + self.absolute_pos_embed
637
+ x = self.pos_drop(x)
638
+
639
+ if n==1:
640
+ return self.forward_last_selfattention(x)
641
+ else:
642
+ return self.forward_all_selfattention(x)
643
+
644
+ def forward_last_selfattention(self, x):
645
+
646
+ for i, layer in enumerate(self.layers):
647
+ if i < len(self.layers) - 1:
648
+ x = layer(x)
649
+ else:
650
+ x, attns = layer.forward_with_attention(x)
651
+ return attns[-1]
652
+
653
+ def forward_all_selfattention(self, x):
654
+ attn_out = []
655
+
656
+ for layer in self.layers:
657
+ x, attns = layer.forward_with_attention(x)
658
+ attn_out += attns
659
+
660
+ return attn_out
661
+
662
+
663
+ def forward_return_n_last_blocks(self, x, n=1, return_patch_avgpool=False, depth=[]):
664
+
665
+ num_blks = sum(depth)
666
+ start_idx = num_blks - n
667
+
668
+ sum_cur = 0
669
+ for i, d in enumerate(depth):
670
+ sum_cur_new = sum_cur + d
671
+ if start_idx >= sum_cur and start_idx < sum_cur_new:
672
+ start_stage = i
673
+ start_blk = start_idx - sum_cur
674
+ sum_cur = sum_cur_new
675
+
676
+
677
+ x = self.patch_embed(x)
678
+ if self.ape:
679
+ x = x + self.absolute_pos_embed
680
+ x = self.pos_drop(x)
681
+
682
+ # we will return the averaged token features from the `n` last blocks
683
+ # note: there is no [CLS] token in Swin Transformer
684
+ output = []
685
+ s = 0
686
+ for i, layer in enumerate(self.layers):
687
+ x, fea = layer.forward_with_features(x)
688
+
689
+ if i >= start_stage:
690
+ for x_ in fea[start_blk:]:
691
+
692
+ if i == len(self.layers)-1: # use the norm in the last stage
693
+ x_ = self.norm(x_)
694
+
695
+ x_avg = torch.flatten(self.avgpool(x_.transpose(1, 2)), 1) # B C
696
+ # print(f'Stage {i}, x_avg {x_avg.shape}')
697
+ output.append(x_avg)
698
+
699
+ start_blk = 0
700
+
701
+ return torch.cat(output, dim=-1)
702
+
703
+
704
+
705
+ def flops(self):
706
+ flops = 0
707
+ flops += self.patch_embed.flops()
708
+ for i, layer in enumerate(self.layers):
709
+ flops += layer.flops()
710
+ if dist.get_rank() == 0:
711
+ print(f"GFLOPs layer_{i}: {layer.flops() / 1e9}")
712
+ flops += self.num_features * self.patches_resolution[0] * self.patches_resolution[1] // (2 ** self.num_layers)
713
+ flops += self.num_features * self.num_classes
714
+ return flops
715
+
716
+ def init_weights(self, pretrained='', pretrained_layers=[], verbose=True):
717
+ if os.path.isfile(pretrained):
718
+ pretrained_dict = torch.load(pretrained, map_location='cpu')
719
+ logging.info(f'=> loading pretrained model {pretrained}')
720
+ model_dict = self.state_dict()
721
+ pretrained_dict = {
722
+ k: v for k, v in pretrained_dict.items()
723
+ if k in model_dict.keys()
724
+ }
725
+ need_init_state_dict = {}
726
+ for k, v in pretrained_dict.items():
727
+ need_init = (
728
+ k.split('.')[0] in pretrained_layers
729
+ or pretrained_layers[0] == '*'
730
+ or 'relative_position_index' not in k
731
+ or 'attn_mask' not in k
732
+ )
733
+
734
+ if need_init:
735
+ if verbose:
736
+ logging.info(f'=> init {k} from {pretrained}')
737
+
738
+ if 'relative_position_bias_table' in k and v.size() != model_dict[k].size():
739
+ relative_position_bias_table_pretrained = v
740
+ relative_position_bias_table_current = model_dict[k]
741
+ L1, nH1 = relative_position_bias_table_pretrained.size()
742
+ L2, nH2 = relative_position_bias_table_current.size()
743
+ if nH1 != nH2:
744
+ logging.info(f"Error in loading {k}, passing")
745
+ else:
746
+ if L1 != L2:
747
+ logging.info(
748
+ '=> load_pretrained: resized variant: {} to {}'
749
+ .format((L1, nH1), (L2, nH2))
750
+ )
751
+ S1 = int(L1 ** 0.5)
752
+ S2 = int(L2 ** 0.5)
753
+ relative_position_bias_table_pretrained_resized = torch.nn.functional.interpolate(
754
+ relative_position_bias_table_pretrained.permute(1, 0).view(1, nH1, S1, S1),
755
+ size=(S2, S2),
756
+ mode='bicubic')
757
+ v = relative_position_bias_table_pretrained_resized.view(nH2, L2).permute(1, 0)
758
+
759
+ if 'absolute_pos_embed' in k and v.size() != model_dict[k].size():
760
+ absolute_pos_embed_pretrained = v
761
+ absolute_pos_embed_current = model_dict[k]
762
+ _, L1, C1 = absolute_pos_embed_pretrained.size()
763
+ _, L2, C2 = absolute_pos_embed_current.size()
764
+ if C1 != C2:
765
+ logging.info(f"Error in loading {k}, passing")
766
+ else:
767
+ if L1 != L2:
768
+ logging.info(
769
+ '=> load_pretrained: resized variant: {} to {}'
770
+ .format((1, L1, C1), (1, L2, C2))
771
+ )
772
+ S1 = int(L1 ** 0.5)
773
+ S2 = int(L2 ** 0.5)
774
+ absolute_pos_embed_pretrained = absolute_pos_embed_pretrained.reshape(-1, S1, S1, C1)
775
+ absolute_pos_embed_pretrained = absolute_pos_embed_pretrained.permute(0, 3, 1, 2)
776
+ absolute_pos_embed_pretrained_resized = torch.nn.functional.interpolate(
777
+ absolute_pos_embed_pretrained, size=(S2, S2), mode='bicubic')
778
+ v = absolute_pos_embed_pretrained_resized.permute(0, 2, 3, 1).flatten(1, 2)
779
+
780
+ need_init_state_dict[k] = v
781
+ self.load_state_dict(need_init_state_dict, strict=False)
782
+
783
+ def freeze_pretrained_layers(self, frozen_layers=[]):
784
+ for name, module in self.named_modules():
785
+ if (
786
+ name.split('.')[0] in frozen_layers
787
+ or '.'.join(name.split('.')[0:2]) in frozen_layers
788
+ or (len(frozen_layers) > 0 and frozen_layers[0] == '*')
789
+ ):
790
+ for _name, param in module.named_parameters():
791
+ param.requires_grad = False
792
+ logging.info(
793
+ '=> set param {} requires grad to False'
794
+ .format(name)
795
+ )
796
+ for name, param in self.named_parameters():
797
+ if (
798
+ name.split('.')[0] in frozen_layers
799
+ or (len(frozen_layers) > 0 and frozen_layers[0] == '*')
800
+ and param.requires_grad is True
801
+ ):
802
+ param.requires_grad = False
803
+ logging.info(
804
+ '=> set param {} requires grad to False'
805
+ .format(name)
806
+ )
807
+ return self
808
+
809
+
810
+ def get_swin(is_teacher=False):
811
+ args = get_args()
812
+
813
+ if args.swin_backbone_type == "tiny":
814
+ embed_dim = 96
815
+ depths = [2, 2, 6, 2]
816
+ num_heads = [3, 6, 12, 24]
817
+ drop_path_rate = 0.1
818
+ elif args.swin_backbone_type == 'h3':
819
+ embed_dim = 384
820
+ depths = [2, 2, 18, 2]
821
+ num_heads = [6, 12, 24, 48]
822
+ drop_path_rate = 0.2
823
+ else:
824
+ embed_dim = 128
825
+ depths = [2, 2, 18, 2]
826
+ num_heads = [4, 8, 16, 32]
827
+ drop_path_rate = 0.2
828
+
829
+ swin = SwinTransformer(
830
+ img_size=224,
831
+ in_chans=3,
832
+ num_classes=1000,
833
+ patch_size=4,
834
+ embed_dim=embed_dim,
835
+ depths=depths,
836
+ num_heads=num_heads,
837
+ window_size=7,
838
+ mlp_ratio=4,
839
+ qkv_bias=True,
840
+ drop_rate=0,
841
+ attn_drop_rate=0,
842
+ drop_path_rate=(0.0 if is_teacher else drop_path_rate),
843
+ norm_layer=partial(LayerNorm, eps=1e-6),
844
+ ape=False,
845
+ patch_norm=True,
846
+ )
847
+
848
+ return swin
849
+
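Note: `get_swin` above pulls its configuration from Megatron's global `get_args()`, so it only runs inside an initialized Megatron job. As a standalone sanity check, the backbone can be instantiated directly; the sketch below is not part of the committed code, it simply mirrors the "tiny" branch above and checks the pooled output shape.

```python
# Minimal smoke test (assumption: the "tiny" configuration from get_swin above).
import torch
from functools import partial
from torch.nn import LayerNorm

swin = SwinTransformer(
    img_size=224, patch_size=4, in_chans=3, num_classes=1000,
    embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24],
    window_size=7, mlp_ratio=4., qkv_bias=True,
    drop_path_rate=0.0, norm_layer=partial(LayerNorm, eps=1e-6),
)
feats = swin(torch.randn(2, 3, 224, 224))
assert feats.shape == (2, 768)  # num_features = embed_dim * 2 ** (num_layers - 1)
```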
docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/vision/inpainting.py ADDED
@@ -0,0 +1,152 @@
1
+ # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
2
+ #
3
+ # This source code is licensed under the BSD license found in the
4
+ # LICENSE file in the root directory of this source tree.
5
+
6
+ import math
7
+ import apex
8
+ import einops
9
+ import torch
10
+ import torch.nn.functional as F
11
+ from megatron import get_args, print_rank_0
12
+ from megatron.model.utils import get_linear_layer
13
+ from megatron.model.vision.vit_backbone import VitBackbone
14
+ from megatron.model.module import MegatronModule
15
+ from megatron.model.vision.mit_backbone import mit_b3
16
+ from megatron.model.vision.utils import resize
17
+
18
+
19
+ class VitInpaintingModel(MegatronModule):
20
+
21
+ def __init__(self, config, pre_process=True, post_process=True):
22
+ super(VitInpaintingModel, self).__init__()
23
+ args = get_args()
24
+
25
+ self.pre_process = pre_process
26
+ self.post_process = post_process
27
+ self.hidden_size = config.hidden_size
28
+ self.backbone = VitBackbone(
29
+ config=config,
30
+ pre_process=self.pre_process,
31
+ post_process=self.post_process,
32
+ class_token=False,
33
+ )
34
+ self.patch_dim = args.patch_dim
35
+ self.img_h = args.img_h
36
+ self.img_w = args.img_w
37
+ self.seq_length = args.seq_length
38
+ # full mask
39
+
40
+ if self.post_process:
41
+ self.linear_decoder = get_linear_layer(
42
+ self.hidden_size,
43
+ self.backbone.flatten_dim,
44
+ torch.nn.init.zeros_,
45
+ gather_params_on_init=args.zero_stage == 3
46
+ )
47
+
48
+ def set_input_tensor(self, input_tensor):
49
+ self.backbone.set_input_tensor(input_tensor)
50
+
51
+ def forward(self, input):
52
+
53
+ hidden_states = self.backbone(input)
54
+
55
+ if not self.post_process:
56
+ return hidden_states
57
+ decoded_output = self.linear_decoder(hidden_states)
58
+ output = einops.rearrange(
59
+ decoded_output,
60
+ "b (h w) (p1 p2 c) -> b c (h p1) (w p2)",
61
+ p1=self.patch_dim,
62
+ p2=self.patch_dim,
63
+ h=self.img_h//self.patch_dim,
64
+ w=self.img_w//self.patch_dim,
65
+ )
66
+
67
+ return output
68
+
69
+
70
+ class MLP(torch.nn.Module):
71
+ """
72
+ Linear Embedding
73
+ """
74
+ def __init__(self, input_dim=2048, embed_dim=768):
75
+ super().__init__()
76
+ self.proj = torch.nn.Linear(input_dim, embed_dim)
77
+
78
+ def forward(self, x):
79
+ x = x.flatten(2).transpose(1, 2)
80
+ x = self.proj(x)
81
+ return x
82
+
83
+
84
+ class MitInpaintingModel(MegatronModule):
85
+ """Mix vision Transformer Model."""
86
+
87
+ def __init__(self, pre_process=True, post_process=True):
88
+ super(MitInpaintingModel, self).__init__()
89
+ self.pre_process = pre_process
90
+ self.post_process = post_process
91
+
92
+ args = get_args()
93
+ self.patch_dim = args.patch_dim
94
+ self.img_h = args.img_h
95
+ self.img_w = args.img_w
96
+ self.flatten_dim = self.patch_dim * self.patch_dim * 3
97
+ self.backbone = mit_b3()
98
+
99
+ self.in_channels = [64, 128, 320, 512]
100
+ self.embedding_dim = 768
101
+
102
+ c1_in_channels, c2_in_channels, c3_in_channels, c4_in_channels = self.in_channels
103
+
104
+ self.linear_c4 = MLP(input_dim=c4_in_channels, embed_dim=self.embedding_dim)
105
+ self.linear_c3 = MLP(input_dim=c3_in_channels, embed_dim=self.embedding_dim)
106
+ self.linear_c2 = MLP(input_dim=c2_in_channels, embed_dim=self.embedding_dim)
107
+ self.linear_c1 = MLP(input_dim=c1_in_channels, embed_dim=self.embedding_dim)
108
+
109
+ self.conv_fuse = torch.nn.Conv2d(self.embedding_dim*4, self.embedding_dim, 1, 1, bias=False)
110
+ self.norm = apex.parallel.SyncBatchNorm(self.embedding_dim)
111
+ self.dropout = torch.nn.Dropout2d(0.1)
112
+
113
+ self.linear_pred = torch.nn.Conv2d(self.embedding_dim, self.flatten_dim, kernel_size=1)
114
+
115
+ def set_input_tensor(self, input_tensor):
116
+ """See megatron.model.transformer.set_input_tensor()"""
117
+ pass
118
+
119
+ def forward(self, input):
120
+ c1, c2, c3, c4 = self.backbone(input)
121
+
122
+ n, _, h, w = c4.shape
123
+ _c4 = self.linear_c4(c4).permute(0, 2, 1).reshape(n, -1, c4.shape[2], c4.shape[3])
124
+ _c4 = resize(_c4, size=c1.size()[2:], mode='bilinear', align_corners=False)
125
+
126
+ _c3 = self.linear_c3(c3).permute(0, 2, 1).reshape(n, -1, c3.shape[2], c3.shape[3])
127
+ _c3 = resize(_c3, size=c1.size()[2:], mode='bilinear', align_corners=False)
128
+
129
+ _c2 = self.linear_c2(c2).permute(0, 2, 1).reshape(n, -1, c2.shape[2], c2.shape[3])
130
+ _c2 = resize(_c2, size=c1.size()[2:], mode='bilinear', align_corners=False)
131
+
132
+ _c1 = self.linear_c1(c1).permute(0, 2, 1).reshape(n, -1, c1.shape[2], c1.shape[3])
133
+
134
+ _c = torch.cat([_c4, _c3, _c2, _c1], dim=1)
135
+ _c = self.conv_fuse(_c)
136
+
137
+ x = self.norm(_c)
138
+ x = F.relu(x, inplace=True)
139
+ x = self.dropout(x)
140
+
141
+ x = self.linear_pred(x)
142
+
143
+ output = einops.rearrange(
144
+ x,
145
+ "b (c p1 p2) h w -> b c (h p1) (w p2)",
146
+ p1=self.patch_dim,
147
+ p2=self.patch_dim,
148
+ h=self.img_h//self.patch_dim,
149
+ w=self.img_w//self.patch_dim,
150
+ )
151
+
152
+ return output
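Both inpainting heads above decode per-patch pixel predictions and fold them back into an image with `einops.rearrange`. A quick standalone shape check of the pattern used by the MiT head (the patch size and image size below are assumed values for illustration, not read from the args):

```python
import torch
import einops

patch_dim, img_h, img_w = 16, 224, 224                      # assumed sizes
# per-patch RGB pixel predictions laid out on the patch grid
x = torch.randn(1, 3 * patch_dim * patch_dim,
                img_h // patch_dim, img_w // patch_dim)
out = einops.rearrange(
    x, "b (c p1 p2) h w -> b c (h p1) (w p2)",
    p1=patch_dim, p2=patch_dim,
)
assert out.shape == (1, 3, img_h, img_w)                    # patches folded back to an image
```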
docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/vision/knn_monitor.py ADDED
@@ -0,0 +1,129 @@
1
+ import torch.nn.functional as F
2
+ import torch
3
+ from megatron import print_rank_0, get_args
4
+ from megatron.core import mpu
5
+ from megatron.data.vit_dataset import ClassificationTransform
6
+ from megatron.data.image_folder import ImageFolder
7
+
8
+ _FEATURE_BANK = None
9
+
10
+
11
+ def build_data_loader(dataset, drop_last=True, shuffle=False):
12
+ """Data loader. Note that batch-size is the local (per GPU) batch-size."""
13
+ # Sampler.
14
+ args = get_args()
15
+ micro_batch_size = 16
16
+ num_workers = args.num_workers
17
+ world_size = mpu.get_data_parallel_world_size()
18
+ rank = mpu.get_data_parallel_rank()
19
+ sampler = torch.utils.data.distributed.DistributedSampler(
20
+ dataset, num_replicas=world_size, rank=rank,
21
+ drop_last=drop_last, shuffle=shuffle
22
+ )
23
+
24
+ # Data loader. Note that batch size is the per GPU batch size.
25
+ data_loader = torch.utils.data.DataLoader(
26
+ dataset,
27
+ batch_size=micro_batch_size,
28
+ sampler=sampler,
29
+ shuffle=False,
30
+ num_workers=num_workers,
31
+ drop_last=not drop_last,
32
+ pin_memory=True,
33
+ )
34
+ return data_loader
35
+
36
+
37
+ def compute_feature_bank(model):
38
+ args = get_args()
39
+ global _FEATURE_BANK
40
+ feature_bank = []
41
+ feature_label = []
42
+
43
+ train_ds = ImageFolder(
44
+ root=args.data_path[0],
45
+ transform=ClassificationTransform((args.img_h, args.img_w), train=False),
46
+ data_per_class_fraction=1.0
47
+ )
48
+ classes = len(train_ds.classes)
49
+ dataloader = build_data_loader(train_ds)
50
+
51
+ for m in model:
52
+ m.eval()
53
+
54
+ with torch.no_grad():
55
+ for i, batch in enumerate(dataloader):
56
+ images = batch[0].cuda().contiguous()
57
+ labels = batch[1].cuda().contiguous()
58
+ student_feature, teacher_feature = model[0](images)
59
+ feature = F.normalize(teacher_feature.float(), dim=1)
60
+ feature_bank.append(feature)
61
+ feature_label.append(labels)
62
+
63
+ for m in model:
64
+ m.train()
65
+
66
+ # [N', D]
67
+ feature_bank = torch.cat(feature_bank, dim=0).contiguous()
68
+ feature_label = torch.cat(feature_label, dim=0).contiguous()
69
+
70
+ feature_banks = [torch.zeros_like(feature_bank)
71
+ for i in range(mpu.get_data_parallel_world_size())]
72
+ torch.distributed.all_gather(feature_banks,
73
+ feature_bank,
74
+ group=mpu.get_data_parallel_group())
75
+
76
+ assert torch.all(torch.eq(feature_banks[mpu.get_data_parallel_rank()],
77
+ feature_bank))
78
+
79
+ feature_labels = [torch.zeros_like(feature_label)
80
+ for i in range(mpu.get_data_parallel_world_size())]
81
+ torch.distributed.all_gather(feature_labels,
82
+ feature_label,
83
+ group=mpu.get_data_parallel_group())
84
+
85
+ # [D, N]
86
+ feature_banks = torch.cat(feature_banks, dim=0).t().contiguous()
87
+ # [N]
88
+ feature_labels = torch.cat(feature_labels, dim=0).contiguous()
89
+ print_rank_0("feature_banks size is {}".format(feature_banks.size()))
90
+ print_rank_0("feature labels size is {}".format(feature_labels.size()))
91
+
92
+ _FEATURE_BANK = (feature_banks, feature_labels, classes)
93
+
94
+
95
+ def get_feature_bank():
96
+ global _FEATURE_BANK
97
+ assert _FEATURE_BANK is not None
98
+ return _FEATURE_BANK
99
+
100
+
101
+ # knn monitor as in InstDisc https://arxiv.org/abs/1805.01978
102
+ # implementation follows http://github.com/zhirongw/lemniscate.pytorch and
103
+ # https://github.com/leftthomas/SimCLR
104
+ def knn_predict(feature, feature_bank, feature_labels, classes, knn_k, knn_t):
105
+ # compute cos similarity between each feature vector and feature bank ---> [B, N]
106
+ sim_matrix = torch.mm(feature, feature_bank)
107
+ # [B, K]
108
+ sim_weight, sim_indices = sim_matrix.topk(k=knn_k, dim=-1)
109
+ # [B, K]
110
+ sim_labels = torch.gather(feature_labels.expand(feature.size(0), -1),
111
+ dim=-1,
112
+ index=sim_indices)
113
+ sim_weight = (sim_weight / knn_t).exp()
114
+
115
+ # counts for each class
116
+ one_hot_label = torch.zeros(feature.size(0) * knn_k,
117
+ classes,
118
+ device=sim_labels.device)
119
+ # [B*K, C]
120
+ one_hot_label = one_hot_label.scatter(dim=-1,
121
+ index=sim_labels.view(-1, 1),
122
+ value=1.0)
123
+ # weighted score ---> [B, C]
124
+ pred_scores = torch.sum(
125
+ one_hot_label.view(feature.size(0), -1, classes) * sim_weight.unsqueeze(dim=-1),
126
+ dim=1)
127
+
128
+ pred_labels = pred_scores.argsort(dim=-1, descending=True)
129
+ return pred_labels
docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/vision/mit_backbone.py ADDED
@@ -0,0 +1,420 @@
1
+ # ---------------------------------------------------------------
2
+ # Copyright (c) 2021, NVIDIA Corporation. All rights reserved.
3
+ #
4
+ # This work is licensed under the NVIDIA Source Code License
5
+ # found in the LICENSE file in the root directory of this
6
+ # source tree.
7
+ # ---------------------------------------------------------------
8
+ import math
9
+ import torch
10
+ import torch.nn as nn
11
+ import torch.nn.functional as F
12
+ from functools import partial
13
+ from torch.nn.init import trunc_normal_
14
+ from megatron.model.transformer import DropPath
15
+ from megatron.model import LayerNorm
16
+
17
+
18
+ class Mlp(nn.Module):
19
+ def __init__(self,
20
+ in_features,
21
+ hidden_features=None,
22
+ out_features=None,
23
+ act_layer=nn.GELU,
24
+ drop=0.):
25
+ super().__init__()
26
+ out_features = out_features or in_features
27
+ hidden_features = hidden_features or in_features
28
+ self.fc1 = nn.Linear(in_features, hidden_features)
29
+ self.dwconv = DWConv(hidden_features)
30
+ self.act = act_layer()
31
+ self.fc2 = nn.Linear(hidden_features, out_features)
32
+ self.drop = nn.Dropout(drop)
33
+
34
+ self.apply(self._init_weights)
35
+
36
+ def _init_weights(self, m):
37
+ if isinstance(m, nn.Linear):
38
+ trunc_normal_(m.weight, std=.02)
39
+ if isinstance(m, nn.Linear) and m.bias is not None:
40
+ nn.init.constant_(m.bias, 0)
41
+ elif isinstance(m, nn.LayerNorm):
42
+ nn.init.constant_(m.bias, 0)
43
+ nn.init.constant_(m.weight, 1.0)
44
+ elif isinstance(m, nn.Conv2d):
45
+ fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
46
+ fan_out //= m.groups
47
+ m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
48
+ if m.bias is not None:
49
+ m.bias.data.zero_()
50
+
51
+ def forward(self, x, H, W):
52
+ x = self.fc1(x)
53
+ x = self.dwconv(x, H, W)
54
+ x = self.act(x)
55
+ x = self.drop(x)
56
+ x = self.fc2(x)
57
+ x = self.drop(x)
58
+ return x
59
+
60
+
61
+ class Attention(nn.Module):
62
+ def __init__(self,
63
+ dim,
64
+ num_heads=8,
65
+ qkv_bias=False,
66
+ qk_scale=None,
67
+ attn_drop=0.,
68
+ proj_drop=0.,
69
+ sr_ratio=1):
70
+ super().__init__()
71
+ assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."
72
+
73
+ self.dim = dim
74
+ self.num_heads = num_heads
75
+ head_dim = dim // num_heads
76
+ self.scale = qk_scale or head_dim ** -0.5
77
+
78
+ self.q = nn.Linear(dim, dim, bias=qkv_bias)
79
+ self.kv = nn.Linear(dim, dim * 2, bias=qkv_bias)
80
+ self.attn_drop = nn.Dropout(attn_drop)
81
+ self.proj = nn.Linear(dim, dim)
82
+ self.proj_drop = nn.Dropout(proj_drop)
83
+
84
+ self.sr_ratio = sr_ratio
85
+ if sr_ratio > 1:
86
+ self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio)
87
+ self.norm = LayerNorm(dim)
88
+
89
+ self.apply(self._init_weights)
90
+
91
+ def _init_weights(self, m):
92
+ if isinstance(m, nn.Linear):
93
+ trunc_normal_(m.weight, std=.02)
94
+ if isinstance(m, nn.Linear) and m.bias is not None:
95
+ nn.init.constant_(m.bias, 0)
96
+ elif isinstance(m, nn.LayerNorm):
97
+ nn.init.constant_(m.bias, 0)
98
+ nn.init.constant_(m.weight, 1.0)
99
+ elif isinstance(m, nn.Conv2d):
100
+ fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
101
+ fan_out //= m.groups
102
+ m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
103
+ if m.bias is not None:
104
+ m.bias.data.zero_()
105
+
106
+ def forward(self, x, H, W):
107
+ B, N, C = x.shape
108
+ q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
109
+
110
+ if self.sr_ratio > 1:
111
+ x_ = x.permute(0, 2, 1).reshape(B, C, H, W)
112
+ x_ = self.sr(x_).reshape(B, C, -1).permute(0, 2, 1)
113
+ x_ = self.norm(x_)
114
+ kv = self.kv(x_).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
115
+ else:
116
+ kv = self.kv(x).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
117
+ k, v = kv[0], kv[1]
118
+
119
+ attn = (q @ k.transpose(-2, -1)) * self.scale
120
+ attn = attn.softmax(dim=-1)
121
+ attn = self.attn_drop(attn)
122
+
123
+ x = (attn @ v).transpose(1, 2).reshape(B, N, C)
124
+ x = self.proj(x)
125
+ x = self.proj_drop(x)
126
+
127
+ return x
128
+
129
+
130
+ class Block(nn.Module):
131
+
132
+ def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
133
+ drop_path=0., act_layer=nn.GELU, norm_layer=LayerNorm, sr_ratio=1):
134
+ super().__init__()
135
+ self.norm1 = norm_layer(dim)
136
+ self.attn = Attention(
137
+ dim,
138
+ num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
139
+ attn_drop=attn_drop, proj_drop=drop, sr_ratio=sr_ratio)
140
+ # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
141
+ self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
142
+ self.norm2 = norm_layer(dim)
143
+ mlp_hidden_dim = int(dim * mlp_ratio)
144
+ self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
145
+
146
+ self.apply(self._init_weights)
147
+
148
+ def _init_weights(self, m):
149
+ if isinstance(m, nn.Linear):
150
+ trunc_normal_(m.weight, std=.02)
151
+ if isinstance(m, nn.Linear) and m.bias is not None:
152
+ nn.init.constant_(m.bias, 0)
153
+ elif isinstance(m, nn.LayerNorm):
154
+ nn.init.constant_(m.bias, 0)
155
+ nn.init.constant_(m.weight, 1.0)
156
+ elif isinstance(m, nn.Conv2d):
157
+ fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
158
+ fan_out //= m.groups
159
+ m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
160
+ if m.bias is not None:
161
+ m.bias.data.zero_()
162
+
163
+ def forward(self, x, H, W):
164
+ x = x + self.drop_path(self.attn(self.norm1(x), H, W))
165
+ x = x + self.drop_path(self.mlp(self.norm2(x), H, W))
166
+
167
+ return x
168
+
169
+
170
+ class OverlapPatchEmbed(nn.Module):
171
+ """ Image to Patch Embedding
172
+ """
173
+
174
+ def __init__(self, img_size=224, patch_size=7, stride=4, in_chans=3, embed_dim=768):
175
+ super().__init__()
176
+ img_size = (img_size, img_size)
177
+ patch_size = (patch_size, patch_size)
178
+
179
+ self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=stride,
180
+ padding=(patch_size[0] // 2, patch_size[1] // 2))
181
+ self.norm = LayerNorm(embed_dim)
182
+
183
+ self.apply(self._init_weights)
184
+
185
+ def _init_weights(self, m):
186
+ if isinstance(m, nn.Linear):
187
+ trunc_normal_(m.weight, std=.02)
188
+ if isinstance(m, nn.Linear) and m.bias is not None:
189
+ nn.init.constant_(m.bias, 0)
190
+ elif isinstance(m, nn.LayerNorm):
191
+ nn.init.constant_(m.bias, 0)
192
+ nn.init.constant_(m.weight, 1.0)
193
+ elif isinstance(m, nn.Conv2d):
194
+ fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
195
+ fan_out //= m.groups
196
+ m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
197
+ if m.bias is not None:
198
+ m.bias.data.zero_()
199
+
200
+ def forward(self, x):
201
+ x = self.proj(x)
202
+ _, _, H, W = x.shape
203
+ x = x.flatten(2).transpose(1, 2)
204
+ x = self.norm(x)
205
+
206
+ return x, H, W
207
+
208
+
209
+ class MixVisionTransformer(nn.Module):
210
+ def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dims=[64, 128, 256, 512],
211
+ num_heads=[1, 2, 4, 8], mlp_ratios=[4, 4, 4, 4], qkv_bias=False, qk_scale=None, drop_rate=0.,
212
+ attn_drop_rate=0., drop_path_rate=0., norm_layer=LayerNorm,
213
+ depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1], output_avg=False):
214
+ super().__init__()
215
+ self.num_classes = num_classes
216
+ self.depths = depths
217
+ self.output_avg = output_avg
218
+
219
+ # patch_embed
220
+ self.patch_embed1 = OverlapPatchEmbed(img_size=img_size, patch_size=7, stride=4, in_chans=in_chans,
221
+ embed_dim=embed_dims[0])
222
+ self.patch_embed2 = OverlapPatchEmbed(img_size=img_size // 4, patch_size=3, stride=2, in_chans=embed_dims[0],
223
+ embed_dim=embed_dims[1])
224
+ self.patch_embed3 = OverlapPatchEmbed(img_size=img_size // 8, patch_size=3, stride=2, in_chans=embed_dims[1],
225
+ embed_dim=embed_dims[2])
226
+ self.patch_embed4 = OverlapPatchEmbed(img_size=img_size // 16, patch_size=3, stride=2, in_chans=embed_dims[2],
227
+ embed_dim=embed_dims[3])
228
+
229
+ # transformer encoder
230
+ dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
231
+ cur = 0
232
+ self.block1 = nn.ModuleList([Block(
233
+ dim=embed_dims[0], num_heads=num_heads[0], mlp_ratio=mlp_ratios[0], qkv_bias=qkv_bias, qk_scale=qk_scale,
234
+ drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
235
+ sr_ratio=sr_ratios[0])
236
+ for i in range(depths[0])])
237
+ self.norm1 = norm_layer(embed_dims[0])
238
+
239
+ cur += depths[0]
240
+ self.block2 = nn.ModuleList([Block(
241
+ dim=embed_dims[1], num_heads=num_heads[1], mlp_ratio=mlp_ratios[1], qkv_bias=qkv_bias, qk_scale=qk_scale,
242
+ drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
243
+ sr_ratio=sr_ratios[1])
244
+ for i in range(depths[1])])
245
+ self.norm2 = norm_layer(embed_dims[1])
246
+
247
+ cur += depths[1]
248
+ self.block3 = nn.ModuleList([Block(
249
+ dim=embed_dims[2], num_heads=num_heads[2], mlp_ratio=mlp_ratios[2], qkv_bias=qkv_bias, qk_scale=qk_scale,
250
+ drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
251
+ sr_ratio=sr_ratios[2])
252
+ for i in range(depths[2])])
253
+ self.norm3 = norm_layer(embed_dims[2])
254
+
255
+ cur += depths[2]
256
+ self.block4 = nn.ModuleList([Block(
257
+ dim=embed_dims[3], num_heads=num_heads[3], mlp_ratio=mlp_ratios[3], qkv_bias=qkv_bias, qk_scale=qk_scale,
258
+ drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
259
+ sr_ratio=sr_ratios[3])
260
+ for i in range(depths[3])])
261
+ self.norm4 = norm_layer(embed_dims[3])
262
+
263
+ self.apply(self._init_weights)
264
+
265
+ def _init_weights(self, m):
266
+ if isinstance(m, nn.Linear):
267
+ trunc_normal_(m.weight, std=.02)
268
+ if isinstance(m, nn.Linear) and m.bias is not None:
269
+ nn.init.constant_(m.bias, 0)
270
+ elif isinstance(m, nn.LayerNorm):
271
+ nn.init.constant_(m.bias, 0)
272
+ nn.init.constant_(m.weight, 1.0)
273
+ elif isinstance(m, nn.Conv2d):
274
+ fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
275
+ fan_out //= m.groups
276
+ m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
277
+ if m.bias is not None:
278
+ m.bias.data.zero_()
279
+
280
+ def reset_drop_path(self, drop_path_rate):
281
+ dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(self.depths))]
282
+ cur = 0
283
+ for i in range(self.depths[0]):
284
+ self.block1[i].drop_path.drop_prob = dpr[cur + i]
285
+
286
+ cur += self.depths[0]
287
+ for i in range(self.depths[1]):
288
+ self.block2[i].drop_path.drop_prob = dpr[cur + i]
289
+
290
+ cur += self.depths[1]
291
+ for i in range(self.depths[2]):
292
+ self.block3[i].drop_path.drop_prob = dpr[cur + i]
293
+
294
+ cur += self.depths[2]
295
+ for i in range(self.depths[3]):
296
+ self.block4[i].drop_path.drop_prob = dpr[cur + i]
297
+
298
+ def freeze_patch_emb(self):
299
+ self.patch_embed1.requires_grad = False
300
+
301
+ def forward_features(self, x):
302
+ B = x.shape[0]
303
+ outs = []
304
+
305
+ # stage 1
306
+ x, H, W = self.patch_embed1(x)
307
+ for i, blk in enumerate(self.block1):
308
+ x = blk(x, H, W)
309
+ x = self.norm1(x)
310
+ x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
311
+ outs.append(x)
312
+
313
+ # stage 2
314
+ x, H, W = self.patch_embed2(x)
315
+ for i, blk in enumerate(self.block2):
316
+ x = blk(x, H, W)
317
+ x = self.norm2(x)
318
+ x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
319
+ outs.append(x)
320
+
321
+ # stage 3
322
+ x, H, W = self.patch_embed3(x)
323
+ for i, blk in enumerate(self.block3):
324
+ x = blk(x, H, W)
325
+ x = self.norm3(x)
326
+ x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
327
+ outs.append(x)
328
+
329
+ # stage 4
330
+ x, H, W = self.patch_embed4(x)
331
+ for i, blk in enumerate(self.block4):
332
+ x = blk(x, H, W)
333
+ x = self.norm4(x)
334
+ if not self.output_avg:
335
+ x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
336
+ outs.append(x)
337
+
338
+ return outs
339
+
340
+ def forward(self, x):
341
+ x = self.forward_features(x)
342
+
343
+ if self.output_avg:
344
+ x = x[3].mean(dim=1)
345
+
346
+ return x
347
+
348
+
349
+ class DWConv(nn.Module):
350
+ def __init__(self, dim=768):
351
+ super(DWConv, self).__init__()
352
+ self.dwconv = nn.Conv2d(dim, dim, 3, 1, 1, bias=True, groups=dim)
353
+
354
+ def forward(self, x, H, W):
355
+ B, N, C = x.shape
356
+ x = x.transpose(1, 2).view(B, C, H, W)
357
+ x = self.dwconv(x)
358
+ x = x.flatten(2).transpose(1, 2)
359
+
360
+ return x
361
+
362
+ class mit_b0(MixVisionTransformer):
363
+ def __init__(self, **kwargs):
364
+ super(mit_b0, self).__init__(
365
+ patch_size=4, embed_dims=[32, 64, 160, 256], num_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4],
366
+ qkv_bias=True, norm_layer=partial(LayerNorm, eps=1e-6), depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1],
367
+ drop_rate=0.0, drop_path_rate=0.1)
368
+
369
+
370
+ class mit_b1(MixVisionTransformer):
371
+ def __init__(self, **kwargs):
372
+ super(mit_b1, self).__init__(
373
+ patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4],
374
+ qkv_bias=True, norm_layer=partial(LayerNorm, eps=1e-6), depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1],
375
+ drop_rate=0.0, drop_path_rate=0.1)
376
+
377
+
378
+ class mit_b2(MixVisionTransformer):
379
+ def __init__(self, **kwargs):
380
+ super(mit_b2, self).__init__(
381
+ patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4],
382
+ qkv_bias=True, norm_layer=partial(LayerNorm, eps=1e-6), depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1],
383
+ drop_rate=0.0, drop_path_rate=0.1)
384
+
385
+
386
+ class mit_b3(MixVisionTransformer):
387
+ def __init__(self, **kwargs):
388
+ super(mit_b3, self).__init__(
389
+ patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4],
390
+ qkv_bias=True, norm_layer=partial(LayerNorm, eps=1e-6), depths=[3, 4, 18, 3], sr_ratios=[8, 4, 2, 1],
391
+ drop_rate=0.0, drop_path_rate=0.1)
392
+
393
+ class mit_b3_avg(MixVisionTransformer):
394
+ def __init__(self, drop_path_rate=0.1, **kwargs):
395
+ super(mit_b3_avg, self).__init__(
396
+ patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4],
397
+ qkv_bias=True, norm_layer=partial(LayerNorm, eps=1e-6), depths=[3, 4, 18, 3], sr_ratios=[8, 4, 2, 1],
398
+ drop_rate=0.0, drop_path_rate=drop_path_rate, output_avg=True)
399
+
400
+ class mit_b4(MixVisionTransformer):
401
+ def __init__(self, **kwargs):
402
+ super(mit_b4, self).__init__(
403
+ patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4],
404
+ qkv_bias=True, norm_layer=partial(LayerNorm, eps=1e-6), depths=[3, 8, 27, 3], sr_ratios=[8, 4, 2, 1],
405
+ drop_rate=0.0, drop_path_rate=0.1)
406
+
407
+ class mit_b5(MixVisionTransformer):
408
+ def __init__(self, **kwargs):
409
+ super(mit_b5, self).__init__(
410
+ patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4],
411
+ qkv_bias=True, norm_layer=partial(LayerNorm, eps=1e-6), depths=[3, 6, 40, 3], sr_ratios=[8, 4, 2, 1],
412
+ drop_rate=0.0, drop_path_rate=0.1)
413
+
414
+ class mit_b5_avg(MixVisionTransformer):
415
+ def __init__(self, drop_path_rate=0.1, **kwargs):
416
+ super(mit_b5_avg, self).__init__(
417
+ patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4],
418
+ qkv_bias=True, norm_layer=partial(LayerNorm, eps=1e-6), depths=[3, 6, 40, 3], sr_ratios=[8, 4, 2, 1],
419
+ drop_rate=0.0, drop_path_rate=drop_path_rate, output_avg=True)
420
+
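The per-block drop-path rates in `MixVisionTransformer.__init__` and `reset_drop_path` above come from a single linear ramp across all stages (the "stochastic depth decay rule"). A standalone illustration of that ramp, using the `mit_b3` depths and drop_path_rate from above:

```python
import torch

depths, drop_path_rate = [3, 4, 18, 3], 0.1                 # mit_b3 values
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
cur = 0
for stage, d in enumerate(depths):
    # each stage gets a contiguous slice of the ramp, deeper blocks drop more often
    print(f"stage {stage}: {[round(r, 3) for r in dpr[cur:cur + d]]}")
    cur += d
# rates increase linearly from 0.0 in the first block to 0.1 in the last
```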
docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/vision/swin_backbone.py ADDED
@@ -0,0 +1,625 @@
1
+ # Copyright (c) 2021 Microsoft
2
+ #
3
+ # This source code is licensed under the MIT license found in the
4
+ # LICENSE file in the root directory of this source tree.
5
+ # --------------------------------------------------------
6
+ # Swin Transformer
7
+ # --------------------------------------------------------
8
+
9
+ import torch
10
+ import torch.nn as nn
11
+ import torch.utils.checkpoint as checkpoint
12
+ from timm.models.layers import DropPath, to_2tuple, trunc_normal_
13
+ from math import sqrt
+ import numpy as np
14
+
15
+ from megatron import get_args
16
+ from functools import partial
17
+
18
+
19
+ class Mlp(nn.Module):
20
+ def __init__(self, in_features, hidden_features=None,
21
+ out_features=None, act_layer=nn.GELU, drop=0.):
22
+ super().__init__()
23
+ out_features = out_features or in_features
24
+ hidden_features = hidden_features or in_features
25
+ self.fc1 = nn.Linear(in_features, hidden_features)
26
+ self.act = act_layer()
27
+ self.fc2 = nn.Linear(hidden_features, out_features)
28
+ self.drop = nn.Dropout(drop)
29
+
30
+ def forward(self, x):
31
+ x = self.fc1(x)
32
+ x = self.act(x)
33
+ x = self.drop(x)
34
+ x = self.fc2(x)
35
+ x = self.drop(x)
36
+ return x
37
+
38
+
39
+ def window_partition(x, window_size):
40
+ """
41
+ Args:
42
+ x: (B, H, W, C)
43
+ window_size (int): window size
44
+
45
+ Returns:
46
+ windows: (num_windows*B, window_size, window_size, C)
47
+ """
48
+ B, H, W, C = x.shape
49
+ x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
50
+ windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
51
+ return windows
52
+
53
+
54
+ def window_reverse(windows, window_size, H, W):
55
+ """
56
+ Args:
57
+ windows: (num_windows*B, window_size, window_size, C)
58
+ window_size (int): Window size
59
+ H (int): Height of image
60
+ W (int): Width of image
61
+
62
+ Returns:
63
+ x: (B, H, W, C)
64
+ """
65
+ B = int(windows.shape[0] / (H * W / window_size / window_size))
66
+ x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
67
+ x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
68
+ return x
69
+
70
+
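`window_partition` and `window_reverse` are exact inverses whenever H and W are multiples of the window size. A small round-trip check (sizes below are arbitrary):

```python
import torch

B, H, W, C, ws = 2, 14, 14, 96, 7                           # arbitrary sizes
x = torch.randn(B, H, W, C)
windows = window_partition(x, ws)                           # [B * H//ws * W//ws, ws, ws, C]
assert windows.shape == (B * (H // ws) * (W // ws), ws, ws, C)
assert torch.equal(window_reverse(windows, ws, H, W), x)    # back to [B, H, W, C]
```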
71
+ class WindowAttention(nn.Module):
72
+ r""" Window based multi-head self attention (W-MSA) module with relative position bias.
73
+ It supports both of shifted and non-shifted window.
74
+
75
+ Args:
76
+ dim (int): Number of input channels.
77
+ window_size (tuple[int]): The height and width of the window.
78
+ num_heads (int): Number of attention heads.
79
+ qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
80
+ qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
81
+ attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
82
+ proj_drop (float, optional): Dropout ratio of output. Default: 0.0
83
+ """
84
+
85
+ def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):
86
+
87
+ super().__init__()
88
+ self.dim = dim
89
+ self.window_size = window_size # Wh, Ww
90
+ self.num_heads = num_heads
91
+ head_dim = dim // num_heads
92
+ self.scale = qk_scale or head_dim ** -0.5
93
+
94
+ # define a parameter table of relative position bias
95
+ self.relative_position_bias_table = nn.Parameter(
96
+ torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH
97
+
98
+ # get pair-wise relative position index for each token inside the window
99
+ coords_h = torch.arange(self.window_size[0])
100
+ coords_w = torch.arange(self.window_size[1])
101
+ coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
102
+ coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
103
+ relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
104
+ relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
105
+ relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
106
+ relative_coords[:, :, 1] += self.window_size[1] - 1
107
+ relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
108
+ relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
109
+ self.register_buffer("relative_position_index", relative_position_index)
110
+
111
+ self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
112
+ self.attn_drop = nn.Dropout(attn_drop)
113
+ self.proj = nn.Linear(dim, dim)
114
+ self.proj_drop = nn.Dropout(proj_drop)
115
+
116
+ trunc_normal_(self.relative_position_bias_table, std=.02)
117
+ self.softmax = nn.Softmax(dim=-1)
118
+
119
+ def forward(self, x, mask=None):
120
+ """
121
+ Args:
122
+ x: input features with shape of (num_windows*B, N, C)
123
+ mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
124
+ """
125
+ B_, N, C = x.shape
126
+ qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
127
+ q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
128
+
129
+ q = q * self.scale
130
+ attn = (q @ k.transpose(-2, -1))
131
+
132
+ relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
133
+ self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH
134
+ relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
135
+ attn = attn + relative_position_bias.unsqueeze(0)
136
+
137
+ if mask is not None:
138
+ nW = mask.shape[0]
139
+ attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
140
+ attn = attn.view(-1, self.num_heads, N, N)
141
+ attn = self.softmax(attn)
142
+ else:
143
+ attn = self.softmax(attn)
144
+
145
+ attn = self.attn_drop(attn)
146
+
147
+ x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
148
+ x = self.proj(x)
149
+ x = self.proj_drop(x)
150
+ return x
151
+
152
+ def extra_repr(self) -> str:
153
+ return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}'
154
+
155
+ def flops(self, N):
156
+ # calculate flops for 1 window with token length of N
157
+ flops = 0
158
+ # qkv = self.qkv(x)
159
+ flops += N * self.dim * 3 * self.dim
160
+ # attn = (q @ k.transpose(-2, -1))
161
+ flops += self.num_heads * N * (self.dim // self.num_heads) * N
162
+ # x = (attn @ v)
163
+ flops += self.num_heads * N * N * (self.dim // self.num_heads)
164
+ # x = self.proj(x)
165
+ flops += N * self.dim * self.dim
166
+ return flops
167
+
168
+
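`WindowAttention` operates on already-partitioned windows and preserves their shape. A minimal shape check with an unshifted (mask-free) 7x7 window; the channel and head counts are assumed values for illustration:

```python
import torch

dim, num_heads, window = 96, 3, (7, 7)                      # assumed sizes
attn = WindowAttention(dim=dim, window_size=window, num_heads=num_heads)
x = torch.randn(8, window[0] * window[1], dim)              # 8 windows of 49 tokens
y = attn(x)                                                 # mask=None: plain W-MSA
assert y.shape == x.shape
```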
169
+ class SwinTransformerBlock(nn.Module):
170
+ r""" Swin Transformer Block.
171
+
172
+ Args:
173
+ dim (int): Number of input channels.
174
+ input_resolution (tuple[int]): Input resolution.
175
+ num_heads (int): Number of attention heads.
176
+ window_size (int): Window size.
177
+ shift_size (int): Shift size for SW-MSA.
178
+ mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
179
+ qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
180
+ qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
181
+ drop (float, optional): Dropout rate. Default: 0.0
182
+ attn_drop (float, optional): Attention dropout rate. Default: 0.0
183
+ drop_path (float, optional): Stochastic depth rate. Default: 0.0
184
+ act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
185
+ norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
186
+ """
187
+
188
+ def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,
189
+ mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,
190
+ act_layer=nn.GELU, norm_layer=nn.LayerNorm):
191
+ super().__init__()
192
+ self.dim = dim
193
+ self.input_resolution = input_resolution
194
+ self.num_heads = num_heads
195
+ self.window_size = window_size
196
+ self.shift_size = shift_size
197
+ self.mlp_ratio = mlp_ratio
198
+ if min(self.input_resolution) <= self.window_size:
199
+ # if window size is larger than input resolution, we don't partition windows
200
+ self.shift_size = 0
201
+ self.window_size = min(self.input_resolution)
202
+ assert 0 <= self.shift_size < self.window_size, "shift_size must be in [0, window_size)"
203
+
204
+ self.norm1 = norm_layer(dim)
205
+ self.attn = WindowAttention(
206
+ dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,
207
+ qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
208
+
209
+ self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
210
+ self.norm2 = norm_layer(dim)
211
+ mlp_hidden_dim = int(dim * mlp_ratio)
212
+ self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
213
+
214
+ self.H = input_resolution[0]
215
+ self.W = input_resolution[1]
216
+
217
+ self.attn_mask_dict = {}
218
+
219
+ def create_attn_mask(self, H, W):
220
+ # calculate attention mask for SW-MSA
221
+
222
+ Hp = int(np.ceil(H / self.window_size)) * self.window_size
223
+ Wp = int(np.ceil(W / self.window_size)) * self.window_size
224
+ img_mask = torch.zeros((1, Hp, Wp, 1)) # 1 Hp Wp 1
225
+ h_slices = (slice(0, -self.window_size),
226
+ slice(-self.window_size, -self.shift_size),
227
+ slice(-self.shift_size, None))
228
+ w_slices = (slice(0, -self.window_size),
229
+ slice(-self.window_size, -self.shift_size),
230
+ slice(-self.shift_size, None))
231
+ cnt = 0
232
+ for h in h_slices:
233
+ for w in w_slices:
234
+ img_mask[:, h, w, :] = cnt
235
+ cnt += 1
236
+
237
+ mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1
238
+ mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
239
+ attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
240
+ attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
241
+
242
+ return attn_mask
243
+
244
+
245
+ def forward(self, x):
246
+ B, L, C = x.shape
247
+ H = int(sqrt(L))
248
+ W = H
249
+
250
+ shortcut = x
251
+ x = self.norm1(x)
252
+ x = x.view(B, H, W, C)
253
+
254
+ # cyclic shift
255
+ if self.shift_size > 0:
256
+ shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
257
+ else:
258
+ shifted_x = x
259
+
260
+ # partition windows
261
+ x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C
262
+ x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C
263
+
264
+ # W-MSA/SW-MSA
265
+ attn_windows = self.attn(x_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C
266
+
267
+ # merge windows
268
+ attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
269
+ shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C
270
+
271
+ # reverse cyclic shift
272
+ if self.shift_size > 0:
273
+ x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
274
+ else:
275
+ x = shifted_x
276
+ x = x.view(B, H * W, C)
277
+
278
+ # FFN
279
+ x = shortcut + self.drop_path(x)
280
+ x = x + self.drop_path(self.mlp(self.norm2(x)))
281
+
282
+ return x
283
+
284
+ def extra_repr(self) -> str:
285
+ return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \
286
+ f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}"
287
+
288
+ def flops(self):
289
+ flops = 0
290
+ H, W = self.input_resolution
291
+ # norm1
292
+ flops += self.dim * H * W
293
+ # W-MSA/SW-MSA
294
+ nW = H * W / self.window_size / self.window_size
295
+ flops += nW * self.attn.flops(self.window_size * self.window_size)
296
+ # mlp
297
+ flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio
298
+ # norm2
299
+ flops += self.dim * H * W
300
+ return flops
301
+
302
+
303
+ class PatchMerging(nn.Module):
304
+ r""" Patch Merging Layer.
305
+
306
+ Args:
307
+ input_resolution (tuple[int]): Resolution of input feature.
308
+ dim (int): Number of input channels.
309
+ norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
310
+ """
311
+
312
+ def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
313
+ super().__init__()
314
+ self.input_resolution = input_resolution
315
+ self.dim = dim
316
+ self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
317
+ self.norm = norm_layer(4 * dim)
318
+
319
+ def forward(self, x):
320
+ """
321
+ x: B, H*W, C
322
+ """
323
+ H, W = self.input_resolution
324
+ B, L, C = x.shape
325
+ assert L == H * W, "input feature has wrong size"
326
+ assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) is not even."
327
+
328
+ x = x.view(B, H, W, C)
329
+
330
+ x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C
331
+ x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C
332
+ x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C
333
+ x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C
334
+ x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C
335
+ x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C
336
+
337
+ x = self.norm(x)
338
+ x = self.reduction(x)
339
+
340
+ return x
341
+
342
+ def extra_repr(self) -> str:
343
+ return f"input_resolution={self.input_resolution}, dim={self.dim}"
344
+
345
+ def flops(self):
346
+ H, W = self.input_resolution
347
+ flops = H * W * self.dim
348
+ flops += (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim
349
+ return flops
350
+
351
+
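`PatchMerging` concatenates each 2x2 neighbourhood and projects it, so it halves the spatial resolution and doubles the channel width. A quick shape check with assumed sizes:

```python
import torch

H, W, C = 14, 14, 96                                        # assumed sizes
merge = PatchMerging(input_resolution=(H, W), dim=C)
y = merge(torch.randn(2, H * W, C))
assert y.shape == (2, (H // 2) * (W // 2), 2 * C)           # [2, 49, 192]
```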
352
+ class BasicLayer(nn.Module):
353
+ """ A basic Swin Transformer layer for one stage.
354
+
355
+ Args:
356
+ dim (int): Number of input channels.
357
+ input_resolution (tuple[int]): Input resolution.
358
+ depth (int): Number of blocks.
359
+ num_heads (int): Number of attention heads.
360
+ window_size (int): Local window size.
361
+         mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
+         qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
+         qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
+         drop (float, optional): Dropout rate. Default: 0.0
+         attn_drop (float, optional): Attention dropout rate. Default: 0.0
+         drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
+         norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
+         downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
+         use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
+     """
+
+     def __init__(self, dim, input_resolution, depth, num_heads, window_size,
+                  mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
+                  drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False):
+
+         super().__init__()
+         self.dim = dim
+         self.input_resolution = input_resolution
+         self.depth = depth
+         self.use_checkpoint = use_checkpoint
+
+         # build blocks
+         self.blocks = nn.ModuleList([
+             SwinTransformerBlock(dim=dim, input_resolution=input_resolution,
+                                  num_heads=num_heads, window_size=window_size,
+                                  shift_size=0 if (i % 2 == 0) else window_size // 2,
+                                  mlp_ratio=mlp_ratio,
+                                  qkv_bias=qkv_bias, qk_scale=qk_scale,
+                                  drop=drop, attn_drop=attn_drop,
+                                  drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
+                                  norm_layer=norm_layer)
+             for i in range(depth)])
+
+         # patch merging layer
+         if downsample is not None:
+             self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)
+         else:
+             self.downsample = None
+
+     def forward(self, x):
+         for blk in self.blocks:
+             if self.use_checkpoint:
+                 x = checkpoint.checkpoint(blk, x)
+             else:
+                 x = blk(x)
+         x_b4_ds = x
+         if self.downsample is not None:
+             x = self.downsample(x)
+         return x_b4_ds, x
+
+     def extra_repr(self) -> str:
+         return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"
+
+     def flops(self):
+         flops = 0
+         for blk in self.blocks:
+             flops += blk.flops()
+         if self.downsample is not None:
+             flops += self.downsample.flops()
+         return flops
+
+
+ class PatchEmbed(nn.Module):
+     r""" Image to Patch Embedding
+
+     Args:
+         img_size (int): Image size. Default: 224.
+         patch_size (int): Patch token size. Default: 4.
+         in_chans (int): Number of input image channels. Default: 3.
+         embed_dim (int): Number of linear projection output channels. Default: 96.
+         norm_layer (nn.Module, optional): Normalization layer. Default: None
+     """
+
+     def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
+         super().__init__()
+         img_size = to_2tuple(img_size)
+         patch_size = to_2tuple(patch_size)
+         patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
+         self.img_size = img_size
+         self.patch_size = patch_size
+         self.patches_resolution = patches_resolution
+         self.num_patches = patches_resolution[0] * patches_resolution[1]
+
+         self.in_chans = in_chans
+         self.embed_dim = embed_dim
+
+         self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
+         if norm_layer is not None:
+             self.norm = norm_layer(embed_dim)
+         else:
+             self.norm = None
+
+     def forward(self, x):
+         B, C, H, W = x.shape
+         # FIXME look at relaxing size constraints
+         assert H == self.img_size[0] and W == self.img_size[1], \
+             f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
+         x = self.proj(x).flatten(2).transpose(1, 2)  # B Ph*Pw C
+         if self.norm is not None:
+             x = self.norm(x)
+         return x
+
+     def flops(self):
+         Ho, Wo = self.patches_resolution
+         flops = Ho * Wo * self.embed_dim * self.in_chans * (self.patch_size[0] * self.patch_size[1])
+         if self.norm is not None:
+             flops += Ho * Wo * self.embed_dim
+         return flops
+
+
+ class SwinTransformer(nn.Module):
+     r""" Swin Transformer
+         A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -
+           https://arxiv.org/pdf/2103.14030
+
+     Args:
+         img_size (int | tuple(int)): Input image size. Default 224
+         patch_size (int | tuple(int)): Patch size. Default: 4
+         in_chans (int): Number of input image channels. Default: 3
+         embed_dim (int): Patch embedding dimension. Default: 96
+         depths (tuple(int)): Depth of each Swin Transformer layer.
+         num_heads (tuple(int)): Number of attention heads in different layers.
+         window_size (int): Window size. Default: 7
+         mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
+         qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
+         qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None
+         drop_rate (float): Dropout rate. Default: 0
+         attn_drop_rate (float): Attention dropout rate. Default: 0
+         drop_path_rate (float): Stochastic depth rate. Default: 0.1
+         norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
+         ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
+         patch_norm (bool): If True, add normalization after patch embedding. Default: True
+         use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
+     """
+
+     def __init__(self, img_size=224, patch_size=4, in_chans=3,
+                  embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24],
+                  window_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None,
+                  drop_rate=0., attn_drop_rate=0., drop_path_rate=0.3,
+                  norm_layer=partial(nn.LayerNorm, eps=1e-6), ape=False, patch_norm=True,
+                  use_checkpoint=False, output_avg=False, **kwargs):
+         super().__init__()
+
+         self.num_layers = len(depths)
+         self.embed_dim = embed_dim
+         self.ape = ape
+         self.patch_norm = patch_norm
+         self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))
+         self.mlp_ratio = mlp_ratio
+         self.img_size = to_2tuple(img_size)
+         self.patch_size = to_2tuple(patch_size)
+         self.output_avg = output_avg
+
+         # split image into non-overlapping patches
+         self.patch_embed = PatchEmbed(
+             img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,
+             norm_layer=norm_layer if self.patch_norm else None)
+         num_patches = self.patch_embed.num_patches
+         patches_resolution = self.patch_embed.patches_resolution
+         self.patches_resolution = patches_resolution
+
+         # absolute position embedding
+         if self.ape:
+             self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
+             trunc_normal_(self.absolute_pos_embed, std=.02)
+
+         self.pos_drop = nn.Dropout(p=drop_rate)
+
+         # stochastic depth
+         dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]  # stochastic depth decay rule
+
+         # build layers
+         self.layers = nn.ModuleList()
+         for i_layer in range(self.num_layers):
+             layer = BasicLayer(dim=int(embed_dim * 2 ** i_layer),
+                                input_resolution=(patches_resolution[0] // (2 ** i_layer),
+                                                  patches_resolution[1] // (2 ** i_layer)),
+                                depth=depths[i_layer],
+                                num_heads=num_heads[i_layer],
+                                window_size=window_size,
+                                mlp_ratio=self.mlp_ratio,
+                                qkv_bias=qkv_bias, qk_scale=qk_scale,
+                                drop=drop_rate, attn_drop=attn_drop_rate,
+                                drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
+                                norm_layer=norm_layer,
+                                downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
+                                use_checkpoint=use_checkpoint)
+             self.layers.append(layer)
+
+         self.apply(self._init_weights)
+
+     def _init_weights(self, m):
+         if isinstance(m, nn.Linear):
+             trunc_normal_(m.weight, std=.02)
+             if isinstance(m, nn.Linear) and m.bias is not None:
+                 nn.init.constant_(m.bias, 0)
+         elif isinstance(m, nn.LayerNorm):
+             nn.init.constant_(m.bias, 0)
+             nn.init.constant_(m.weight, 1.0)
+
+     @torch.jit.ignore
+     def no_weight_decay(self):
+         return {'absolute_pos_embed'}
+
+     @torch.jit.ignore
+     def no_weight_decay_keywords(self):
+         return {'relative_position_bias_table'}
+
+     def forward(self, x):
+         x = self.patch_embed(x)
+         if self.ape:
+             x = x + self.absolute_pos_embed
+         x = self.pos_drop(x)
+
+         h = self.img_size[0] // self.patch_size[0]
+         w = self.img_size[1] // self.patch_size[1]
+         outs = []
+
+         for i, layer in enumerate(self.layers):
+             px, x = layer(x)
+             b, n, c = px.shape
+
+             if i != len(self.layers) - 1 or not self.output_avg:
+                 px = px.permute(0, 2, 1).contiguous()
+                 px = px.reshape(b, c, h, w)
+                 # spatial resolution halves between stages (PatchMerging in BasicLayer),
+                 # so track h and w for the next stage's reshape
+                 h, w = h//2, w//2
+             outs.append(px)
+
+         if self.output_avg:
+             return outs[-1].mean(dim=1)
+
+         return outs
+
+     def flops(self):
+         flops = 0
+         flops += self.patch_embed.flops()
+         for i, layer in enumerate(self.layers):
+             flops += layer.flops()
+         flops += self.num_features * self.patches_resolution[0] * self.patches_resolution[1] // (2 ** self.num_layers)
+         # this backbone variant has no classification head, so num_classes may be absent
+         flops += self.num_features * getattr(self, 'num_classes', 0)
+         return flops
+
+
+ def get_swin(drop_path_rate=0.3, output_avg=False):
+     args = get_args()
+
+     window_size = 7
+     embed_dim = 128
+     depths = [2, 2, 18, 2]
+     num_heads = [4, 8, 16, 32]
+     swin = SwinTransformer(
+         img_size=(args.img_h, args.img_w,),
+         in_chans=3,
+         patch_size=args.patch_dim,
+         embed_dim=embed_dim,
+         depths=depths,
+         num_heads=num_heads,
+         window_size=window_size,
+         drop_path_rate=drop_path_rate,
+         output_avg=output_avg,
+     )
+
+     return swin
+
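For reference, a minimal usage sketch of the SwinTransformer defined above (illustrative only; it assumes the earlier part of swin_backbone.py provides the standard SwinTransformerBlock and PatchMerging modules, and it bypasses get_swin so that Megatron's get_args() is not required):

import torch

# Hypothetical smoke test, not part of the commit: Swin-T sized configuration.
model = SwinTransformer(img_size=224, patch_size=4, embed_dim=96,
                        depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24],
                        window_size=7, output_avg=False)
model.eval()
with torch.no_grad():
    feats = model(torch.randn(1, 3, 224, 224))
# One feature map per stage; channels double while resolution halves:
# (1, 96, 56, 56), (1, 192, 28, 28), (1, 384, 14, 14), (1, 768, 7, 7)
for f in feats:
    print(tuple(f.shape))

With output_avg=True the last stage is instead mean-pooled over tokens, so a single [batch, num_features] vector is returned rather than the list of feature maps.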
docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/vision/utils.py ADDED
@@ -0,0 +1,27 @@
+ import warnings
+ import torch
+ import torch.nn.functional as F
+
+
+ def resize(input,
+            size=None,
+            scale_factor=None,
+            mode='nearest',
+            align_corners=None,
+            warning=True):
+     if warning:
+         if size is not None and align_corners:
+             input_h, input_w = tuple(int(x) for x in input.shape[2:])
+             output_h, output_w = tuple(int(x) for x in size)
+             if output_h > input_h or output_w > input_w:
+                 if ((output_h > 1 and output_w > 1 and input_h > 1
+                      and input_w > 1) and (output_h - 1) % (input_h - 1)
+                         and (output_w - 1) % (input_w - 1)):
+                     warnings.warn(
+                         f'When align_corners={align_corners}, '
+                         'the output would be more aligned if '
+                         f'input size {(input_h, input_w)} is `x+1` and '
+                         f'out size {(output_h, output_w)} is `nx+1`')
+     if isinstance(size, torch.Size):
+         size = tuple(int(x) for x in size)
+     return F.interpolate(input, size, scale_factor, mode, align_corners)
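A brief usage sketch of resize (hypothetical shapes; the helper simply forwards to F.interpolate after normalizing the size argument):

import torch

feat = torch.randn(2, 19, 64, 64)   # e.g. logits at 1/8 of the label resolution
out = resize(feat, size=(512, 512), mode='bilinear', align_corners=False)
print(out.shape)                     # torch.Size([2, 19, 512, 512])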
docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/model/vision/vit_backbone.py ADDED
@@ -0,0 +1,245 @@
+ # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+
+ """Vision Transformer (ViT) model."""
+
+ import math
+ import einops
+ import torch
+ import apex
+ import torch.nn.functional as F
+ from megatron import get_args
+ from megatron.model.transformer import ParallelTransformer
+ from megatron.model.utils import (
+     get_linear_layer,
+     init_method_normal,
+     scaled_init_method_normal,
+ )
+ from megatron.model.module import MegatronModule
+
+ CLASS_TOKEN_LENGTH = 8
+
+ class VitMlpHead(MegatronModule):
+     """Pooler layer.
+
+     Pool hidden states of a specific token (for example start of the
+     sequence) and add a linear transformation followed by a tanh.
+
+     Arguments:
+         hidden_size: hidden size
+         init_method: weight initialization method for the linear layer.
+             bias is set to zero.
+     """
+
+     def __init__(self, hidden_size, num_classes):
+         super(VitMlpHead, self).__init__()
+         self.dense_in = torch.nn.Linear(hidden_size, hidden_size)
+         self.relu = torch.nn.ReLU()
+         self.dense_out = torch.nn.Linear(hidden_size, num_classes)
+         torch.nn.init.constant_(self.dense_out.bias, -10)
+
+     def forward(self, hidden_states):
+         # hidden_states: [b, 1, h]
+         # sequence_index: index of the token to pool.
+         dense_in_result = self.dense_in(hidden_states)
+         tanh_result = torch.tanh(dense_in_result)
+         dense_out_result = self.dense_out(tanh_result)
+         return dense_out_result
+
+
+ def isPerfectSquare(x):
+     if(x >= 0):
+         sr = math.sqrt(x)
+         return (int(sr) * int(sr) == x)
+     return False
+
+
+ def twod_interpolate_position_embeddings_hook(
+     state_dict,
+     prefix,
+     local_metadata,
+     strict,
+     missing_keys,
+     unexpected_keys,
+     error_msgs,
+ ):
+
+     args = get_args()
+     num_patches_per_dim_h = args.img_h // args.patch_dim
+     num_patches_per_dim_w = args.img_w // args.patch_dim
+     num_patches = num_patches_per_dim_h * num_patches_per_dim_w
+     hidden_size = args.hidden_size
+
+     key = prefix + "weight"
+
+     assert key in state_dict
+     if key in state_dict:
+         input_param = state_dict[key]
+
+         input_seq_len = input_param.shape[0]
+         assert(isPerfectSquare(input_seq_len) or isPerfectSquare(input_seq_len - CLASS_TOKEN_LENGTH))
+         input_has_class_token = not isPerfectSquare(input_seq_len)
+         num_tok_input = input_seq_len - CLASS_TOKEN_LENGTH if input_has_class_token else input_seq_len
+         num_tok_output = num_patches
+         output_has_class_token = args.class_token_present
+
+         # update input_param and load it to state_dict[key]
+         if input_has_class_token:
+             input_param_tok = input_param[:CLASS_TOKEN_LENGTH, :]
+             input_param_grid = input_param[CLASS_TOKEN_LENGTH:, :]
+         else:
+             input_param_tok = torch.zeros(CLASS_TOKEN_LENGTH, hidden_size)
+             input_param_grid = input_param
+
+         assert input_param.shape[1] == hidden_size
+
+         if num_tok_input != num_tok_output:
+
+             gs_input = int(math.sqrt(num_tok_input))
+             gs_new = (num_patches_per_dim_h, num_patches_per_dim_w)
+
+             input_param_grid = input_param_grid.transpose(0, 1).contiguous()
+             input_param_grid = input_param_grid.reshape(
+                 (1, -1, gs_input, gs_input)
+             )
+             input_param_grid = input_param_grid.float()
+             scale_factor = (gs_new[0] / gs_input, gs_new[1] / gs_input)
+
+             input_param_grid = F.interpolate(
+                 input_param_grid, scale_factor=scale_factor, mode="bilinear"
+             )
+
+             input_param_grid = input_param_grid.half()
+             input_param_grid = input_param_grid.reshape((-1, num_tok_output))
+             input_param_grid = input_param_grid.transpose(0, 1).contiguous()
+
+             assert input_param_grid.shape[1] == hidden_size
+
+             input_param = input_param_grid
+             assert (
+                 input_param.shape[0] == num_tok_output
+                 and input_param.shape[1] == hidden_size
+             )
+
+         if output_has_class_token:
+             input_param = torch.cat((input_param_tok, input_param), dim=0)
+
+         state_dict[key] = input_param
+
+
+ class VitBackbone(MegatronModule):
+     """Vision Transformer Model."""
+
+     def __init__(self,
+                  config,
+                  pre_process=True,
+                  post_process=True,
+                  class_token=True,
+                  single_token_output=False,
+                  post_layer_norm=True,
+                  drop_path_rate=0.0):
+         super(VitBackbone, self).__init__(share_embeddings_and_output_weights=False)
+         args = get_args()
+
+         self.fp16_lm_cross_entropy = args.fp16_lm_cross_entropy
+
+         self.pre_process = pre_process
+         self.post_process = post_process
+         self.class_token = class_token
+         self.post_layer_norm = post_layer_norm
+         self.hidden_size = args.hidden_size
+         self.patch_dim = args.patch_dim
+         self.img_h = args.img_h
+         self.img_w = args.img_w
+         self.micro_batch_size = args.micro_batch_size
+         self.single_token_output = single_token_output
+         self.drop_path_rate = drop_path_rate
+
+         assert self.img_h % self.patch_dim == 0
+         assert self.img_w % self.patch_dim == 0
+         self.num_patches_per_dim_h = self.img_h // self.patch_dim
+         self.num_patches_per_dim_w = self.img_w // self.patch_dim
+         self.num_patches = self.num_patches_per_dim_h * self.num_patches_per_dim_w
+         self.seq_length = self.num_patches + (CLASS_TOKEN_LENGTH if self.class_token else 0)
+         self.flatten_dim = self.patch_dim * self.patch_dim * args.num_channels
+         self.input_tensor = None
+         self.position_ids = None
+
+         if self.pre_process:
+             # cls_token
+             if self.class_token:
+                 self.cls_token = torch.nn.Parameter(
+                     torch.randn(1, CLASS_TOKEN_LENGTH, self.hidden_size)
+                 )
+                 torch.nn.init.zeros_(self.cls_token)
+             self.position_ids = torch.arange(self.seq_length).expand(1, -1).cuda()
+
+             # Linear encoder
+             self.linear_encoder = torch.nn.Linear(
+                 self.flatten_dim, self.hidden_size
+             )
+
+             # embedding
+             self.position_embeddings = torch.nn.Embedding(
+                 self.seq_length, self.hidden_size
+             )
+             init_method_normal(args.init_method_std)(
+                 self.position_embeddings.weight
+             )
+
+             args.class_token_present = self.class_token
+             self.position_embeddings._register_load_state_dict_pre_hook(
+                 twod_interpolate_position_embeddings_hook
+             )
+
+             self.embedding_dropout = torch.nn.Dropout(args.hidden_dropout)
+
+         # Transformer
+         self.transformer = ParallelTransformer(
+             config,
+             pre_process=self.pre_process,
+             post_process=self.post_process,
+             post_layer_norm=self.post_layer_norm,
+             drop_path_rate=self.drop_path_rate
+         )
+
+     def set_input_tensor(self, input_tensor):
+         """See megatron.model.transformer.set_input_tensor()"""
+         self.transformer.set_input_tensor(input_tensor)
+
+     def forward(self, input):
+
+         if self.pre_process:
+             rearranged_input = einops.rearrange(
+                 input,
+                 "b c (h p1) (w p2) -> b (h w) (p1 p2 c)",
+                 p1=self.patch_dim,
+                 p2=self.patch_dim,
+             )
+
+             assert rearranged_input.dtype == torch.half
+             encoder_output = self.linear_encoder(rearranged_input)
+
+             concatenated_tokens = encoder_output
+             if self.class_token:
+                 cls_tokens = self.cls_token.expand(encoder_output.shape[0], -1, -1)
+                 concatenated_tokens = torch.cat((cls_tokens, encoder_output), dim=1)
+
+             token_embeddings = concatenated_tokens + \
+                 self.position_embeddings(self.position_ids[:, :concatenated_tokens.shape[1]])
+             # [b, s, h] => [s, b, h]
+             token_embeddings = token_embeddings.transpose(0, 1).contiguous()
+             hidden_states = self.embedding_dropout(token_embeddings)
+         else:
+             hidden_states = input
+
+         hidden_states = self.transformer(hidden_states, None)
+
+         if self.post_process:
+             # [s b h] => [b s h]
+             if self.single_token_output:
+                 hidden_states = hidden_states[0]
+             else:
+                 hidden_states = hidden_states.transpose(0, 1).contiguous()
+
+         return hidden_states
+
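The _register_load_state_dict_pre_hook call above lets a pretrained position-embedding table be loaded at a different image or patch size by bilinearly resizing the 2D grid of embeddings. A standalone sketch of that resizing step (hypothetical sizes, independent of Megatron):

import torch
import torch.nn.functional as F

# Hypothetical: resize a 14x14 grid of position embeddings to 16x16 (hidden size 64).
hidden = 64
old = torch.randn(14 * 14, hidden)                       # [num_tok_input, hidden]
grid = old.transpose(0, 1).reshape(1, hidden, 14, 14)    # [1, hidden, gs, gs]
grid = F.interpolate(grid, size=(16, 16), mode='bilinear')
new = grid.reshape(hidden, 16 * 16).transpose(0, 1)      # [num_tok_output, hidden]
print(new.shape)                                          # torch.Size([256, 64])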
docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/mpu/tests/__init__.py ADDED
File without changes
docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/mpu/tests/commons.py ADDED
@@ -0,0 +1,70 @@
+ # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+
+ import argparse
+ import os
+ import random
+ import numpy
+ import torch
+
+ import mpu
+ from deepspeed.accelerator import get_accelerator
+
+ class IdentityLayer(torch.nn.Module):
+     def __init__(self, size, scale=1.0):
+         super(IdentityLayer, self).__init__()
+         self.weight = torch.nn.Parameter(scale * torch.randn(size))
+
+     def forward(self):
+         return self.weight
+
+
+ def set_random_seed(seed):
+     """Set random seed for reproducibility."""
+     random.seed(seed)
+     numpy.random.seed(seed)
+     torch.manual_seed(seed)
+     mpu.model_parallel_cuda_manual_seed(seed)
+
+
+ def initialize_distributed(backend='nccl'):
+     """Initialize torch.distributed."""
+     # Get local rank in case it is provided.
+     parser = argparse.ArgumentParser()
+     parser.add_argument('--local_rank', type=int, default=None,
+                         help='local rank passed from distributed launcher')
+     args = parser.parse_args()
+     local_rank = args.local_rank
+
+     # Get rank and world size.
+     rank = int(os.getenv('RANK', '0'))
+     world_size = int(os.getenv("WORLD_SIZE", '1'))
+
+     print('> initializing torch.distributed with local rank: {}, '
+           'rank: {}, world size: {}'.format(local_rank, rank, world_size))
+
+     # Set the device id.
+     device = rank % get_accelerator().device_count()
+     if local_rank is not None:
+         device = local_rank
+     get_accelerator().set_device(device)
+
+     # Call the init process.
+     init_method = 'tcp://'
+     master_ip = os.getenv('MASTER_ADDR', 'localhost')
+     master_port = os.getenv('MASTER_PORT', '6000')
+     init_method += master_ip + ':' + master_port
+     torch.distributed.init_process_group(
+         backend=backend,
+         world_size=world_size,
+         rank=rank,
+         init_method=init_method)
+
+
+ def print_separator(message):
+     torch.distributed.barrier()
+     filler_len = (78 - len(message)) // 2
+     filler = '-' * filler_len
+     string = '\n' + filler + ' {} '.format(message) + filler
+     if torch.distributed.get_rank() == 0:
+         print(string, flush=True)
+     torch.distributed.barrier()
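initialize_distributed reads the standard torch.distributed environment variables; a single-process sketch of the same setup (hypothetical values, gloo backend so no accelerator is needed):

import os
import torch

# Hypothetical one-process launch mirroring the env vars the helper reads.
os.environ.setdefault('RANK', '0')
os.environ.setdefault('WORLD_SIZE', '1')
os.environ.setdefault('MASTER_ADDR', 'localhost')
os.environ.setdefault('MASTER_PORT', '6000')

torch.distributed.init_process_group(
    backend='gloo',   # commons.py defaults to 'nccl'
    init_method='tcp://{}:{}'.format(os.environ['MASTER_ADDR'], os.environ['MASTER_PORT']),
    world_size=int(os.environ['WORLD_SIZE']),
    rank=int(os.environ['RANK']))
print(torch.distributed.get_rank(), torch.distributed.get_world_size())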
docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/megatron/mpu/tests/test_cross_entropy.py ADDED
@@ -0,0 +1,96 @@
+ # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+
+ from commons import set_random_seed
+ from commons import IdentityLayer
+ from commons import print_separator
+ from commons import initialize_distributed
+ from mpu.cross_entropy import vocab_parallel_cross_entropy
+ import mpu
+ import torch.nn.functional as F
+ import torch
+ import random
+ import sys
+ from deepspeed.accelerator import get_accelerator
+ sys.path.append("../..")
+
+
+ def torch_cross_entropy(batch_size, seq_length, vocab_size,
+                         logits_scale, seed):
+     set_random_seed(seed)
+     identity = IdentityLayer((batch_size, seq_length, vocab_size),
+                              scale=logits_scale).to(get_accelerator().device_name())
+     logits = identity()
+     target = get_accelerator().LongTensor(
+         size=(batch_size, seq_length)).random_(0, vocab_size)
+     loss = F.cross_entropy(logits.view(-1, logits.size()[-1]),
+                            target.view(-1),
+                            reduction='none').view_as(target).mean()
+     loss.backward()
+     return loss, identity.weight.grad
+
+
+ def mpu_cross_entropy(batch_size, seq_length, vocab_size,
+                       logits_scale, seed):
+     set_random_seed(seed)
+     identity = IdentityLayer((batch_size, seq_length, vocab_size),
+                              scale=logits_scale).to(get_accelerator().device_name())
+     logits = identity()
+     logits_parallel = mpu.scatter_to_tensor_model_parallel_region(logits)
+     target = get_accelerator().LongTensor(
+         size=(batch_size, seq_length)).random_(0, vocab_size)
+     loss = vocab_parallel_cross_entropy(logits_parallel, target).mean()
+     loss.backward()
+     return loss, identity.weight.grad
+
+
+ def test_cross_entropy(tensor_model_parallel_size):
+
+     if torch.distributed.get_rank() == 0:
+         print('> testing cross entropy with model parallel size {} ...'.
+               format(tensor_model_parallel_size))
+
+     mpu.initialize_model_parallel(tensor_model_parallel_size)
+     tensor_model_parallel_size = mpu.get_tensor_model_parallel_world_size()
+
+     batch_size = 13
+     seq_length = 17
+     vocab_size_per_partition = 11
+     logits_scale = 1000.0
+     vocab_size = vocab_size_per_partition * tensor_model_parallel_size
+     seed = 1234
+
+     loss_torch, grad_torch = torch_cross_entropy(batch_size, seq_length,
+                                                  vocab_size, logits_scale,
+                                                  seed)
+     loss_mpu, grad_mpu = mpu_cross_entropy(batch_size, seq_length,
+                                            vocab_size, logits_scale,
+                                            seed)
+
+     error = loss_torch.sub_(loss_mpu).abs().max()
+     print(' max error in loss on global rank {}: {}'.format(
+         torch.distributed.get_rank(), error))
+     assert error < 1.0e-6
+
+     error = grad_torch.sub_(grad_mpu).abs().max()
+     print(' max error in grad on global rank {}: {}'.format(
+         torch.distributed.get_rank(), error))
+     assert error < 1.0e-6
+
+     # Reset groups
+     mpu.destroy_tensor_model_parallel()
+
+     torch.distributed.barrier()
+     if torch.distributed.get_rank() == 0:
+         print('>> passed the test :-)')
+
+
+ if __name__ == '__main__':
+
+     initialize_distributed()
+     world_size = torch.distributed.get_world_size()
+
+     tensor_model_parallel_size = 1
+     while tensor_model_parallel_size <= world_size:
+         print_separator('test cross entropy')
+         test_cross_entropy(tensor_model_parallel_size)
+         tensor_model_parallel_size *= 2
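The property being tested is that the vocab-parallel loss matches plain cross entropy computed over the full vocabulary; with a single partition the reference side reduces to the following (illustrative only, no model parallelism involved):

import torch
import torch.nn.functional as F

# Mean token-level cross entropy with the same shapes as the test above.
torch.manual_seed(1234)
logits = 1000.0 * torch.randn(13, 17, 11)    # [batch, seq, vocab]
target = torch.randint(0, 11, (13, 17))
loss = F.cross_entropy(logits.view(-1, logits.size(-1)),
                       target.view(-1),
                       reduction='none').view_as(target).mean()
print(loss.item())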