applied-ai-018 committed
Commit da97301 · verified · Parent: e22332c

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full set.
Files changed (50)
  1. .gitattributes +2 -0
  2. ckpts/universal/global_step20/mp_rank_07_model_states.pt +3 -0
  3. ckpts/universal/global_step40/zero/3.post_attention_layernorm.weight/exp_avg_sq.pt +3 -0
  4. lm-evaluation-harness/tests/testdata/arithmetic_3da-v0-loglikelihood +1 -0
  5. lm-evaluation-harness/tests/testdata/blimp_complex_NP_island-v0-res.json +1 -0
  6. lm-evaluation-harness/tests/testdata/blimp_irregular_past_participle_verbs-v0-loglikelihood +1 -0
  7. lm-evaluation-harness/tests/testdata/blimp_passive_2-v0-loglikelihood +1 -0
  8. lm-evaluation-harness/tests/testdata/crows_pairs_french_age-v0-res.json +1 -0
  9. lm-evaluation-harness/tests/testdata/crows_pairs_french_gender-v0-res.json +1 -0
  10. lm-evaluation-harness/tests/testdata/hellaswag-v0-loglikelihood +1 -0
  11. lm-evaluation-harness/tests/testdata/iwslt17-ar-en-v0-res.json +1 -0
  12. lm-evaluation-harness/tests/testdata/pile_bookcorpus2-v0-res.json +1 -0
  13. lm-evaluation-harness/tests/testdata/pile_opensubtitles-v1-res.json +1 -0
  14. lm-evaluation-harness/tests/testdata/pile_pubmed-abstracts-v0-loglikelihood_rolling +1 -0
  15. lm-evaluation-harness/tests/testdata/pile_ubuntu-irc-v1-res.json +1 -0
  16. lm-evaluation-harness/tests/testdata/piqa-v0-res.json +1 -0
  17. lm-evaluation-harness/tests/testdata/prost-v0-res.json +1 -0
  18. lm-evaluation-harness/tests/testdata/squad2-v0-res.json +1 -0
  19. lm-evaluation-harness/tests/testdata/wmt20-ta-en-v0-res.json +1 -0
  20. venv/lib/python3.10/site-packages/pyarrow/_flight.cpython-310-x86_64-linux-gnu.so +3 -0
  21. venv/lib/python3.10/site-packages/pyarrow/libarrow_substrait.so.1600 +3 -0
  22. venv/lib/python3.10/site-packages/transformers/models/dinat/__init__.py +56 -0
  23. venv/lib/python3.10/site-packages/transformers/models/dinat/__pycache__/__init__.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/transformers/models/dinat/__pycache__/configuration_dinat.cpython-310.pyc +0 -0
  25. venv/lib/python3.10/site-packages/transformers/models/dinat/__pycache__/modeling_dinat.cpython-310.pyc +0 -0
  26. venv/lib/python3.10/site-packages/transformers/models/dinat/configuration_dinat.py +152 -0
  27. venv/lib/python3.10/site-packages/transformers/models/dinat/modeling_dinat.py +976 -0
  28. venv/lib/python3.10/site-packages/transformers/models/nllb/__init__.py +64 -0
  29. venv/lib/python3.10/site-packages/transformers/models/nllb/__pycache__/__init__.cpython-310.pyc +0 -0
  30. venv/lib/python3.10/site-packages/transformers/models/nllb/__pycache__/tokenization_nllb.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/transformers/models/nllb/__pycache__/tokenization_nllb_fast.cpython-310.pyc +0 -0
  32. venv/lib/python3.10/site-packages/transformers/models/nllb/tokenization_nllb.py +433 -0
  33. venv/lib/python3.10/site-packages/transformers/models/nllb/tokenization_nllb_fast.py +340 -0
  34. venv/lib/python3.10/site-packages/transformers/models/nllb_moe/__init__.py +68 -0
  35. venv/lib/python3.10/site-packages/transformers/models/nllb_moe/__pycache__/__init__.cpython-310.pyc +0 -0
  36. venv/lib/python3.10/site-packages/transformers/models/nllb_moe/__pycache__/configuration_nllb_moe.cpython-310.pyc +0 -0
  37. venv/lib/python3.10/site-packages/transformers/models/nllb_moe/__pycache__/convert_nllb_moe_sharded_original_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  38. venv/lib/python3.10/site-packages/transformers/models/nllb_moe/__pycache__/modeling_nllb_moe.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/transformers/models/nllb_moe/configuration_nllb_moe.py +218 -0
  40. venv/lib/python3.10/site-packages/transformers/models/nllb_moe/convert_nllb_moe_sharded_original_checkpoint_to_pytorch.py +160 -0
  41. venv/lib/python3.10/site-packages/transformers/models/nllb_moe/modeling_nllb_moe.py +1792 -0
  42. venv/lib/python3.10/site-packages/transformers/models/owlvit/__init__.py +100 -0
  43. venv/lib/python3.10/site-packages/transformers/models/owlvit/__pycache__/__init__.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/transformers/models/owlvit/__pycache__/configuration_owlvit.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/transformers/models/owlvit/__pycache__/convert_owlvit_original_flax_to_hf.cpython-310.pyc +0 -0
  46. venv/lib/python3.10/site-packages/transformers/models/owlvit/__pycache__/feature_extraction_owlvit.cpython-310.pyc +0 -0
  47. venv/lib/python3.10/site-packages/transformers/models/owlvit/__pycache__/image_processing_owlvit.cpython-310.pyc +0 -0
  48. venv/lib/python3.10/site-packages/transformers/models/owlvit/__pycache__/modeling_owlvit.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/transformers/models/owlvit/__pycache__/processing_owlvit.cpython-310.pyc +0 -0
  50. venv/lib/python3.10/site-packages/transformers/models/owlvit/configuration_owlvit.py +383 -0
.gitattributes CHANGED
@@ -76,3 +76,5 @@ venv/lib/python3.10/site-packages/lxml/objectify.cpython-310-x86_64-linux-gnu.so
  venv/lib/python3.10/site-packages/pyarrow/_dataset.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
  venv/lib/python3.10/site-packages/lxml/etree.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
  venv/lib/python3.10/site-packages/pyarrow/libparquet.so.1600 filter=lfs diff=lfs merge=lfs -text
+ venv/lib/python3.10/site-packages/pyarrow/_flight.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+ venv/lib/python3.10/site-packages/pyarrow/libarrow_substrait.so.1600 filter=lfs diff=lfs merge=lfs -text
ckpts/universal/global_step20/mp_rank_07_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c1363e27ebe9f01e1bfebe941512311c310278a9617088f04a956501eb877652
+ size 4230084
ckpts/universal/global_step40/zero/3.post_attention_layernorm.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:755cfad2148bad9529e2acf8a360c49adcd9ce49f62187a2c924cea966c899aa
+ size 9387
lm-evaluation-harness/tests/testdata/arithmetic_3da-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ c421f9cd5a5001b80e528441da925128177a04db8526ebcdab543a90b33c9ce2
lm-evaluation-harness/tests/testdata/blimp_complex_NP_island-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_complex_NP_island": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_complex_NP_island": 0}}
lm-evaluation-harness/tests/testdata/blimp_irregular_past_participle_verbs-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 63ec733873f94ace71cb34112d1c3cd5bb768c26b975fb90acc9b8ba3f4e938e
lm-evaluation-harness/tests/testdata/blimp_passive_2-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 755bdfe2c89737c43001ff1dc83d68ad33e444aaf0669af66aaf82dcd09f2eca
lm-evaluation-harness/tests/testdata/crows_pairs_french_age-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"crows_pairs_french_age": {"likelihood_difference": 0.31896094607685194, "likelihood_difference_stderr": 0.024068391933540753, "pct_stereotype": 0.4444444444444444, "pct_stereotype_stderr": 0.05267171812666418}}, "versions": {"crows_pairs_french_age": 0}}
lm-evaluation-harness/tests/testdata/crows_pairs_french_gender-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"crows_pairs_french_gender": {"likelihood_difference": 0.3364019171359413, "likelihood_difference_stderr": 0.012815700745990895, "pct_stereotype": 0.4766355140186916, "pct_stereotype_stderr": 0.027920316348204986}}, "versions": {"crows_pairs_french_gender": 0}}
lm-evaluation-harness/tests/testdata/hellaswag-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ abb808c97d6529eda6c11067837a132c62d25cba0394d720f80cca6df9f7196e
lm-evaluation-harness/tests/testdata/iwslt17-ar-en-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"iwslt17-ar-en": {"bleu": 0.0, "bleu_stderr": 0.0, "chrf": 0.015049895477752772, "chrf_stderr": 0.0002940315671893584, "ter": 1.0, "ter_stderr": 0.0}}, "versions": {"iwslt17-ar-en": 0}}
lm-evaluation-harness/tests/testdata/pile_bookcorpus2-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"pile_bookcorpus2": {"bits_per_byte": 1.1631037706429144e-06, "byte_perplexity": 1.000001163104447, "word_perplexity": 1.0000066499426599}}, "versions": {"pile_bookcorpus2": 0}}
lm-evaluation-harness/tests/testdata/pile_opensubtitles-v1-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"pile_opensubtitles": {"bits_per_byte": 2.1948356082685497e-05, "byte_perplexity": 1.0000152135568616, "word_perplexity": 1.0000856162053249}}, "versions": {"pile_opensubtitles": 1}}
lm-evaluation-harness/tests/testdata/pile_pubmed-abstracts-v0-loglikelihood_rolling ADDED
@@ -0,0 +1 @@
+ 66436569a43163afb2caf422d32c5f329899e74c49865d4d13881fd465fd9976
lm-evaluation-harness/tests/testdata/pile_ubuntu-irc-v1-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"pile_ubuntu-irc": {"bits_per_byte": 2.3513498942121155e-06, "byte_perplexity": 1.0000016298328778, "word_perplexity": 1.0000108866656874}}, "versions": {"pile_ubuntu-irc": 1}}
lm-evaluation-harness/tests/testdata/piqa-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"piqa": {"acc": 0.514145810663765, "acc_norm": 0.5114254624591947, "acc_norm_stderr": 0.01166277802645167, "acc_stderr": 0.011661154475524836}}, "versions": {"piqa": 0}}
lm-evaluation-harness/tests/testdata/prost-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"prost": {"acc": 0.24631725021349274, "acc_norm": 0.2581127241673783, "acc_norm_stderr": 0.00319703079646546, "acc_stderr": 0.003147855968061357}}, "versions": {"prost": 0}}
lm-evaluation-harness/tests/testdata/squad2-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"squad2": {"HasAns_exact": 0.0, "HasAns_f1": 0.0, "NoAns_exact": 0.0, "NoAns_f1": 0.0, "best_exact": 50.07159100480081, "best_f1": 50.07159100480081, "exact": 0.0, "f1": 0.0}}, "versions": {"squad2": 0}}
lm-evaluation-harness/tests/testdata/wmt20-ta-en-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"wmt20-ta-en": {"bleu": 0.0, "bleu_stderr": 0.0, "chrf": 0.013841110664859798, "chrf_stderr": 0.00018476696850880766, "ter": 1.0, "ter_stderr": 0.0}}, "versions": {"wmt20-ta-en": 0}}
venv/lib/python3.10/site-packages/pyarrow/_flight.cpython-310-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d69b13bb38af740d932b3333f923b1c11be3a42727fe327902cd8384dc6b3874
+ size 1291200
venv/lib/python3.10/site-packages/pyarrow/libarrow_substrait.so.1600 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a87b3e5ac91904fc9ace8650052d704711dd74563afb9caf98ca589b71995492
+ size 5332496
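Several of the added files are Git LFS pointer stubs with the three-line `version` / `oid` / `size` format shown above. As a hedged illustration (not part of the commit), a small parser for that format:

```python
# Sketch: parse a Git LFS pointer file of the form shown above
# (version / oid sha256:<hex> / size <bytes>). Illustrative only.
def parse_lfs_pointer(path: str) -> dict:
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return {
        "version": fields.get("version"),
        "sha256": fields.get("oid", "").removeprefix("sha256:"),
        "size_bytes": int(fields.get("size", "0")),
    }

# Example path taken from this commit:
# parse_lfs_pointer("venv/lib/python3.10/site-packages/pyarrow/libarrow_substrait.so.1600")
```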
venv/lib/python3.10/site-packages/transformers/models/dinat/__init__.py ADDED
@@ -0,0 +1,56 @@
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ from typing import TYPE_CHECKING
+
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+
+
+ _import_structure = {"configuration_dinat": ["DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DinatConfig"]}
+
+
+ try:
+     if not is_torch_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_dinat"] = [
+         "DINAT_PRETRAINED_MODEL_ARCHIVE_LIST",
+         "DinatForImageClassification",
+         "DinatModel",
+         "DinatPreTrainedModel",
+         "DinatBackbone",
+     ]
+
+ if TYPE_CHECKING:
+     from .configuration_dinat import DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP, DinatConfig
+
+     try:
+         if not is_torch_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_dinat import (
+             DINAT_PRETRAINED_MODEL_ARCHIVE_LIST,
+             DinatBackbone,
+             DinatForImageClassification,
+             DinatModel,
+             DinatPreTrainedModel,
+         )
+
+ else:
+     import sys
+
+     sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
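The `__init__.py` above wires `dinat` into Transformers' `_LazyModule` machinery: submodules listed in `_import_structure` are only imported when their names are first accessed, and `modeling_dinat` is only registered when `is_torch_available()` is true. A brief usage sketch (assuming `torch` and the `natten` backend are installed):

```python
# Sketch: the lazy module resolves these names on first attribute access.
from transformers import DinatConfig, DinatModel

config = DinatConfig()      # defaults mirror shi-labs/dinat-mini-in1k-224
model = DinatModel(config)  # randomly initialized weights; requires the natten backend
print(type(model).__name__, config.num_layers)
```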
venv/lib/python3.10/site-packages/transformers/models/dinat/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (961 Bytes).
 
venv/lib/python3.10/site-packages/transformers/models/dinat/__pycache__/configuration_dinat.cpython-310.pyc ADDED
Binary file (6.57 kB).
 
venv/lib/python3.10/site-packages/transformers/models/dinat/__pycache__/modeling_dinat.cpython-310.pyc ADDED
Binary file (32.6 kB).
 
venv/lib/python3.10/site-packages/transformers/models/dinat/configuration_dinat.py ADDED
@@ -0,0 +1,152 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """ Dilated Neighborhood Attention Transformer model configuration"""
+
+ from ...configuration_utils import PretrainedConfig
+ from ...utils import logging
+ from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
+
+
+ logger = logging.get_logger(__name__)
+
+
+ from ..deprecated._archive_maps import DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402
+
+
+ class DinatConfig(BackboneConfigMixin, PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`DinatModel`]. It is used to instantiate a Dinat
+     model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+     defaults will yield a similar configuration to that of the Dinat
+     [shi-labs/dinat-mini-in1k-224](https://huggingface.co/shi-labs/dinat-mini-in1k-224) architecture.
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+     Args:
+         patch_size (`int`, *optional*, defaults to 4):
+             The size (resolution) of each patch. NOTE: Only patch size of 4 is supported at the moment.
+         num_channels (`int`, *optional*, defaults to 3):
+             The number of input channels.
+         embed_dim (`int`, *optional*, defaults to 64):
+             Dimensionality of patch embedding.
+         depths (`List[int]`, *optional*, defaults to `[3, 4, 6, 5]`):
+             Number of layers in each level of the encoder.
+         num_heads (`List[int]`, *optional*, defaults to `[2, 4, 8, 16]`):
+             Number of attention heads in each layer of the Transformer encoder.
+         kernel_size (`int`, *optional*, defaults to 7):
+             Neighborhood Attention kernel size.
+         dilations (`List[List[int]]`, *optional*, defaults to `[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]]`):
+             Dilation value of each NA layer in the Transformer encoder.
+         mlp_ratio (`float`, *optional*, defaults to 3.0):
+             Ratio of MLP hidden dimensionality to embedding dimensionality.
+         qkv_bias (`bool`, *optional*, defaults to `True`):
+             Whether or not a learnable bias should be added to the queries, keys and values.
+         hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
+             The dropout probability for all fully connected layers in the embeddings and encoder.
+         attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
+             The dropout ratio for the attention probabilities.
+         drop_path_rate (`float`, *optional*, defaults to 0.1):
+             Stochastic depth rate.
+         hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+             The non-linear activation function (function or string) in the encoder. If string, `"gelu"`, `"relu"`,
+             `"selu"` and `"gelu_new"` are supported.
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         layer_norm_eps (`float`, *optional*, defaults to 1e-05):
+             The epsilon used by the layer normalization layers.
+         layer_scale_init_value (`float`, *optional*, defaults to 0.0):
+             The initial value for the layer scale. Disabled if <=0.
+         out_features (`List[str]`, *optional*):
+             If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
+             (depending on how many stages the model has). If unset and `out_indices` is set, will default to the
+             corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
+             same order as defined in the `stage_names` attribute.
+         out_indices (`List[int]`, *optional*):
+             If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
+             many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
+             If unset and `out_features` is unset, will default to the last stage. Must be in the
+             same order as defined in the `stage_names` attribute.
+
+     Example:
+
+     ```python
+     >>> from transformers import DinatConfig, DinatModel
+
+     >>> # Initializing a Dinat shi-labs/dinat-mini-in1k-224 style configuration
+     >>> configuration = DinatConfig()
+
+     >>> # Initializing a model (with random weights) from the shi-labs/dinat-mini-in1k-224 style configuration
+     >>> model = DinatModel(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+
+     model_type = "dinat"
+
+     attribute_map = {
+         "num_attention_heads": "num_heads",
+         "num_hidden_layers": "num_layers",
+     }
+
+     def __init__(
+         self,
+         patch_size=4,
+         num_channels=3,
+         embed_dim=64,
+         depths=[3, 4, 6, 5],
+         num_heads=[2, 4, 8, 16],
+         kernel_size=7,
+         dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]],
+         mlp_ratio=3.0,
+         qkv_bias=True,
+         hidden_dropout_prob=0.0,
+         attention_probs_dropout_prob=0.0,
+         drop_path_rate=0.1,
+         hidden_act="gelu",
+         initializer_range=0.02,
+         layer_norm_eps=1e-5,
+         layer_scale_init_value=0.0,
+         out_features=None,
+         out_indices=None,
+         **kwargs,
+     ):
+         super().__init__(**kwargs)
+
+         self.patch_size = patch_size
+         self.num_channels = num_channels
+         self.embed_dim = embed_dim
+         self.depths = depths
+         self.num_layers = len(depths)
+         self.num_heads = num_heads
+         self.kernel_size = kernel_size
+         self.dilations = dilations
+         self.mlp_ratio = mlp_ratio
+         self.qkv_bias = qkv_bias
+         self.hidden_dropout_prob = hidden_dropout_prob
+         self.attention_probs_dropout_prob = attention_probs_dropout_prob
+         self.drop_path_rate = drop_path_rate
+         self.hidden_act = hidden_act
+         self.layer_norm_eps = layer_norm_eps
+         self.initializer_range = initializer_range
+         # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
+         # this indicates the channel dimension after the last stage of the model
+         self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
+         self.layer_scale_init_value = layer_scale_init_value
+         self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
+         self._out_features, self._out_indices = get_aligned_output_features_output_indices(
+             out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
+         )
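Since `DinatConfig.__init__` derives several attributes (`num_layers`, `hidden_size`, `stage_names`) from `depths` and `embed_dim`, here is a short illustrative sketch of what the defaults above produce; the values follow directly from the code shown:

```python
from transformers import DinatConfig

config = DinatConfig()  # defaults: embed_dim=64, depths=[3, 4, 6, 5]
print(config.num_layers)   # 4   -> len(depths)
print(config.hidden_size)  # 512 -> int(64 * 2 ** (4 - 1))
print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
```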
venv/lib/python3.10/site-packages/transformers/models/dinat/modeling_dinat.py ADDED
@@ -0,0 +1,976 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 SHI Labs and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch Dilated Neighborhood Attention Transformer model."""
16
+
17
+
18
+ import math
19
+ from dataclasses import dataclass
20
+ from typing import Optional, Tuple, Union
21
+
22
+ import torch
23
+ import torch.utils.checkpoint
24
+ from torch import nn
25
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
26
+
27
+ from ...activations import ACT2FN
28
+ from ...modeling_outputs import BackboneOutput
29
+ from ...modeling_utils import PreTrainedModel
30
+ from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
31
+ from ...utils import (
32
+ ModelOutput,
33
+ OptionalDependencyNotAvailable,
34
+ add_code_sample_docstrings,
35
+ add_start_docstrings,
36
+ add_start_docstrings_to_model_forward,
37
+ is_natten_available,
38
+ logging,
39
+ replace_return_docstrings,
40
+ requires_backends,
41
+ )
42
+ from ...utils.backbone_utils import BackboneMixin
43
+ from .configuration_dinat import DinatConfig
44
+
45
+
46
+ if is_natten_available():
47
+ from natten.functional import natten2dav, natten2dqkrpb
48
+ else:
49
+
50
+ def natten2dqkrpb(*args, **kwargs):
51
+ raise OptionalDependencyNotAvailable()
52
+
53
+ def natten2dav(*args, **kwargs):
54
+ raise OptionalDependencyNotAvailable()
55
+
56
+
57
+ logger = logging.get_logger(__name__)
58
+
59
+ # General docstring
60
+ _CONFIG_FOR_DOC = "DinatConfig"
61
+
62
+ # Base docstring
63
+ _CHECKPOINT_FOR_DOC = "shi-labs/dinat-mini-in1k-224"
64
+ _EXPECTED_OUTPUT_SHAPE = [1, 7, 7, 512]
65
+
66
+ # Image classification docstring
67
+ _IMAGE_CLASS_CHECKPOINT = "shi-labs/dinat-mini-in1k-224"
68
+ _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
69
+
70
+
71
+ from ..deprecated._archive_maps import DINAT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
72
+
73
+
74
+ # drop_path and DinatDropPath are from the timm library.
75
+
76
+
77
+ @dataclass
78
+ # Copied from transformers.models.nat.modeling_nat.NatEncoderOutput with Nat->Dinat
79
+ class DinatEncoderOutput(ModelOutput):
80
+ """
81
+ Dinat encoder's outputs, with potential hidden states and attentions.
82
+
83
+ Args:
84
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
85
+ Sequence of hidden-states at the output of the last layer of the model.
86
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
87
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
88
+ shape `(batch_size, sequence_length, hidden_size)`.
89
+
90
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
91
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
92
+ Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length,
93
+ sequence_length)`.
94
+
95
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
96
+ heads.
97
+ reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
98
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
99
+ shape `(batch_size, hidden_size, height, width)`.
100
+
101
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
102
+ include the spatial dimensions.
103
+ """
104
+
105
+ last_hidden_state: torch.FloatTensor = None
106
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
107
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
108
+ reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
109
+
110
+
111
+ @dataclass
112
+ # Copied from transformers.models.nat.modeling_nat.NatModelOutput with Nat->Dinat
113
+ class DinatModelOutput(ModelOutput):
114
+ """
115
+ Dinat model's outputs that also contains a pooling of the last hidden states.
116
+
117
+ Args:
118
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
119
+ Sequence of hidden-states at the output of the last layer of the model.
120
+ pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`, *optional*, returned when `add_pooling_layer=True` is passed):
121
+ Average pooling of the last layer hidden-state.
122
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
123
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
124
+ shape `(batch_size, sequence_length, hidden_size)`.
125
+
126
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
127
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
128
+ Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length,
129
+ sequence_length)`.
130
+
131
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
132
+ heads.
133
+ reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
134
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
135
+ shape `(batch_size, hidden_size, height, width)`.
136
+
137
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
138
+ include the spatial dimensions.
139
+ """
140
+
141
+ last_hidden_state: torch.FloatTensor = None
142
+ pooler_output: Optional[torch.FloatTensor] = None
143
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
144
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
145
+ reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
146
+
147
+
148
+ @dataclass
149
+ # Copied from transformers.models.nat.modeling_nat.NatImageClassifierOutput with Nat->Dinat
150
+ class DinatImageClassifierOutput(ModelOutput):
151
+ """
152
+ Dinat outputs for image classification.
153
+
154
+ Args:
155
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
156
+ Classification (or regression if config.num_labels==1) loss.
157
+ logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
158
+ Classification (or regression if config.num_labels==1) scores (before SoftMax).
159
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
160
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
161
+ shape `(batch_size, sequence_length, hidden_size)`.
162
+
163
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
164
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
165
+ Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length,
166
+ sequence_length)`.
167
+
168
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
169
+ heads.
170
+ reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
171
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
172
+ shape `(batch_size, hidden_size, height, width)`.
173
+
174
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
175
+ include the spatial dimensions.
176
+ """
177
+
178
+ loss: Optional[torch.FloatTensor] = None
179
+ logits: torch.FloatTensor = None
180
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
181
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
182
+ reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
183
+
184
+
185
+ # Copied from transformers.models.nat.modeling_nat.NatEmbeddings with Nat->Dinat
186
+ class DinatEmbeddings(nn.Module):
187
+ """
188
+ Construct the patch and position embeddings.
189
+ """
190
+
191
+ def __init__(self, config):
192
+ super().__init__()
193
+
194
+ self.patch_embeddings = DinatPatchEmbeddings(config)
195
+
196
+ self.norm = nn.LayerNorm(config.embed_dim)
197
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
198
+
199
+ def forward(self, pixel_values: Optional[torch.FloatTensor]) -> Tuple[torch.Tensor]:
200
+ embeddings = self.patch_embeddings(pixel_values)
201
+ embeddings = self.norm(embeddings)
202
+
203
+ embeddings = self.dropout(embeddings)
204
+
205
+ return embeddings
206
+
207
+
208
+ # Copied from transformers.models.nat.modeling_nat.NatPatchEmbeddings with Nat->Dinat
209
+ class DinatPatchEmbeddings(nn.Module):
210
+ """
211
+ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
212
+ `hidden_states` (patch embeddings) of shape `(batch_size, height, width, hidden_size)` to be consumed by a
213
+ Transformer.
214
+ """
215
+
216
+ def __init__(self, config):
217
+ super().__init__()
218
+ patch_size = config.patch_size
219
+ num_channels, hidden_size = config.num_channels, config.embed_dim
220
+ self.num_channels = num_channels
221
+
222
+ if patch_size == 4:
223
+ pass
224
+ else:
225
+ # TODO: Support arbitrary patch sizes.
226
+ raise ValueError("Dinat only supports patch size of 4 at the moment.")
227
+
228
+ self.projection = nn.Sequential(
229
+ nn.Conv2d(self.num_channels, hidden_size // 2, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)),
230
+ nn.Conv2d(hidden_size // 2, hidden_size, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)),
231
+ )
232
+
233
+ def forward(self, pixel_values: Optional[torch.FloatTensor]) -> torch.Tensor:
234
+ _, num_channels, height, width = pixel_values.shape
235
+ if num_channels != self.num_channels:
236
+ raise ValueError(
237
+ "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
238
+ )
239
+ embeddings = self.projection(pixel_values)
240
+ embeddings = embeddings.permute(0, 2, 3, 1)
241
+
242
+ return embeddings
243
+
244
+
245
+ # Copied from transformers.models.nat.modeling_nat.NatDownsampler with Nat->Dinat
246
+ class DinatDownsampler(nn.Module):
247
+ """
248
+ Convolutional Downsampling Layer.
249
+
250
+ Args:
251
+ dim (`int`):
252
+ Number of input channels.
253
+ norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`):
254
+ Normalization layer class.
255
+ """
256
+
257
+ def __init__(self, dim: int, norm_layer: nn.Module = nn.LayerNorm) -> None:
258
+ super().__init__()
259
+ self.dim = dim
260
+ self.reduction = nn.Conv2d(dim, 2 * dim, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
261
+ self.norm = norm_layer(2 * dim)
262
+
263
+ def forward(self, input_feature: torch.Tensor) -> torch.Tensor:
264
+ input_feature = self.reduction(input_feature.permute(0, 3, 1, 2)).permute(0, 2, 3, 1)
265
+ input_feature = self.norm(input_feature)
266
+ return input_feature
267
+
268
+
269
+ # Copied from transformers.models.beit.modeling_beit.drop_path
270
+ def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
271
+ """
272
+ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
273
+
274
+ Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
275
+ however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
276
+ See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
277
+ layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
278
+ argument.
279
+ """
280
+ if drop_prob == 0.0 or not training:
281
+ return input
282
+ keep_prob = 1 - drop_prob
283
+ shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
284
+ random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
285
+ random_tensor.floor_() # binarize
286
+ output = input.div(keep_prob) * random_tensor
287
+ return output
288
+
289
+
290
+ # Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->Dinat
291
+ class DinatDropPath(nn.Module):
292
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
293
+
294
+ def __init__(self, drop_prob: Optional[float] = None) -> None:
295
+ super().__init__()
296
+ self.drop_prob = drop_prob
297
+
298
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
299
+ return drop_path(hidden_states, self.drop_prob, self.training)
300
+
301
+ def extra_repr(self) -> str:
302
+ return "p={}".format(self.drop_prob)
303
+
304
+
305
+ class NeighborhoodAttention(nn.Module):
306
+ def __init__(self, config, dim, num_heads, kernel_size, dilation):
307
+ super().__init__()
308
+ if dim % num_heads != 0:
309
+ raise ValueError(
310
+ f"The hidden size ({dim}) is not a multiple of the number of attention heads ({num_heads})"
311
+ )
312
+
313
+ self.num_attention_heads = num_heads
314
+ self.attention_head_size = int(dim / num_heads)
315
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
316
+ self.kernel_size = kernel_size
317
+ self.dilation = dilation
318
+
319
+ # rpb is learnable relative positional biases; same concept is used Swin.
320
+ self.rpb = nn.Parameter(torch.zeros(num_heads, (2 * self.kernel_size - 1), (2 * self.kernel_size - 1)))
321
+
322
+ self.query = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
323
+ self.key = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
324
+ self.value = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
325
+
326
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
327
+
328
+ # Copied from transformers.models.nat.modeling_nat.NeighborhoodAttention.transpose_for_scores with Nat->Dinat
329
+ def transpose_for_scores(self, x):
330
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
331
+ x = x.view(new_x_shape)
332
+ return x.permute(0, 3, 1, 2, 4)
333
+
334
+ def forward(
335
+ self,
336
+ hidden_states: torch.Tensor,
337
+ output_attentions: Optional[bool] = False,
338
+ ) -> Tuple[torch.Tensor]:
339
+ query_layer = self.transpose_for_scores(self.query(hidden_states))
340
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
341
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
342
+
343
+ # Apply the scale factor before computing attention weights. It's usually more efficient because
344
+ # attention weights are typically a bigger tensor compared to query.
345
+ # It gives identical results because scalars are commutable in matrix multiplication.
346
+ query_layer = query_layer / math.sqrt(self.attention_head_size)
347
+
348
+ # Compute NA between "query" and "key" to get the raw attention scores, and add relative positional biases.
349
+ attention_scores = natten2dqkrpb(query_layer, key_layer, self.rpb, self.kernel_size, self.dilation)
350
+
351
+ # Normalize the attention scores to probabilities.
352
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
353
+
354
+ # This is actually dropping out entire tokens to attend to, which might
355
+ # seem a bit unusual, but is taken from the original Transformer paper.
356
+ attention_probs = self.dropout(attention_probs)
357
+
358
+ context_layer = natten2dav(attention_probs, value_layer, self.kernel_size, self.dilation)
359
+ context_layer = context_layer.permute(0, 2, 3, 1, 4).contiguous()
360
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
361
+ context_layer = context_layer.view(new_context_layer_shape)
362
+
363
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
364
+
365
+ return outputs
366
+
367
+
368
+ # Copied from transformers.models.nat.modeling_nat.NeighborhoodAttentionOutput
369
+ class NeighborhoodAttentionOutput(nn.Module):
370
+ def __init__(self, config, dim):
371
+ super().__init__()
372
+ self.dense = nn.Linear(dim, dim)
373
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
374
+
375
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
376
+ hidden_states = self.dense(hidden_states)
377
+ hidden_states = self.dropout(hidden_states)
378
+
379
+ return hidden_states
380
+
381
+
382
+ class NeighborhoodAttentionModule(nn.Module):
383
+ def __init__(self, config, dim, num_heads, kernel_size, dilation):
384
+ super().__init__()
385
+ self.self = NeighborhoodAttention(config, dim, num_heads, kernel_size, dilation)
386
+ self.output = NeighborhoodAttentionOutput(config, dim)
387
+ self.pruned_heads = set()
388
+
389
+ # Copied from transformers.models.nat.modeling_nat.NeighborhoodAttentionModule.prune_heads
390
+ def prune_heads(self, heads):
391
+ if len(heads) == 0:
392
+ return
393
+ heads, index = find_pruneable_heads_and_indices(
394
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
395
+ )
396
+
397
+ # Prune linear layers
398
+ self.self.query = prune_linear_layer(self.self.query, index)
399
+ self.self.key = prune_linear_layer(self.self.key, index)
400
+ self.self.value = prune_linear_layer(self.self.value, index)
401
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
402
+
403
+ # Update hyper params and store pruned heads
404
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
405
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
406
+ self.pruned_heads = self.pruned_heads.union(heads)
407
+
408
+ # Copied from transformers.models.nat.modeling_nat.NeighborhoodAttentionModule.forward
409
+ def forward(
410
+ self,
411
+ hidden_states: torch.Tensor,
412
+ output_attentions: Optional[bool] = False,
413
+ ) -> Tuple[torch.Tensor]:
414
+ self_outputs = self.self(hidden_states, output_attentions)
415
+ attention_output = self.output(self_outputs[0], hidden_states)
416
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
417
+ return outputs
418
+
419
+
420
+ # Copied from transformers.models.nat.modeling_nat.NatIntermediate with Nat->Dinat
421
+ class DinatIntermediate(nn.Module):
422
+ def __init__(self, config, dim):
423
+ super().__init__()
424
+ self.dense = nn.Linear(dim, int(config.mlp_ratio * dim))
425
+ if isinstance(config.hidden_act, str):
426
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
427
+ else:
428
+ self.intermediate_act_fn = config.hidden_act
429
+
430
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
431
+ hidden_states = self.dense(hidden_states)
432
+ hidden_states = self.intermediate_act_fn(hidden_states)
433
+ return hidden_states
434
+
435
+
436
+ # Copied from transformers.models.nat.modeling_nat.NatOutput with Nat->Dinat
437
+ class DinatOutput(nn.Module):
438
+ def __init__(self, config, dim):
439
+ super().__init__()
440
+ self.dense = nn.Linear(int(config.mlp_ratio * dim), dim)
441
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
442
+
443
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
444
+ hidden_states = self.dense(hidden_states)
445
+ hidden_states = self.dropout(hidden_states)
446
+ return hidden_states
447
+
448
+
449
+ class DinatLayer(nn.Module):
450
+ def __init__(self, config, dim, num_heads, dilation, drop_path_rate=0.0):
451
+ super().__init__()
452
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
453
+ self.kernel_size = config.kernel_size
454
+ self.dilation = dilation
455
+ self.window_size = self.kernel_size * self.dilation
456
+ self.layernorm_before = nn.LayerNorm(dim, eps=config.layer_norm_eps)
457
+ self.attention = NeighborhoodAttentionModule(
458
+ config, dim, num_heads, kernel_size=self.kernel_size, dilation=self.dilation
459
+ )
460
+ self.drop_path = DinatDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity()
461
+ self.layernorm_after = nn.LayerNorm(dim, eps=config.layer_norm_eps)
462
+ self.intermediate = DinatIntermediate(config, dim)
463
+ self.output = DinatOutput(config, dim)
464
+ self.layer_scale_parameters = (
465
+ nn.Parameter(config.layer_scale_init_value * torch.ones((2, dim)), requires_grad=True)
466
+ if config.layer_scale_init_value > 0
467
+ else None
468
+ )
469
+
470
+ def maybe_pad(self, hidden_states, height, width):
471
+ window_size = self.window_size
472
+ pad_values = (0, 0, 0, 0, 0, 0)
473
+ if height < window_size or width < window_size:
474
+ pad_l = pad_t = 0
475
+ pad_r = max(0, window_size - width)
476
+ pad_b = max(0, window_size - height)
477
+ pad_values = (0, 0, pad_l, pad_r, pad_t, pad_b)
478
+ hidden_states = nn.functional.pad(hidden_states, pad_values)
479
+ return hidden_states, pad_values
480
+
481
+ def forward(
482
+ self,
483
+ hidden_states: torch.Tensor,
484
+ output_attentions: Optional[bool] = False,
485
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
486
+ batch_size, height, width, channels = hidden_states.size()
487
+ shortcut = hidden_states
488
+
489
+ hidden_states = self.layernorm_before(hidden_states)
490
+ # pad hidden_states if they are smaller than kernel size x dilation
491
+ hidden_states, pad_values = self.maybe_pad(hidden_states, height, width)
492
+
493
+ _, height_pad, width_pad, _ = hidden_states.shape
494
+
495
+ attention_outputs = self.attention(hidden_states, output_attentions=output_attentions)
496
+
497
+ attention_output = attention_outputs[0]
498
+
499
+ was_padded = pad_values[3] > 0 or pad_values[5] > 0
500
+ if was_padded:
501
+ attention_output = attention_output[:, :height, :width, :].contiguous()
502
+
503
+ if self.layer_scale_parameters is not None:
504
+ attention_output = self.layer_scale_parameters[0] * attention_output
505
+
506
+ hidden_states = shortcut + self.drop_path(attention_output)
507
+
508
+ layer_output = self.layernorm_after(hidden_states)
509
+ layer_output = self.output(self.intermediate(layer_output))
510
+
511
+ if self.layer_scale_parameters is not None:
512
+ layer_output = self.layer_scale_parameters[1] * layer_output
513
+
514
+ layer_output = hidden_states + self.drop_path(layer_output)
515
+
516
+ layer_outputs = (layer_output, attention_outputs[1]) if output_attentions else (layer_output,)
517
+ return layer_outputs
518
+
519
+
520
+ class DinatStage(nn.Module):
521
+ def __init__(self, config, dim, depth, num_heads, dilations, drop_path_rate, downsample):
522
+ super().__init__()
523
+ self.config = config
524
+ self.dim = dim
525
+ self.layers = nn.ModuleList(
526
+ [
527
+ DinatLayer(
528
+ config=config,
529
+ dim=dim,
530
+ num_heads=num_heads,
531
+ dilation=dilations[i],
532
+ drop_path_rate=drop_path_rate[i],
533
+ )
534
+ for i in range(depth)
535
+ ]
536
+ )
537
+
538
+ # patch merging layer
539
+ if downsample is not None:
540
+ self.downsample = downsample(dim=dim, norm_layer=nn.LayerNorm)
541
+ else:
542
+ self.downsample = None
543
+
544
+ self.pointing = False
545
+
546
+ # Copied from transformers.models.nat.modeling_nat.NatStage.forward
547
+ def forward(
548
+ self,
549
+ hidden_states: torch.Tensor,
550
+ output_attentions: Optional[bool] = False,
551
+ ) -> Tuple[torch.Tensor]:
552
+ _, height, width, _ = hidden_states.size()
553
+ for i, layer_module in enumerate(self.layers):
554
+ layer_outputs = layer_module(hidden_states, output_attentions)
555
+ hidden_states = layer_outputs[0]
556
+
557
+ hidden_states_before_downsampling = hidden_states
558
+ if self.downsample is not None:
559
+ hidden_states = self.downsample(hidden_states_before_downsampling)
560
+
561
+ stage_outputs = (hidden_states, hidden_states_before_downsampling)
562
+
563
+ if output_attentions:
564
+ stage_outputs += layer_outputs[1:]
565
+ return stage_outputs
566
+
567
+
568
+ class DinatEncoder(nn.Module):
569
+ def __init__(self, config):
570
+ super().__init__()
571
+ self.num_levels = len(config.depths)
572
+ self.config = config
573
+ dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]
574
+ self.levels = nn.ModuleList(
575
+ [
576
+ DinatStage(
577
+ config=config,
578
+ dim=int(config.embed_dim * 2**i_layer),
579
+ depth=config.depths[i_layer],
580
+ num_heads=config.num_heads[i_layer],
581
+ dilations=config.dilations[i_layer],
582
+ drop_path_rate=dpr[sum(config.depths[:i_layer]) : sum(config.depths[: i_layer + 1])],
583
+ downsample=DinatDownsampler if (i_layer < self.num_levels - 1) else None,
584
+ )
585
+ for i_layer in range(self.num_levels)
586
+ ]
587
+ )
588
+
589
+ # Copied from transformers.models.nat.modeling_nat.NatEncoder.forward with Nat->Dinat
590
+ def forward(
591
+ self,
592
+ hidden_states: torch.Tensor,
593
+ output_attentions: Optional[bool] = False,
594
+ output_hidden_states: Optional[bool] = False,
595
+ output_hidden_states_before_downsampling: Optional[bool] = False,
596
+ return_dict: Optional[bool] = True,
597
+ ) -> Union[Tuple, DinatEncoderOutput]:
598
+ all_hidden_states = () if output_hidden_states else None
599
+ all_reshaped_hidden_states = () if output_hidden_states else None
600
+ all_self_attentions = () if output_attentions else None
601
+
602
+ if output_hidden_states:
603
+ # rearrange b h w c -> b c h w
604
+ reshaped_hidden_state = hidden_states.permute(0, 3, 1, 2)
605
+ all_hidden_states += (hidden_states,)
606
+ all_reshaped_hidden_states += (reshaped_hidden_state,)
607
+
608
+ for i, layer_module in enumerate(self.levels):
609
+ layer_outputs = layer_module(hidden_states, output_attentions)
610
+
611
+ hidden_states = layer_outputs[0]
612
+ hidden_states_before_downsampling = layer_outputs[1]
613
+
614
+ if output_hidden_states and output_hidden_states_before_downsampling:
615
+ # rearrange b h w c -> b c h w
616
+ reshaped_hidden_state = hidden_states_before_downsampling.permute(0, 3, 1, 2)
617
+ all_hidden_states += (hidden_states_before_downsampling,)
618
+ all_reshaped_hidden_states += (reshaped_hidden_state,)
619
+ elif output_hidden_states and not output_hidden_states_before_downsampling:
620
+ # rearrange b h w c -> b c h w
621
+ reshaped_hidden_state = hidden_states.permute(0, 3, 1, 2)
622
+ all_hidden_states += (hidden_states,)
623
+ all_reshaped_hidden_states += (reshaped_hidden_state,)
624
+
625
+ if output_attentions:
626
+ all_self_attentions += layer_outputs[2:]
627
+
628
+ if not return_dict:
629
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
630
+
631
+ return DinatEncoderOutput(
632
+ last_hidden_state=hidden_states,
633
+ hidden_states=all_hidden_states,
634
+ attentions=all_self_attentions,
635
+ reshaped_hidden_states=all_reshaped_hidden_states,
636
+ )
637
+
638
+
639
+ class DinatPreTrainedModel(PreTrainedModel):
640
+ """
641
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
642
+ models.
643
+ """
644
+
645
+ config_class = DinatConfig
646
+ base_model_prefix = "dinat"
647
+ main_input_name = "pixel_values"
648
+
649
+ def _init_weights(self, module):
650
+ """Initialize the weights"""
651
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
652
+ # Slightly different from the TF version which uses truncated_normal for initialization
653
+ # cf https://github.com/pytorch/pytorch/pull/5617
654
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
655
+ if module.bias is not None:
656
+ module.bias.data.zero_()
657
+ elif isinstance(module, nn.LayerNorm):
658
+ module.bias.data.zero_()
659
+ module.weight.data.fill_(1.0)
660
+
661
+
662
+ DINAT_START_DOCSTRING = r"""
663
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
664
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
665
+ behavior.
666
+
667
+ Parameters:
668
+ config ([`DinatConfig`]): Model configuration class with all the parameters of the model.
669
+ Initializing with a config file does not load the weights associated with the model, only the
670
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
671
+ """
672
+
673
+ DINAT_INPUTS_DOCSTRING = r"""
674
+ Args:
675
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
676
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ViTImageProcessor.__call__`]
677
+ for details.
678
+
679
+ output_attentions (`bool`, *optional*):
680
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
681
+ tensors for more detail.
682
+ output_hidden_states (`bool`, *optional*):
683
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
684
+ more detail.
685
+ return_dict (`bool`, *optional*):
686
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
687
+ """
688
+
689
+
690
+ @add_start_docstrings(
691
+ "The bare Dinat Model transformer outputting raw hidden-states without any specific head on top.",
692
+ DINAT_START_DOCSTRING,
693
+ )
694
+ # Copied from transformers.models.nat.modeling_nat.NatModel with Nat->Dinat, NAT->DINAT
695
+ class DinatModel(DinatPreTrainedModel):
696
+ def __init__(self, config, add_pooling_layer=True):
697
+ super().__init__(config)
698
+
699
+ requires_backends(self, ["natten"])
700
+
701
+ self.config = config
702
+ self.num_levels = len(config.depths)
703
+ self.num_features = int(config.embed_dim * 2 ** (self.num_levels - 1))
704
+
705
+ self.embeddings = DinatEmbeddings(config)
706
+ self.encoder = DinatEncoder(config)
707
+
708
+ self.layernorm = nn.LayerNorm(self.num_features, eps=config.layer_norm_eps)
709
+ self.pooler = nn.AdaptiveAvgPool1d(1) if add_pooling_layer else None
710
+
711
+ # Initialize weights and apply final processing
712
+ self.post_init()
713
+
714
+ def get_input_embeddings(self):
715
+ return self.embeddings.patch_embeddings
716
+
717
+ def _prune_heads(self, heads_to_prune):
718
+ """
719
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
720
+ class PreTrainedModel
721
+ """
722
+ for layer, heads in heads_to_prune.items():
723
+ self.encoder.layer[layer].attention.prune_heads(heads)
724
+
725
+ @add_start_docstrings_to_model_forward(DINAT_INPUTS_DOCSTRING)
726
+ @add_code_sample_docstrings(
727
+ checkpoint=_CHECKPOINT_FOR_DOC,
728
+ output_type=DinatModelOutput,
729
+ config_class=_CONFIG_FOR_DOC,
730
+ modality="vision",
731
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
732
+ )
733
+ def forward(
734
+ self,
735
+ pixel_values: Optional[torch.FloatTensor] = None,
736
+ output_attentions: Optional[bool] = None,
737
+ output_hidden_states: Optional[bool] = None,
738
+ return_dict: Optional[bool] = None,
739
+ ) -> Union[Tuple, DinatModelOutput]:
740
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
741
+ output_hidden_states = (
742
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
743
+ )
744
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
745
+
746
+ if pixel_values is None:
747
+ raise ValueError("You have to specify pixel_values")
748
+
749
+ embedding_output = self.embeddings(pixel_values)
750
+
751
+ encoder_outputs = self.encoder(
752
+ embedding_output,
753
+ output_attentions=output_attentions,
754
+ output_hidden_states=output_hidden_states,
755
+ return_dict=return_dict,
756
+ )
757
+
758
+ sequence_output = encoder_outputs[0]
759
+ sequence_output = self.layernorm(sequence_output)
760
+
761
+ pooled_output = None
762
+ if self.pooler is not None:
763
+ pooled_output = self.pooler(sequence_output.flatten(1, 2).transpose(1, 2))
764
+ pooled_output = torch.flatten(pooled_output, 1)
765
+
766
+ if not return_dict:
767
+ output = (sequence_output, pooled_output) + encoder_outputs[1:]
768
+
769
+ return output
770
+
771
+ return DinatModelOutput(
772
+ last_hidden_state=sequence_output,
773
+ pooler_output=pooled_output,
774
+ hidden_states=encoder_outputs.hidden_states,
775
+ attentions=encoder_outputs.attentions,
776
+ reshaped_hidden_states=encoder_outputs.reshaped_hidden_states,
777
+ )
778
+
779
+
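The model above layer-normalizes the final `(batch, height, width, num_features)` feature map and, when `add_pooling_layer=True`, average-pools it into a single vector. A minimal usage sketch, assuming the optional `natten` backend is installed and using `shi-labs/dinat-mini-in1k-224` as an illustrative checkpoint name:

```python
# Minimal sketch: run DinatModel on one image and inspect the pooled output.
# Assumes `natten` is installed; the checkpoint name is an illustrative choice.
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, DinatModel

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = AutoImageProcessor.from_pretrained("shi-labs/dinat-mini-in1k-224")
model = DinatModel.from_pretrained("shi-labs/dinat-mini-in1k-224")

with torch.no_grad():
    outputs = model(**processor(images=image, return_tensors="pt"))

# last_hidden_state: (batch, height, width, num_features); pooler_output: (batch, num_features)
print(outputs.last_hidden_state.shape, outputs.pooler_output.shape)
```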
780
+ @add_start_docstrings(
781
+ """
782
+ Dinat Model transformer with an image classification head on top (a linear layer on top of the final hidden state
783
+ of the [CLS] token) e.g. for ImageNet.
784
+ """,
785
+ DINAT_START_DOCSTRING,
786
+ )
787
+ class DinatForImageClassification(DinatPreTrainedModel):
788
+ def __init__(self, config):
789
+ super().__init__(config)
790
+
791
+ requires_backends(self, ["natten"])
792
+
793
+ self.num_labels = config.num_labels
794
+ self.dinat = DinatModel(config)
795
+
796
+ # Classifier head
797
+ self.classifier = (
798
+ nn.Linear(self.dinat.num_features, config.num_labels) if config.num_labels > 0 else nn.Identity()
799
+ )
800
+
801
+ # Initialize weights and apply final processing
802
+ self.post_init()
803
+
804
+ @add_start_docstrings_to_model_forward(DINAT_INPUTS_DOCSTRING)
805
+ @add_code_sample_docstrings(
806
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
807
+ output_type=DinatImageClassifierOutput,
808
+ config_class=_CONFIG_FOR_DOC,
809
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
810
+ )
811
+ def forward(
812
+ self,
813
+ pixel_values: Optional[torch.FloatTensor] = None,
814
+ labels: Optional[torch.LongTensor] = None,
815
+ output_attentions: Optional[bool] = None,
816
+ output_hidden_states: Optional[bool] = None,
817
+ return_dict: Optional[bool] = None,
818
+ ) -> Union[Tuple, DinatImageClassifierOutput]:
819
+ r"""
820
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
821
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
822
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
823
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
824
+ """
825
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
826
+
827
+ outputs = self.dinat(
828
+ pixel_values,
829
+ output_attentions=output_attentions,
830
+ output_hidden_states=output_hidden_states,
831
+ return_dict=return_dict,
832
+ )
833
+
834
+ pooled_output = outputs[1]
835
+
836
+ logits = self.classifier(pooled_output)
837
+
838
+ loss = None
839
+ if labels is not None:
840
+ if self.config.problem_type is None:
841
+ if self.num_labels == 1:
842
+ self.config.problem_type = "regression"
843
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
844
+ self.config.problem_type = "single_label_classification"
845
+ else:
846
+ self.config.problem_type = "multi_label_classification"
847
+
848
+ if self.config.problem_type == "regression":
849
+ loss_fct = MSELoss()
850
+ if self.num_labels == 1:
851
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
852
+ else:
853
+ loss = loss_fct(logits, labels)
854
+ elif self.config.problem_type == "single_label_classification":
855
+ loss_fct = CrossEntropyLoss()
856
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
857
+ elif self.config.problem_type == "multi_label_classification":
858
+ loss_fct = BCEWithLogitsLoss()
859
+ loss = loss_fct(logits, labels)
860
+
861
+ if not return_dict:
862
+ output = (logits,) + outputs[2:]
863
+ return ((loss,) + output) if loss is not None else output
864
+
865
+ return DinatImageClassifierOutput(
866
+ loss=loss,
867
+ logits=logits,
868
+ hidden_states=outputs.hidden_states,
869
+ attentions=outputs.attentions,
870
+ reshaped_hidden_states=outputs.reshaped_hidden_states,
871
+ )
872
+
873
+
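When `labels` are provided, the classification head above infers `problem_type` from `num_labels` and the label dtype: MSE for a single regression target, cross-entropy for integer class labels, BCE-with-logits for multi-label targets. A hedged sketch of single-label inference and loss computation, reusing the illustrative checkpoint name from the sketch above:

```python
# Sketch: single-label image classification with DinatForImageClassification.
# Assumes `natten` is installed; checkpoint name and label index are illustrative.
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, DinatForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = AutoImageProcessor.from_pretrained("shi-labs/dinat-mini-in1k-224")
model = DinatForImageClassification.from_pretrained("shi-labs/dinat-mini-in1k-224")

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])

# Integer labels with num_labels > 1 trigger the single_label_classification
# (cross-entropy) branch of the forward above:
outputs = model(**inputs, labels=torch.tensor([281]))
print(outputs.loss)
```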
874
+ @add_start_docstrings(
875
+ "NAT backbone, to be used with frameworks like DETR and MaskFormer.",
876
+ DINAT_START_DOCSTRING,
877
+ )
878
+ class DinatBackbone(DinatPreTrainedModel, BackboneMixin):
879
+ def __init__(self, config):
880
+ super().__init__(config)
881
+ super()._init_backbone(config)
882
+
883
+ requires_backends(self, ["natten"])
884
+
885
+ self.embeddings = DinatEmbeddings(config)
886
+ self.encoder = DinatEncoder(config)
887
+ self.num_features = [config.embed_dim] + [int(config.embed_dim * 2**i) for i in range(len(config.depths))]
888
+
889
+ # Add layer norms to hidden states of out_features
890
+ hidden_states_norms = {}
891
+ for stage, num_channels in zip(self._out_features, self.channels):
892
+ hidden_states_norms[stage] = nn.LayerNorm(num_channels)
893
+ self.hidden_states_norms = nn.ModuleDict(hidden_states_norms)
894
+
895
+ # Initialize weights and apply final processing
896
+ self.post_init()
897
+
898
+ def get_input_embeddings(self):
899
+ return self.embeddings.patch_embeddings
900
+
901
+ @add_start_docstrings_to_model_forward(DINAT_INPUTS_DOCSTRING)
902
+ @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
903
+ def forward(
904
+ self,
905
+ pixel_values: torch.Tensor,
906
+ output_hidden_states: Optional[bool] = None,
907
+ output_attentions: Optional[bool] = None,
908
+ return_dict: Optional[bool] = None,
909
+ ) -> BackboneOutput:
910
+ """
911
+ Returns:
912
+
913
+ Examples:
914
+
915
+ ```python
916
+ >>> from transformers import AutoImageProcessor, AutoBackbone
917
+ >>> import torch
918
+ >>> from PIL import Image
919
+ >>> import requests
920
+
921
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
922
+ >>> image = Image.open(requests.get(url, stream=True).raw)
923
+
924
+ >>> processor = AutoImageProcessor.from_pretrained("shi-labs/nat-mini-in1k-224")
925
+ >>> model = AutoBackbone.from_pretrained(
926
+ ... "shi-labs/nat-mini-in1k-224", out_features=["stage1", "stage2", "stage3", "stage4"]
927
+ ... )
928
+
929
+ >>> inputs = processor(image, return_tensors="pt")
930
+
931
+ >>> outputs = model(**inputs)
932
+
933
+ >>> feature_maps = outputs.feature_maps
934
+ >>> list(feature_maps[-1].shape)
935
+ [1, 512, 7, 7]
936
+ ```"""
937
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
938
+ output_hidden_states = (
939
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
940
+ )
941
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
942
+
943
+ embedding_output = self.embeddings(pixel_values)
944
+
945
+ outputs = self.encoder(
946
+ embedding_output,
947
+ output_attentions=output_attentions,
948
+ output_hidden_states=True,
949
+ output_hidden_states_before_downsampling=True,
950
+ return_dict=True,
951
+ )
952
+
953
+ hidden_states = outputs.reshaped_hidden_states
954
+
955
+ feature_maps = ()
956
+ for stage, hidden_state in zip(self.stage_names, hidden_states):
957
+ if stage in self.out_features:
958
+ batch_size, num_channels, height, width = hidden_state.shape
959
+ hidden_state = hidden_state.permute(0, 2, 3, 1).contiguous()
960
+ hidden_state = hidden_state.view(batch_size, height * width, num_channels)
961
+ hidden_state = self.hidden_states_norms[stage](hidden_state)
962
+ hidden_state = hidden_state.view(batch_size, height, width, num_channels)
963
+ hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous()
964
+ feature_maps += (hidden_state,)
965
+
966
+ if not return_dict:
967
+ output = (feature_maps,)
968
+ if output_hidden_states:
969
+ output += (outputs.hidden_states,)
970
+ return output
971
+
972
+ return BackboneOutput(
973
+ feature_maps=feature_maps,
974
+ hidden_states=outputs.hidden_states if output_hidden_states else None,
975
+ attentions=outputs.attentions,
976
+ )
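The backbone's forward normalizes each selected stage by moving the feature map to channels-last, applying a `LayerNorm` over the channel dimension, and permuting back. A small self-contained sketch of that round trip (all shapes are illustrative):

```python
# Sketch of the channels-first -> channels-last -> channels-first round trip
# used to layer-normalize backbone feature maps; the shapes are illustrative.
import torch
from torch import nn

batch_size, num_channels, height, width = 2, 64, 14, 14
hidden_state = torch.randn(batch_size, num_channels, height, width)
norm = nn.LayerNorm(num_channels)

x = hidden_state.permute(0, 2, 3, 1).contiguous()      # (B, H, W, C)
x = x.view(batch_size, height * width, num_channels)   # (B, H*W, C): normalize over C
x = norm(x)
x = x.view(batch_size, height, width, num_channels)
feature_map = x.permute(0, 3, 1, 2).contiguous()       # back to (B, C, H, W)
print(feature_map.shape)  # torch.Size([2, 64, 14, 14])
```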
venv/lib/python3.10/site-packages/transformers/models/nllb/__init__.py ADDED
@@ -0,0 +1,64 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import (
17
+ OptionalDependencyNotAvailable,
18
+ _LazyModule,
19
+ is_sentencepiece_available,
20
+ is_tokenizers_available,
21
+ is_torch_available,
22
+ )
23
+
24
+
25
+ _import_structure = {}
26
+
27
+ try:
28
+ if not is_sentencepiece_available():
29
+ raise OptionalDependencyNotAvailable()
30
+ except OptionalDependencyNotAvailable:
31
+ pass
32
+ else:
33
+ _import_structure["tokenization_nllb"] = ["NllbTokenizer"]
34
+
35
+ try:
36
+ if not is_tokenizers_available():
37
+ raise OptionalDependencyNotAvailable()
38
+ except OptionalDependencyNotAvailable:
39
+ pass
40
+ else:
41
+ _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]
42
+
43
+
44
+ if TYPE_CHECKING:
45
+ try:
46
+ if not is_sentencepiece_available():
47
+ raise OptionalDependencyNotAvailable()
48
+ except OptionalDependencyNotAvailable:
49
+ pass
50
+ else:
51
+ from .tokenization_nllb import NllbTokenizer
52
+
53
+ try:
54
+ if not is_tokenizers_available():
55
+ raise OptionalDependencyNotAvailable()
56
+ except OptionalDependencyNotAvailable:
57
+ pass
58
+ else:
59
+ from .tokenization_nllb_fast import NllbTokenizerFast
60
+
61
+ else:
62
+ import sys
63
+
64
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
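The `__init__` above only registers each tokenizer when its optional backend can be imported, and `_LazyModule` defers the real import until the attribute is first accessed. A hedged sketch of how a caller can mirror these guards before importing the NLLB tokenizers:

```python
# Sketch: guarding the NLLB tokenizer imports on their optional backends,
# mirroring the availability checks used in the __init__ above.
from transformers.utils import is_sentencepiece_available, is_tokenizers_available

if is_tokenizers_available():
    from transformers import NllbTokenizerFast as NllbTok  # fast, `tokenizers`-backed
elif is_sentencepiece_available():
    from transformers import NllbTokenizer as NllbTok      # slow, SentencePiece-backed
else:
    raise ImportError("Install `tokenizers` or `sentencepiece` to use the NLLB tokenizers.")

tokenizer = NllbTok.from_pretrained("facebook/nllb-200-distilled-600M")
print(type(tokenizer).__name__)
```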
venv/lib/python3.10/site-packages/transformers/models/nllb/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (936 Bytes).
 
venv/lib/python3.10/site-packages/transformers/models/nllb/__pycache__/tokenization_nllb.cpython-310.pyc ADDED
Binary file (18.2 kB).
 
venv/lib/python3.10/site-packages/transformers/models/nllb/__pycache__/tokenization_nllb_fast.cpython-310.pyc ADDED
Binary file (13.9 kB).
 
venv/lib/python3.10/site-packages/transformers/models/nllb/tokenization_nllb.py ADDED
@@ -0,0 +1,433 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The Facebook AI Research Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import os
17
+ from shutil import copyfile
18
+ from typing import Any, Dict, List, Optional, Tuple
19
+
20
+ import sentencepiece as spm
21
+
22
+ from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
23
+ from ...utils import logging
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+ SPIECE_UNDERLINE = "▁"
29
+
30
+ VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
31
+
32
+
33
+ FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn'] # fmt: skip
34
+
35
+
36
+ class NllbTokenizer(PreTrainedTokenizer):
37
+ """
38
+ Construct an NLLB tokenizer.
39
+
40
+ Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on
41
+ [SentencePiece](https://github.com/google/sentencepiece).
42
+
43
+ The tokenization method is `<tokens> <eos> <language code>` for source language documents, and `<language code>
44
+ <tokens> <eos>` for target language documents.
45
+
46
+ Examples:
47
+
48
+ ```python
49
+ >>> from transformers import NllbTokenizer
50
+
51
+ >>> tokenizer = NllbTokenizer.from_pretrained(
52
+ ... "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
53
+ ... )
54
+ >>> example_english_phrase = " UN Chief Says There Is No Military Solution in Syria"
55
+ >>> expected_translation_french = "Le chef de l'ONU affirme qu'il n'y a pas de solution militaire en Syrie."
56
+ >>> inputs = tokenizer(example_english_phrase, text_target=expected_translation_french, return_tensors="pt")
57
+ ```
58
+
59
+ Args:
60
+ vocab_file (`str`):
61
+ Path to the vocabulary file.
62
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
63
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
64
+
65
+ <Tip>
66
+
67
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
68
+ sequence. The token used is the `cls_token`.
69
+
70
+ </Tip>
71
+
72
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
73
+ The end of sequence token.
74
+
75
+ <Tip>
76
+
77
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
78
+ The token used is the `sep_token`.
79
+
80
+ </Tip>
81
+
82
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
83
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
84
+ sequence classification or for a text and a question for question answering. It is also used as the last
85
+ token of a sequence built with special tokens.
86
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
87
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
88
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
89
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
90
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
91
+ token instead.
92
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
93
+ The token used for padding, for example when batching sequences of different lengths.
94
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
95
+ The token used for masking values. This is the token used when training this model with masked language
96
+ modeling. This is the token which the model will try to predict.
97
+ tokenizer_file (`str`, *optional*):
98
+ The path to a tokenizer file to use instead of the vocab file.
99
+ src_lang (`str`, *optional*):
100
+ The language to use as source language for translation.
101
+ tgt_lang (`str`, *optional*):
102
+ The language to use as target language for translation.
103
+ sp_model_kwargs (`Dict[str, str]`):
104
+ Additional keyword arguments to pass to the `SentencePieceProcessor` initialization.
105
+ """
106
+
107
+ vocab_files_names = VOCAB_FILES_NAMES
108
+ model_input_names = ["input_ids", "attention_mask"]
109
+
110
+ prefix_tokens: List[int] = []
111
+ suffix_tokens: List[int] = []
112
+
113
+ def __init__(
114
+ self,
115
+ vocab_file,
116
+ bos_token="<s>",
117
+ eos_token="</s>",
118
+ sep_token="</s>",
119
+ cls_token="<s>",
120
+ unk_token="<unk>",
121
+ pad_token="<pad>",
122
+ mask_token="<mask>",
123
+ tokenizer_file=None,
124
+ src_lang=None,
125
+ tgt_lang=None,
126
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
127
+ additional_special_tokens=None,
128
+ legacy_behaviour=False,
129
+ **kwargs,
130
+ ):
131
+ if additional_special_tokens is None:
132
+ additional_special_tokens = FAIRSEQ_LANGUAGE_CODES
133
+ bos_token = AddedToken(bos_token, normalized=False, special=True) if isinstance(bos_token, str) else bos_token
134
+ pad_token = AddedToken(pad_token, normalized=False, special=True) if isinstance(pad_token, str) else pad_token
135
+ eos_token = AddedToken(eos_token, normalized=False, special=True) if isinstance(eos_token, str) else eos_token
136
+ unk_token = AddedToken(unk_token, normalized=False, special=True) if isinstance(unk_token, str) else unk_token
137
+ # Mask token behave like a normal word, i.e. include the space before it
138
+ mask_token = (
139
+ AddedToken(mask_token, normalized=True, lstrip=True, special=True)
140
+ if isinstance(mask_token, str)
141
+ else mask_token
142
+ )
143
+
144
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
145
+ self.legacy_behaviour = legacy_behaviour
146
+
147
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
148
+ self.sp_model.Load(str(vocab_file))
149
+ self.vocab_file = vocab_file
150
+ # Original fairseq vocab and spm vocab must be "aligned":
151
+ # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
152
+ # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
153
+ # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
154
+ # spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
155
+
156
+ # unk token needs to be in the vocab with correct index
157
+ self._added_tokens_decoder = {0: bos_token, 1: pad_token, 2: eos_token, 3: unk_token}
158
+ # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
159
+ self.fairseq_offset = 1
160
+ self.sp_model_size = len(self.sp_model)
161
+
162
+ # Everything that follows is kept for BC and will be removed in v4.38
163
+ self._fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
164
+ language_codes = FAIRSEQ_LANGUAGE_CODES if additional_special_tokens is None else additional_special_tokens
165
+ self._lang_code_to_id = {
166
+ code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(language_codes)
167
+ }
168
+ self._id_to_lang_code = {v: k for k, v in self._lang_code_to_id.items()}
169
+ self._fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
170
+
171
+ self._fairseq_tokens_to_ids.update(self.lang_code_to_id)
172
+ self._fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
173
+
174
+ super().__init__(
175
+ bos_token=bos_token,
176
+ eos_token=eos_token,
177
+ unk_token=unk_token,
178
+ sep_token=sep_token,
179
+ cls_token=cls_token,
180
+ pad_token=pad_token,
181
+ mask_token=mask_token,
182
+ tokenizer_file=tokenizer_file,
183
+ src_lang=src_lang,
184
+ tgt_lang=tgt_lang,
185
+ additional_special_tokens=additional_special_tokens,
186
+ sp_model_kwargs=self.sp_model_kwargs,
187
+ legacy_behaviour=legacy_behaviour,
188
+ **kwargs,
189
+ )
190
+
191
+ self._src_lang = src_lang if src_lang is not None else "eng_Latn"
192
+ self.cur_lang_code_id = self.convert_tokens_to_ids(self._src_lang)
193
+ self.tgt_lang = tgt_lang
194
+ self.set_src_lang_special_tokens(self._src_lang)
195
+
196
+ def __getstate__(self):
197
+ state = self.__dict__.copy()
198
+ state["sp_model"] = None
199
+ state["sp_model_proto"] = self.sp_model.serialized_model_proto()
200
+ return state
201
+
202
+ def __setstate__(self, d):
203
+ self.__dict__ = d
204
+
205
+ # for backward compatibility
206
+ if not hasattr(self, "sp_model_kwargs"):
207
+ self.sp_model_kwargs = {}
208
+
209
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
210
+ self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
211
+
212
+ @property
213
+ def vocab_size(self):
214
+ return len(self.sp_model) + self.fairseq_offset
215
+
216
+ @property
217
+ def src_lang(self) -> str:
218
+ return self._src_lang
219
+
220
+ @property
221
+ def lang_code_to_id(self):
222
+ logger.warning_once(
223
+ "the `lang_code_to_id` attribute is deprecated. The logic is natively handled in the `tokenizer.added_tokens_decoder`;"
224
+ " this attribute will be removed in `transformers` v4.38"
225
+ )
226
+ return self._lang_code_to_id
227
+
228
+ @property
229
+ def fairseq_tokens_to_ids(self):
230
+ logger.warning_once(
231
+ "the `fairseq_tokens_to_ids` attribute is deprecated. The logic is natively handled in the `tokenizer.added_tokens_decoder`;"
232
+ " this attribute will be removed in `transformers` v4.38"
233
+ )
234
+ return self._fairseq_tokens_to_ids
235
+
236
+ @property
237
+ def id_to_lang_code(self):
238
+ logger.warning_once(
239
+ "the `id_to_lang_code` attribute is deprecated. The logic is natively handled in the `tokenizer.added_tokens_decoder`;"
240
+ " this attribute will be removed in `transformers` v4.38"
241
+ )
242
+ return self._id_to_lang_code
243
+
244
+ @property
245
+ def fairseq_ids_to_tokens(self):
246
+ logger.warning_once(
247
+ "the `_fairseq_ids_to_tokens` attribute is deprecated. The logic is natively handled in the `tokenizer.added_tokens_decoder`;"
248
+ " this attribute will be removed in `transformers` v4.38"
249
+ )
250
+ return self._fairseq_ids_to_tokens
251
+
252
+ @src_lang.setter
253
+ def src_lang(self, new_src_lang: str) -> None:
254
+ self._src_lang = new_src_lang
255
+ self.set_src_lang_special_tokens(self._src_lang)
256
+
257
+ def get_special_tokens_mask(
258
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
259
+ ) -> List[int]:
260
+ """
261
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
262
+ special tokens using the tokenizer `prepare_for_model` method.
263
+
264
+ Args:
265
+ token_ids_0 (`List[int]`):
266
+ List of IDs.
267
+ token_ids_1 (`List[int]`, *optional*):
268
+ Optional second list of IDs for sequence pairs.
269
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
270
+ Whether or not the token list is already formatted with special tokens for the model.
271
+
272
+ Returns:
273
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
274
+ """
275
+
276
+ if already_has_special_tokens:
277
+ return super().get_special_tokens_mask(
278
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
279
+ )
280
+
281
+ prefix_ones = [1] * len(self.prefix_tokens)
282
+ suffix_ones = [1] * len(self.suffix_tokens)
283
+ if token_ids_1 is None:
284
+ return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
285
+ return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
286
+
287
+ def build_inputs_with_special_tokens(
288
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
289
+ ) -> List[int]:
290
+ """
291
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
292
+ adding special tokens. An NLLB sequence has the following format, where `X` represents the sequence:
293
+
294
+ - `input_ids` (for encoder) `X [eos, src_lang_code]`
295
+ - `decoder_input_ids`: (for decoder) `X [eos, tgt_lang_code]`
296
+
297
+ BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a
298
+ separator.
299
+
300
+ Args:
301
+ token_ids_0 (`List[int]`):
302
+ List of IDs to which the special tokens will be added.
303
+ token_ids_1 (`List[int]`, *optional*):
304
+ Optional second list of IDs for sequence pairs.
305
+
306
+ Returns:
307
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
308
+ """
309
+ if token_ids_1 is None:
310
+ return self.prefix_tokens + token_ids_0 + self.suffix_tokens
311
+ # We don't expect to process pairs, but leave the pair logic for API consistency
312
+ return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
313
+
314
+ def create_token_type_ids_from_sequences(
315
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
316
+ ) -> List[int]:
317
+ """
318
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. nllb does not
319
+ make use of token type ids, therefore a list of zeros is returned.
320
+
321
+ Args:
322
+ token_ids_0 (`List[int]`):
323
+ List of IDs.
324
+ token_ids_1 (`List[int]`, *optional*):
325
+ Optional second list of IDs for sequence pairs.
326
+
327
+ Returns:
328
+ `List[int]`: List of zeros.
329
+
330
+ """
331
+
332
+ sep = [self.sep_token_id]
333
+ cls = [self.cls_token_id]
334
+
335
+ if token_ids_1 is None:
336
+ return len(cls + token_ids_0 + sep) * [0]
337
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
338
+
339
+ def _build_translation_inputs(
340
+ self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
341
+ ):
342
+ """Used by translation pipeline, to prepare inputs for the generate function"""
343
+ if src_lang is None or tgt_lang is None:
344
+ raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
345
+ self.src_lang = src_lang
346
+ inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
347
+ tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
348
+ inputs["forced_bos_token_id"] = tgt_lang_id
349
+ return inputs
350
+
351
+ def get_vocab(self):
352
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
353
+ vocab.update(self.added_tokens_encoder)
354
+ return vocab
355
+
356
+ def _tokenize(self, text: str) -> List[str]:
357
+ return self.sp_model.encode(text, out_type=str)
358
+
359
+ def _convert_token_to_id(self, token):
360
+ """Converts a token (str) in an id using the vocab."""
361
+ spm_id = self.sp_model.PieceToId(token)
362
+ # Need to return unknown token if the SP model returned 0
363
+ return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
364
+
365
+ def _convert_id_to_token(self, index):
366
+ """Converts an index (integer) in a token (str) using the vocab."""
367
+ return self.sp_model.IdToPiece(index - self.fairseq_offset)
368
+
369
+ def convert_tokens_to_string(self, tokens):
370
+ """Converts a sequence of tokens (strings for sub-words) in a single string."""
371
+ out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
372
+ return out_string
373
+
374
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
375
+ if not os.path.isdir(save_directory):
376
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
377
+ return
378
+ out_vocab_file = os.path.join(
379
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
380
+ )
381
+
382
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
383
+ copyfile(self.vocab_file, out_vocab_file)
384
+ elif not os.path.isfile(self.vocab_file):
385
+ with open(out_vocab_file, "wb") as fi:
386
+ content_spiece_model = self.sp_model.serialized_model_proto()
387
+ fi.write(content_spiece_model)
388
+
389
+ return (out_vocab_file,)
390
+
391
+ def prepare_seq2seq_batch(
392
+ self,
393
+ src_texts: List[str],
394
+ src_lang: str = "eng_Latn",
395
+ tgt_texts: Optional[List[str]] = None,
396
+ tgt_lang: str = "fra_Latn",
397
+ **kwargs,
398
+ ) -> BatchEncoding:
399
+ self.src_lang = src_lang
400
+ self.tgt_lang = tgt_lang
401
+ return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
402
+
403
+ def _switch_to_input_mode(self):
404
+ return self.set_src_lang_special_tokens(self.src_lang)
405
+
406
+ def _switch_to_target_mode(self):
407
+ return self.set_tgt_lang_special_tokens(self.tgt_lang)
408
+
409
+ def set_src_lang_special_tokens(self, src_lang) -> None:
410
+ """Reset the special tokens to the source lang setting.
411
+ - In legacy mode: No prefix and suffix=[eos, src_lang_code].
412
+ - In default mode: Prefix=[src_lang_code], suffix = [eos]
413
+ """
414
+ self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
415
+ if self.legacy_behaviour:
416
+ self.prefix_tokens = []
417
+ self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
418
+ else:
419
+ self.prefix_tokens = [self.cur_lang_code]
420
+ self.suffix_tokens = [self.eos_token_id]
421
+
422
+ def set_tgt_lang_special_tokens(self, lang: str) -> None:
423
+ """Reset the special tokens to the target lang setting.
424
+ - In legacy mode: No prefix and suffix=[eos, tgt_lang_code].
425
+ - In default mode: Prefix=[tgt_lang_code], suffix = [eos]
426
+ """
427
+ self.cur_lang_code = self.convert_tokens_to_ids(lang)
428
+ if self.legacy_behaviour:
429
+ self.prefix_tokens = []
430
+ self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
431
+ else:
432
+ self.prefix_tokens = [self.cur_lang_code]
433
+ self.suffix_tokens = [self.eos_token_id]
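In the default (non-legacy) mode above, encoding prefixes the source language code and appends `</s>`, while `_build_translation_inputs` additionally records the target language id as `forced_bos_token_id` for generation. A hedged sketch, requiring `sentencepiece` and using the checkpoint named in the class docstring:

```python
# Sketch: special-token layout of the slow NLLB tokenizer in default mode.
# Requires `sentencepiece`; uses the checkpoint named in the docstring above.
from transformers import NllbTokenizer

tok = NllbTokenizer.from_pretrained(
    "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
)

ids = tok("Hello world")["input_ids"]
tokens = tok.convert_ids_to_tokens(ids)
print(tokens[0], tokens[-1])  # 'eng_Latn' ... '</s>'

# What the translation pipeline does under the hood: force the target language
# code as the first generated token.
inputs = tok._build_translation_inputs(
    "Hello world", return_tensors="pt", src_lang="eng_Latn", tgt_lang="fra_Latn"
)
print(inputs["forced_bos_token_id"] == tok.convert_tokens_to_ids("fra_Latn"))  # True
```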
venv/lib/python3.10/site-packages/transformers/models/nllb/tokenization_nllb_fast.py ADDED
@@ -0,0 +1,340 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The Facebook AI Research Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import os
17
+ from shutil import copyfile
18
+ from typing import List, Optional, Tuple
19
+
20
+ from tokenizers import processors
21
+
22
+ from ...tokenization_utils import AddedToken, BatchEncoding
23
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
24
+ from ...utils import is_sentencepiece_available, logging
25
+
26
+
27
+ if is_sentencepiece_available():
28
+ from .tokenization_nllb import NllbTokenizer
29
+ else:
30
+ NllbTokenizer = None
31
+
32
+
33
+ logger = logging.get_logger(__name__)
34
+
35
+
36
+ VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
37
+
38
+
39
+ FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn'] # fmt: skip
40
+
41
+
42
+ class NllbTokenizerFast(PreTrainedTokenizerFast):
43
+ """
44
+ Construct a "fast" NLLB tokenizer (backed by HuggingFace's *tokenizers* library). Based on
45
+ [BPE](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=BPE#models).
46
+
47
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
48
+ refer to this superclass for more information regarding those methods.
49
+
50
+ The tokenization method is `<tokens> <eos> <language code>` for source language documents, and `<language code>
51
+ <tokens> <eos>` for target language documents.
52
+
53
+ Examples:
54
+
55
+ ```python
56
+ >>> from transformers import NllbTokenizerFast
57
+
58
+ >>> tokenizer = NllbTokenizerFast.from_pretrained(
59
+ ... "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
60
+ ... )
61
+ >>> example_english_phrase = " UN Chief Says There Is No Military Solution in Syria"
62
+ >>> expected_translation_french = "Le chef de l'ONU affirme qu'il n'y a pas de solution militaire en Syrie."
63
+ >>> inputs = tokenizer(example_english_phrase, text_target=expected_translation_french, return_tensors="pt")
64
+ ```
65
+
66
+ Args:
67
+ vocab_file (`str`):
68
+ Path to the vocabulary file.
69
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
70
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
71
+
72
+ <Tip>
73
+
74
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
75
+ sequence. The token used is the `cls_token`.
76
+
77
+ </Tip>
78
+
79
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
80
+ The end of sequence token.
81
+
82
+ <Tip>
83
+
84
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
85
+ The token used is the `sep_token`.
86
+
87
+ </Tip>
88
+
89
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
90
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
91
+ sequence classification or for a text and a question for question answering. It is also used as the last
92
+ token of a sequence built with special tokens.
93
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
94
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
95
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
96
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
97
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
98
+ token instead.
99
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
100
+ The token used for padding, for example when batching sequences of different lengths.
101
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
102
+ The token used for masking values. This is the token used when training this model with masked language
103
+ modeling. This is the token which the model will try to predict.
104
+ tokenizer_file (`str`, *optional*):
105
+ The path to a tokenizer file to use instead of the vocab file.
106
+ src_lang (`str`, *optional*):
107
+ The language to use as source language for translation.
108
+ tgt_lang (`str`, *optional*):
109
+ The language to use as target language for translation.
110
+ """
111
+
112
+ vocab_files_names = VOCAB_FILES_NAMES
113
+ model_input_names = ["input_ids", "attention_mask"]
114
+ slow_tokenizer_class = NllbTokenizer
115
+
116
+ prefix_tokens: List[int] = []
117
+ suffix_tokens: List[int] = []
118
+
119
+ def __init__(
120
+ self,
121
+ vocab_file=None,
122
+ tokenizer_file=None,
123
+ bos_token="<s>",
124
+ eos_token="</s>",
125
+ sep_token="</s>",
126
+ cls_token="<s>",
127
+ unk_token="<unk>",
128
+ pad_token="<pad>",
129
+ mask_token="<mask>",
130
+ src_lang=None,
131
+ tgt_lang=None,
132
+ additional_special_tokens=None,
133
+ legacy_behaviour=False,
134
+ **kwargs,
135
+ ):
136
+ if additional_special_tokens is None:
137
+ additional_special_tokens = FAIRSEQ_LANGUAGE_CODES
138
+
139
+ self.vocab_file = vocab_file
140
+ # Mask token behave like a normal word, i.e. include the space before it
141
+ mask_token = (
142
+ AddedToken(mask_token, normalized=True, lstrip=True, special=True)
143
+ if isinstance(mask_token, str)
144
+ else mask_token
145
+ )
146
+ self.legacy_behaviour = legacy_behaviour
147
+ super().__init__(
148
+ vocab_file=vocab_file,
149
+ tokenizer_file=tokenizer_file,
150
+ bos_token=bos_token,
151
+ eos_token=eos_token,
152
+ sep_token=sep_token,
153
+ cls_token=cls_token,
154
+ unk_token=unk_token,
155
+ pad_token=pad_token,
156
+ src_lang=src_lang,
157
+ tgt_lang=tgt_lang,
158
+ mask_token=mask_token,
159
+ additional_special_tokens=additional_special_tokens,
160
+ legacy_behaviour=legacy_behaviour,
161
+ **kwargs,
162
+ )
163
+
164
+ self._lang_code_to_id = {
165
+ lang_code: self.convert_tokens_to_ids(str(lang_code)) for lang_code in additional_special_tokens
166
+ }
167
+
168
+ self._src_lang = src_lang if src_lang is not None else "eng_Latn"
169
+ self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
170
+ self.tgt_lang = tgt_lang
171
+ self.set_src_lang_special_tokens(self._src_lang)
172
+
173
+ @property
174
+ def lang_code_to_id(self):
175
+ logger.warning_once(
176
+ "the `lang_code_to_id` attribute is deprecated. The logic is natively handled in the `tokenizer.adder_tokens_decoder`"
177
+ " this attribute will be removed in `transformers` v4.38"
178
+ )
179
+ return self._lang_code_to_id
180
+
181
+ @property
182
+ def can_save_slow_tokenizer(self) -> bool:
183
+ return os.path.isfile(self.vocab_file) if self.vocab_file else False
184
+
185
+ @property
186
+ def src_lang(self) -> str:
187
+ return self._src_lang
188
+
189
+ @src_lang.setter
190
+ def src_lang(self, new_src_lang: str) -> None:
191
+ self._src_lang = new_src_lang
192
+ self.set_src_lang_special_tokens(self._src_lang)
193
+
194
+ def build_inputs_with_special_tokens(
195
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
196
+ ) -> List[int]:
197
+ """
198
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
199
+ adding special tokens. The special tokens depend on calling set_lang.
200
+
201
+ An NLLB sequence has the following format, where `X` represents the sequence:
202
+
203
+ - `input_ids` (for encoder) `X [eos, src_lang_code]`
204
+ - `decoder_input_ids`: (for decoder) `X [eos, tgt_lang_code]`
205
+
206
+ BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a
207
+ separator.
208
+
209
+ Args:
210
+ token_ids_0 (`List[int]`):
211
+ List of IDs to which the special tokens will be added.
212
+ token_ids_1 (`List[int]`, *optional*):
213
+ Optional second list of IDs for sequence pairs.
214
+
215
+ Returns:
216
+ `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.
217
+ """
218
+ if token_ids_1 is None:
219
+ return self.prefix_tokens + token_ids_0 + self.suffix_tokens
220
+ # We don't expect to process pairs, but leave the pair logic for API consistency
221
+ return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
222
+
223
+ def create_token_type_ids_from_sequences(
224
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
225
+ ) -> List[int]:
226
+ """
227
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. nllb does not
228
+ make use of token type ids, therefore a list of zeros is returned.
229
+
230
+ Args:
231
+ token_ids_0 (`List[int]`):
232
+ List of IDs.
233
+ token_ids_1 (`List[int]`, *optional*):
234
+ Optional second list of IDs for sequence pairs.
235
+
236
+ Returns:
237
+ `List[int]`: List of zeros.
238
+
239
+ """
240
+
241
+ sep = [self.sep_token_id]
242
+ cls = [self.cls_token_id]
243
+
244
+ if token_ids_1 is None:
245
+ return len(cls + token_ids_0 + sep) * [0]
246
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
247
+
248
+ def _build_translation_inputs(
249
+ self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
250
+ ):
251
+ """Used by translation pipeline, to prepare inputs for the generate function"""
252
+ if src_lang is None or tgt_lang is None:
253
+ raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
254
+ self.src_lang = src_lang
255
+ inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
256
+ tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
257
+ inputs["forced_bos_token_id"] = tgt_lang_id
258
+ return inputs
259
+
260
+ def prepare_seq2seq_batch(
261
+ self,
262
+ src_texts: List[str],
263
+ src_lang: str = "eng_Latn",
264
+ tgt_texts: Optional[List[str]] = None,
265
+ tgt_lang: str = "fra_Latn",
266
+ **kwargs,
267
+ ) -> BatchEncoding:
268
+ self.src_lang = src_lang
269
+ self.tgt_lang = tgt_lang
270
+ return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
271
+
272
+ def _switch_to_input_mode(self):
273
+ return self.set_src_lang_special_tokens(self.src_lang)
274
+
275
+ def _switch_to_target_mode(self):
276
+ return self.set_tgt_lang_special_tokens(self.tgt_lang)
277
+
278
+ def set_src_lang_special_tokens(self, src_lang) -> None:
279
+ """Reset the special tokens to the source lang setting.
280
+ - In legacy mode: No prefix and suffix=[eos, src_lang_code].
281
+ - In default mode: Prefix=[src_lang_code], suffix = [eos]
282
+ """
283
+ self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
284
+
285
+ if self.legacy_behaviour:
286
+ self.prefix_tokens = []
287
+ self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
288
+ else:
289
+ self.prefix_tokens = [self.cur_lang_code]
290
+ self.suffix_tokens = [self.eos_token_id]
291
+
292
+ prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
293
+ suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
294
+
295
+ self._tokenizer.post_processor = processors.TemplateProcessing(
296
+ single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
297
+ pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
298
+ special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
299
+ )
300
+
301
+ def set_tgt_lang_special_tokens(self, lang: str) -> None:
302
+ """Reset the special tokens to the target lang setting.
303
+ - In legacy mode: No prefix and suffix=[eos, tgt_lang_code].
304
+ - In default mode: Prefix=[tgt_lang_code], suffix = [eos]
305
+ """
306
+ self.cur_lang_code = self.convert_tokens_to_ids(lang)
307
+ if self.legacy_behaviour:
308
+ self.prefix_tokens = []
309
+ self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
310
+ else:
311
+ self.prefix_tokens = [self.cur_lang_code]
312
+ self.suffix_tokens = [self.eos_token_id]
313
+
314
+ prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
315
+ suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
316
+
317
+ self._tokenizer.post_processor = processors.TemplateProcessing(
318
+ single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
319
+ pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
320
+ special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
321
+ )
322
+
323
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
324
+ if not self.can_save_slow_tokenizer:
325
+ raise ValueError(
326
+ "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
327
+ "tokenizer."
328
+ )
329
+
330
+ if not os.path.isdir(save_directory):
331
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
332
+ return
333
+ out_vocab_file = os.path.join(
334
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
335
+ )
336
+
337
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
338
+ copyfile(self.vocab_file, out_vocab_file)
339
+
340
+ return (out_vocab_file,)
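Because the fast tokenizer rebuilds its `TemplateProcessing` post-processor whenever the source or target language changes, encoding `text_target` switches to the target-language prefix automatically. A hedged sketch, using the checkpoint named in the class docstring:

```python
# Sketch: the fast tokenizer swaps its post-processor when switching languages,
# so source and target texts receive their respective language codes.
from transformers import NllbTokenizerFast

tok = NllbTokenizerFast.from_pretrained(
    "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
)

batch = tok(
    "UN Chief Says There Is No Military Solution in Syria",
    text_target="Le chef de l'ONU affirme qu'il n'y a pas de solution militaire en Syrie.",
)
print(tok.convert_ids_to_tokens(batch["input_ids"])[0])  # 'eng_Latn'
print(tok.convert_ids_to_tokens(batch["labels"])[0])     # 'fra_Latn'
```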
venv/lib/python3.10/site-packages/transformers/models/nllb_moe/__init__.py ADDED
@@ -0,0 +1,68 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
18
+
19
+
20
+ _import_structure = {
21
+ "configuration_nllb_moe": [
22
+ "NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
23
+ "NllbMoeConfig",
24
+ ]
25
+ }
26
+
27
+ try:
28
+ if not is_torch_available():
29
+ raise OptionalDependencyNotAvailable()
30
+ except OptionalDependencyNotAvailable:
31
+ pass
32
+ else:
33
+ _import_structure["modeling_nllb_moe"] = [
34
+ "NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
35
+ "NllbMoeForConditionalGeneration",
36
+ "NllbMoeModel",
37
+ "NllbMoePreTrainedModel",
38
+ "NllbMoeTop2Router",
39
+ "NllbMoeSparseMLP",
40
+ ]
41
+
42
+
43
+ if TYPE_CHECKING:
44
+ from .configuration_nllb_moe import (
45
+ NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
46
+ NllbMoeConfig,
47
+ )
48
+
49
+ try:
50
+ if not is_torch_available():
51
+ raise OptionalDependencyNotAvailable()
52
+ except OptionalDependencyNotAvailable:
53
+ pass
54
+ else:
55
+ from .modeling_nllb_moe import (
56
+ NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
57
+ NllbMoeForConditionalGeneration,
58
+ NllbMoeModel,
59
+ NllbMoePreTrainedModel,
60
+ NllbMoeSparseMLP,
61
+ NllbMoeTop2Router,
62
+ )
63
+
64
+
65
+ else:
66
+ import sys
67
+
68
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
venv/lib/python3.10/site-packages/transformers/models/nllb_moe/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.03 kB).
 
venv/lib/python3.10/site-packages/transformers/models/nllb_moe/__pycache__/configuration_nllb_moe.cpython-310.pyc ADDED
Binary file (9.48 kB).
 
venv/lib/python3.10/site-packages/transformers/models/nllb_moe/__pycache__/convert_nllb_moe_sharded_original_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (4.46 kB).
 
venv/lib/python3.10/site-packages/transformers/models/nllb_moe/__pycache__/modeling_nllb_moe.cpython-310.pyc ADDED
Binary file (56.6 kB).
 
venv/lib/python3.10/site-packages/transformers/models/nllb_moe/configuration_nllb_moe.py ADDED
@@ -0,0 +1,218 @@
1
+ # coding=utf-8
2
+ # Copyright 2023, HuggingFace Inc.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ NLLB-MoE model configuration"""
16
+ from ...configuration_utils import PretrainedConfig
17
+ from ...utils import logging
18
+
19
+
20
+ logger = logging.get_logger(__name__)
21
+
22
+
23
+ from ..deprecated._archive_maps import NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
24
+
25
+
26
+ class NllbMoeConfig(PretrainedConfig):
27
+ r"""
28
+ This is the configuration class to store the configuration of a [`NllbMoeModel`]. It is used to instantiate an
29
+ NLLB-MoE model according to the specified arguments, defining the model architecture. Instantiating a configuration
30
+ with the defaults will yield a similar configuration to that of the NLLB-MoE
31
+ [facebook/nllb-moe-54b](https://huggingface.co/facebook/nllb-moe-54b) architecture.
32
+
33
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
34
+ documentation from [`PretrainedConfig`] for more information.
35
+
36
+
37
+ Args:
38
+ vocab_size (`int`, *optional*, defaults to 50265):
39
+ Vocabulary size of the NllbMoe model. Defines the number of different tokens that can be represented by the
40
+ `inputs_ids` passed when calling [`NllbMoeModel`] or
41
+ d_model (`int`, *optional*, defaults to 1024):
42
+ Dimensionality of the layers and the pooler layer.
43
+ encoder_layers (`int`, *optional*, defaults to 12):
44
+ Number of encoder layers.
45
+ decoder_layers (`int`, *optional*, defaults to 12):
46
+ Number of decoder layers.
47
+ encoder_attention_heads (`int`, *optional*, defaults to 16):
48
+ Number of attention heads for each attention layer in the Transformer encoder.
49
+ decoder_attention_heads (`int`, *optional*, defaults to 16):
50
+ Number of attention heads for each attention layer in the Transformer decoder.
51
+ decoder_ffn_dim (`int`, *optional*, defaults to 4096):
52
+ Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
53
+ encoder_ffn_dim (`int`, *optional*, defaults to 4096):
54
+ Dimensionality of the "intermediate" (often named feed-forward) layer in encoder.
55
+ activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
56
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
57
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
58
+ dropout (`float`, *optional*, defaults to 0.1):
59
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
60
+ attention_dropout (`float`, *optional*, defaults to 0.0):
61
+ The dropout ratio for the attention probabilities.
62
+ activation_dropout (`float`, *optional*, defaults to 0.0):
63
+ The dropout ratio for activations inside the fully connected layer.
64
+ classifier_dropout (`float`, *optional*, defaults to 0.0):
65
+ The dropout ratio for classifier.
66
+ max_position_embeddings (`int`, *optional*, defaults to 1024):
67
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
68
+ just in case (e.g., 512 or 1024 or 2048).
69
+ init_std (`float`, *optional*, defaults to 0.02):
70
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
71
+ encoder_layerdrop (`float`, *optional*, defaults to 0.0):
72
+ The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556)
73
+ for more details.
74
+ decoder_layerdrop (`float`, *optional*, defaults to 0.0):
75
+ The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556)
76
+ for more details.
77
+ second_expert_policy ( `str`, *optional*, default to `"all"`):
78
+ The policy used for the sampling the probability of being sampled to a second expert for each token.
79
+ normalize_router_prob_before_dropping (`bool`, *optional*, defaults to `False`):
80
+ Whether or not to normalize the router probabilities before applying a mask based on the experts capacity
81
+ (capacity dropping).
82
+ batch_prioritized_routing (`bool`, *optional*, defaults to `False`):
83
+ Whether or not to order the tokens by their router probabilities before capacity dropping. This means that
84
+ the tokens that have the highest probabilities will be routed before other tokens that might be further in
85
+ the sequence.
86
+ moe_eval_capacity_token_fraction (`float`, *optional*, defaults to 1.0):
87
+ Fraction of tokens used as expert capacity during validation. If set to a negative value, the training
88
+ capacity is used instead. Should be in the range (0.0, 1.0].
89
+ num_experts (`int`, *optional*, defaults to 128):
90
+ Number of experts for each NllbMoeSparseMlp layer.
91
+ expert_capacity (`int`, *optional*, defaults to 64):
92
+ Number of tokens that can be stored in each expert.
93
+ encoder_sparse_step (`int`, *optional*, defaults to 4):
94
+ Frequency of the sparse layers in the encoder. 4 means that one out of 4 layers will be sparse.
95
+ decoder_sparse_step (`int`, *optional*, defaults to 4):
96
+ Frequency of the sparse layers in the decoder. 4 means that one out of 4 layers will be sparse.
97
+ router_dtype (`str`, *optional*, defaults to `"float32"`):
98
+ The `dtype` used for the routers. It is preferable to keep the `dtype` to `"float32"` as specified in the
99
+ *selective precision* discussion in [the paper](https://arxiv.org/abs/2101.03961).
100
+ router_ignore_padding_tokens (`bool`, *optional*, defaults to `False`):
101
+ Whether to ignore padding tokens when routing. If `False`, the padding tokens are not routed to any
102
+ experts.
103
+ router_bias (`bool`, *optional*, defaults to `False`):
104
+ Whether or not the classifier of the router should have a bias.
105
+ moe_token_dropout (`float`, *optional*, defaults to 0.2):
106
+ Masking rate for MoE expert output masking (EOM), which is implemented via a Dropout2d on the expert
107
+ outputs.
108
+ output_router_logits (`bool`, *optional*, defaults to `False`):
109
+ Whether or not to return the router logits. Only set to `True` to get the auxiliary loss when training.
110
+ use_cache (`bool`, *optional*, defaults to `True`):
111
+ Whether or not the model should return the last key/values attentions (not used by all models).
112
+
113
+ Example:
114
+
115
+ ```python
116
+ >>> from transformers import NllbMoeModel, NllbMoeConfig
117
+
118
+ >>> # Initializing a NllbMoe facebook/nllb-moe-54b style configuration
119
+ >>> configuration = NllbMoeConfig()
120
+
121
+ >>> # Initializing a model from the facebook/nllb-moe-54b style configuration
122
+ >>> model = NllbMoeModel(configuration)
123
+
124
+ >>> # Accessing the model configuration
125
+ >>> configuration = model.config
126
+ ```"""
127
+
128
+ model_type = "nllb-moe"
129
+ keys_to_ignore_at_inference = ["past_key_values"]
130
+ attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
131
+
132
+ def __init__(
133
+ self,
134
+ vocab_size=128112,
135
+ max_position_embeddings=1024,
136
+ encoder_layers=12,
137
+ encoder_ffn_dim=4096,
138
+ encoder_attention_heads=16,
139
+ decoder_layers=12,
140
+ decoder_ffn_dim=4096,
141
+ decoder_attention_heads=16,
142
+ encoder_layerdrop=0.05,
143
+ decoder_layerdrop=0.05,
144
+ use_cache=True,
145
+ is_encoder_decoder=True,
146
+ activation_function="relu",
147
+ d_model=1024,
148
+ dropout=0.1,
149
+ attention_dropout=0.1,
150
+ activation_dropout=0.0,
151
+ init_std=0.02,
152
+ decoder_start_token_id=2,
153
+ scale_embedding=True,
154
+ router_bias=False,
155
+ router_dtype="float32",
156
+ router_ignore_padding_tokens=False,
157
+ num_experts=128,
158
+ expert_capacity=64,
159
+ encoder_sparse_step=4,
160
+ decoder_sparse_step=4,
161
+ router_z_loss_coef=0.001,
162
+ router_aux_loss_coef=0.001,
163
+ second_expert_policy="all",
164
+ normalize_router_prob_before_dropping=False,
165
+ batch_prioritized_routing=False,
166
+ moe_eval_capacity_token_fraction=1.0,
167
+ moe_token_dropout=0.2,
168
+ pad_token_id=1,
169
+ bos_token_id=0,
170
+ eos_token_id=2,
171
+ output_router_logits=False,
172
+ **kwargs,
173
+ ):
174
+ self.vocab_size = vocab_size
175
+ self.max_position_embeddings = max_position_embeddings
176
+ self.d_model = d_model
177
+ self.encoder_ffn_dim = encoder_ffn_dim
178
+ self.encoder_layers = encoder_layers
179
+ self.encoder_attention_heads = encoder_attention_heads
180
+ self.decoder_ffn_dim = decoder_ffn_dim
181
+ self.decoder_layers = decoder_layers
182
+ self.decoder_attention_heads = decoder_attention_heads
183
+ self.dropout = dropout
184
+ self.attention_dropout = attention_dropout
185
+ self.activation_dropout = activation_dropout
186
+ self.activation_function = activation_function
187
+ self.init_std = init_std
188
+ self.encoder_layerdrop = encoder_layerdrop
189
+ self.decoder_layerdrop = decoder_layerdrop
190
+ self.use_cache = use_cache
191
+ self.num_hidden_layers = encoder_layers
192
+ self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
193
+ self.router_z_loss_coef = router_z_loss_coef
194
+ self.router_aux_loss_coef = router_aux_loss_coef
195
+ self.decoder_sparse_step = decoder_sparse_step
196
+ self.encoder_sparse_step = encoder_sparse_step
197
+ self.num_experts = num_experts
198
+ self.expert_capacity = expert_capacity
199
+ self.router_bias = router_bias
200
+ if router_dtype not in ["float32", "float16", "bfloat16"]:
201
+ raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
202
+ self.router_dtype = router_dtype
203
+
204
+ self.router_ignore_padding_tokens = router_ignore_padding_tokens
205
+ self.batch_prioritized_routing = batch_prioritized_routing
206
+ self.second_expert_policy = second_expert_policy
207
+ self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
208
+ self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
209
+ self.moe_token_dropout = moe_token_dropout
210
+ self.output_router_logits = output_router_logits
211
+ super().__init__(
212
+ pad_token_id=pad_token_id,
213
+ bos_token_id=bos_token_id,
214
+ eos_token_id=eos_token_id,
215
+ is_encoder_decoder=is_encoder_decoder,
216
+ decoder_start_token_id=decoder_start_token_id,
217
+ **kwargs,
218
+ )
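To make the interplay of the MoE arguments above concrete, here is a minimal sketch (the parameter values are arbitrary and chosen only for illustration, they do not correspond to any released checkpoint) that builds a tiny configuration in which every second layer is sparse:

```python
from transformers import NllbMoeConfig

# A deliberately tiny configuration: every 2nd layer is sparse and each
# sparse layer holds 4 experts with room for 16 tokens per expert.
config = NllbMoeConfig(
    vocab_size=1024,
    d_model=64,
    encoder_layers=4,
    decoder_layers=4,
    encoder_ffn_dim=128,
    decoder_ffn_dim=128,
    num_experts=4,
    expert_capacity=16,
    encoder_sparse_step=2,
    decoder_sparse_step=2,
    router_dtype="float32",
)
print(config.num_experts, config.encoder_sparse_step)  # 4 2
```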
venv/lib/python3.10/site-packages/transformers/models/nllb_moe/convert_nllb_moe_sharded_original_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,160 @@
1
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import argparse
15
+ import json
16
+ import os
17
+
18
+ import torch
19
+ from torch import nn
20
+
21
+ from transformers import NllbMoeConfig, NllbMoeModel
22
+ from transformers.modeling_utils import dtype_byte_size
23
+ from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
24
+
25
+
26
+ def remove_ignore_keys_(state_dict):
27
+ ignore_keys = [
28
+ "encoder.version",
29
+ "decoder.version",
30
+ "model.encoder.version",
31
+ "model.decoder.version",
32
+ "decoder.output_projection.weight",
33
+ "_float_tensor",
34
+ "encoder.embed_positions._float_tensor",
35
+ "decoder.embed_positions._float_tensor",
36
+ ]
37
+ for k in ignore_keys:
38
+ state_dict.pop(k, None)
39
+
40
+
41
+ def make_linear_from_emb(emb):
42
+ vocab_size, emb_size = emb.weight.shape
43
+ lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
44
+ lin_layer.weight.data = emb.weight.data
45
+ return lin_layer
46
+
47
+
48
+ def rename_fairseq_keys(state_dict, expert_idx=None):
49
+ new_dict = {}
50
+ for old_key in state_dict.keys():
51
+ key = old_key
52
+ if "moe_layer.experts." in key:
53
+ if expert_idx is not None:
54
+ key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
55
+ else:
56
+ key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
57
+ if "gate" in key:
58
+ key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
59
+ if "fc2" in key and "experts" not in key:
60
+ key = key.replace(".fc2.", ".ffn.fc2.")
61
+ if "fc1" in key and "experts" not in key:
62
+ key = key.replace(".fc1.", ".ffn.fc1.")
63
+ if ".encoder_attn." in key:
64
+ key = key.replace(".encoder_attn.", ".cross_attention.")
65
+ if "encoder_attn_layer_norm" in key:
66
+ key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
67
+ if "final_layer_norm" in key:
68
+ key = key.replace("final_layer_norm", "ff_layer_norm")
69
+ new_dict[key] = state_dict[old_key]
70
+ return new_dict
71
+
72
+
73
+ def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
74
+ sharded_state_dicts = []
75
+ total_size = 0
76
+ os.makedirs(dump_path, exist_ok=True)
77
+
78
+ for expert in range(num_experts):
79
+ expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
80
+ if os.path.isfile(expert_path):
81
+ expert_state = torch.load(expert_path)["model"]
82
+ remove_ignore_keys_(expert_state)
83
+ expert_state = rename_fairseq_keys(expert_state, expert)
84
+ save_path = os.path.join(
85
+ dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
86
+ )
87
+ torch.save(expert_state, save_path)
88
+ sharded_state_dicts.append(expert_state.keys())
89
+ total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
90
+ expert_state[list(expert_state)[0]].dtype
91
+ )
92
+
93
+ # Add the last block
94
+ save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
95
+ shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
96
+ remove_ignore_keys_(shared_weights)
97
+ shared_weights = rename_fairseq_keys(shared_weights, None)
98
+ shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
99
+ sharded_state_dicts.append(shared_weights.keys())
100
+
101
+ # If we only have the shared weights (dummy model/experts saved on the same file)
102
+ if len(sharded_state_dicts) == 1:
103
+ save_path = os.path.join(dump_path, weights_name)
104
+ torch.save(shared_weights, save_path)
105
+ return {weights_name: sharded_state_dicts[0]}, None
106
+ else:
107
+ torch.save(shared_weights, save_path)
108
+ # Otherwise, let's build the index
109
+ weight_map = {}
110
+ for idx, shard in enumerate(sharded_state_dicts):
111
+ shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
112
+ temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
113
+ os.rename(temp_filename, os.path.join(dump_path, shard_file))
114
+ for key in shard:
115
+ weight_map[key] = shard_file
116
+
117
+ # Add the metadata
118
+ metadata = {"total_size": total_size}
119
+ index = {"metadata": metadata, "weight_map": weight_map}
120
+
121
+ with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
122
+ content = json.dumps(index, indent=2, sort_keys=True) + "\n"
123
+ f.write(content)
124
+
125
+ return metadata, index
126
+
127
+
128
+ if __name__ == "__main__":
129
+ parser = argparse.ArgumentParser()
130
+ # Required parameters
131
+ parser.add_argument(
132
+ "--nllb_moe_checkpoint_path",
133
+ default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
134
+ type=str,
135
+ required=False,
136
+ help="Path to a directory containing a folder per layer. Follows the original fairseq format.",
137
+ )
138
+ parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
139
+ parser.add_argument(
140
+ "--pytorch_dump_folder_path",
141
+ default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
142
+ type=str,
143
+ required=False,
144
+ help="Path to the output pytorch model.",
145
+ )
146
+ args = parser.parse_args()
147
+ metadata, index = shard_on_the_fly(
148
+ args.nllb_moe_checkpoint_path,
149
+ args.pytorch_dump_folder_path,
150
+ 128,
151
+ args.dtype,
152
+ )
153
+
154
+ config = NllbMoeConfig.from_pretrained(
155
+ "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
156
+ )
157
+ config.save_pretrained(args.pytorch_dump_folder_path)
158
+ model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
159
+ print("Done")
160
+ model.save_pretrained(args.pytorch_dump_folder_path)
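The index written by `shard_on_the_fly` follows the standard sharded-checkpoint layout: `WEIGHTS_INDEX_NAME` (`pytorch_model.bin.index.json`) maps every parameter name to the shard file that stores it. Below is a small sketch of inspecting such an index after running the script; the dump path is hypothetical:

```python
import json
import os
from collections import Counter

from transformers.utils import WEIGHTS_INDEX_NAME

dump_path = "/path/to/hf-converted-moe-54b"  # hypothetical: the --pytorch_dump_folder_path used above

with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), encoding="utf-8") as f:
    index = json.load(f)

print("total size (bytes):", index["metadata"]["total_size"])
# Count how many parameters each shard file ended up holding.
for shard_file, n_params in sorted(Counter(index["weight_map"].values()).items()):
    print(shard_file, n_params)
```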
venv/lib/python3.10/site-packages/transformers/models/nllb_moe/modeling_nllb_moe.py ADDED
@@ -0,0 +1,1792 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 NllbMoe Authors and HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch NLLB-MoE model."""
16
+
17
+
18
+ import math
19
+ from typing import List, Optional, Tuple, Union
20
+
21
+ import torch
22
+ import torch.nn as nn
23
+ from torch.nn import CrossEntropyLoss
24
+
25
+ from ...activations import ACT2FN
26
+ from ...integrations.deepspeed import is_deepspeed_zero3_enabled
27
+ from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask
28
+ from ...modeling_outputs import (
29
+ MoEModelOutput,
30
+ MoEModelOutputWithPastAndCrossAttentions,
31
+ Seq2SeqMoEModelOutput,
32
+ Seq2SeqMoEOutput,
33
+ )
34
+ from ...modeling_utils import PreTrainedModel
35
+ from ...utils import (
36
+ add_end_docstrings,
37
+ add_start_docstrings,
38
+ add_start_docstrings_to_model_forward,
39
+ logging,
40
+ replace_return_docstrings,
41
+ )
42
+ from .configuration_nllb_moe import NllbMoeConfig
43
+
44
+
45
+ logger = logging.get_logger(__name__)
46
+
47
+ _CONFIG_FOR_DOC = "NllbMoeConfig"
48
+ _CHECKPOINT_FOR_DOC = "hf-internal-testing/dummy-nllb-moe-2-experts"
49
+ _REAL_CHECKPOINT_FOR_DOC = "facebook/nllb-moe-54b"
50
+
51
+
52
+ ####################################################
53
+ # This dict contains ids and associated url
54
+ # for the pretrained weights provided with the models
55
+ ####################################################
56
+
57
+ from ..deprecated._archive_maps import NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
58
+
59
+
60
+ # Copied from transformers.models.bart.modeling_bart.shift_tokens_right
61
+ def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
62
+ """
63
+ Shift input ids one token to the right.
64
+ """
65
+ shifted_input_ids = input_ids.new_zeros(input_ids.shape)
66
+ shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
67
+ shifted_input_ids[:, 0] = decoder_start_token_id
68
+
69
+ if pad_token_id is None:
70
+ raise ValueError("self.model.config.pad_token_id has to be defined.")
71
+ # replace possible -100 values in labels by `pad_token_id`
72
+ shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
73
+
74
+ return shifted_input_ids
75
+
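A minimal illustration of `shift_tokens_right` (token values are made up): the labels are shifted one position to the right, `decoder_start_token_id` is prepended, and any `-100` ignore-index entries are replaced with `pad_token_id`. Note that this imports from an internal module path, which is an assumption about usage rather than public API:

```python
import torch

from transformers.models.nllb_moe.modeling_nllb_moe import shift_tokens_right

labels = torch.tensor([[5, -100, 7, 8]])
# Prepend the decoder start token (2) and replace the shifted -100 with the pad token (1).
print(shift_tokens_right(labels, pad_token_id=1, decoder_start_token_id=2))
# tensor([[2, 5, 1, 7]])
```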
76
+
77
+ # Copied from transformers.models.roberta.modeling_roberta.create_position_ids_from_input_ids
78
+ def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
79
+ """
80
+ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
81
+ are ignored. This is modified from fairseq's `utils.make_positions`.
82
+
83
+ Args:
84
+ input_ids (`torch.Tensor`): Input token ids; positions of tokens equal to `padding_idx` are ignored.
85
+
86
+ Returns: torch.Tensor
87
+ """
88
+ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
89
+ mask = input_ids.ne(padding_idx).int()
90
+ incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
91
+ return incremental_indices.long() + padding_idx
92
+
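A small worked example of the position ids this produces (again using the internal module path, token values arbitrary): non-padding tokens are numbered from `padding_idx + 1`, while padding tokens stay at `padding_idx`:

```python
import torch

from transformers.models.nllb_moe.modeling_nllb_moe import create_position_ids_from_input_ids

input_ids = torch.tensor([[7, 8, 9, 1, 1]])  # the last two tokens are padding (padding_idx = 1)
print(create_position_ids_from_input_ids(input_ids, padding_idx=1))
# tensor([[2, 3, 4, 1, 1]])
```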
93
+
94
+ def load_balancing_loss_func(router_probs: torch.Tensor, expert_indices: torch.Tensor) -> float:
95
+ r"""
96
+ Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch.
97
+
98
+ See Switch Transformer (https://arxiv.org/abs/2101.03961) for more details. This function implements the loss
99
+ function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
100
+ experts is too unbalanced.
101
+
102
+ Args:
103
+ router_probs (`torch.Tensor`):
104
+ Probability assigned to each expert per token. Shape: [batch_size, sequence_length, num_experts].
105
+ expert_indices (`torch.Tensor`):
106
+ Indices tensor of shape [batch_size, sequence_length] identifying the selected expert for a given token.
107
+
108
+ Returns:
109
+ The auxiliary loss.
110
+ """
111
+ if router_probs is None:
112
+ return 0
113
+
114
+ num_experts = router_probs.shape[-1]
115
+
116
+ # cast the expert indices to int64, otherwise one-hot encoding will fail
117
+ if expert_indices.dtype != torch.int64:
118
+ expert_indices = expert_indices.to(torch.int64)
119
+
120
+ if len(expert_indices.shape) == 2:
121
+ expert_indices = expert_indices.unsqueeze(2)
122
+
123
+ expert_mask = torch.nn.functional.one_hot(expert_indices, num_experts)
124
+
125
+ # For a given token, determine if it was routed to a given expert.
126
+ expert_mask = torch.max(expert_mask, axis=-2).values
127
+
128
+ # cast to float32 otherwise mean will fail
129
+ expert_mask = expert_mask.to(torch.float32)
130
+ tokens_per_group_and_expert = torch.mean(expert_mask, axis=-2)
131
+
132
+ router_prob_per_group_and_expert = torch.mean(router_probs, axis=-2)
133
+ return torch.mean(tokens_per_group_and_expert * router_prob_per_group_and_expert) * (num_experts**2)
134
+
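As a quick sanity check on this formula (a sketch, not part of the library): with uniform router probabilities and a perfectly balanced assignment of tokens to experts, each of the two averaged factors equals `1 / num_experts` per expert, so after scaling by `num_experts**2` the loss comes out to exactly 1.0.

```python
import torch

from transformers.models.nllb_moe.modeling_nllb_moe import load_balancing_loss_func

batch_size, seq_len, num_experts = 2, 8, 4
# Uniform router probabilities ...
router_probs = torch.full((batch_size, seq_len, num_experts), 1 / num_experts)
# ... and a perfectly balanced expert assignment (tokens cycle through the experts).
expert_indices = torch.arange(seq_len).remainder(num_experts).expand(batch_size, seq_len)
print(load_balancing_loss_func(router_probs, expert_indices))  # tensor(1.)
```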
135
+
136
+ # Copied from transformers.models.m2m_100.modeling_m2m_100.M2M100SinusoidalPositionalEmbedding
137
+ class NllbMoeSinusoidalPositionalEmbedding(nn.Module):
138
+ """This module produces sinusoidal positional embeddings of any length."""
139
+
140
+ def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None):
141
+ super().__init__()
142
+ self.offset = 2
143
+ self.embedding_dim = embedding_dim
144
+ self.padding_idx = padding_idx
145
+ self.make_weights(num_positions + self.offset, embedding_dim, padding_idx)
146
+
147
+ def make_weights(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None):
148
+ emb_weights = self.get_embedding(num_embeddings, embedding_dim, padding_idx)
149
+ if hasattr(self, "weights"):
150
+ # in forward put the weights on the correct dtype and device of the param
151
+ emb_weights = emb_weights.to(dtype=self.weights.dtype, device=self.weights.device)
152
+
153
+ self.register_buffer("weights", emb_weights, persistent=False)
154
+
155
+ @staticmethod
156
+ def get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None):
157
+ """
158
+ Build sinusoidal embeddings.
159
+
160
+ This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of
161
+ "Attention Is All You Need".
162
+ """
163
+ half_dim = embedding_dim // 2
164
+ emb = math.log(10000) / (half_dim - 1)
165
+ emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb)
166
+ emb = torch.arange(num_embeddings, dtype=torch.int64).float().unsqueeze(1) * emb.unsqueeze(0)
167
+ emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
168
+ if embedding_dim % 2 == 1:
169
+ # zero pad
170
+ emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
171
+ if padding_idx is not None:
172
+ emb[padding_idx, :] = 0
173
+
174
+ return emb.to(torch.get_default_dtype())
175
+
176
+ @torch.no_grad()
177
+ def forward(
178
+ self, input_ids: torch.Tensor = None, inputs_embeds: torch.Tensor = None, past_key_values_length: int = 0
179
+ ):
180
+ if input_ids is not None:
181
+ bsz, seq_len = input_ids.size()
182
+ # Create the position ids from the input token ids. Any padded tokens remain padded.
183
+ position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length).to(
184
+ input_ids.device
185
+ )
186
+ else:
187
+ bsz, seq_len = inputs_embeds.size()[:-1]
188
+ position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds, past_key_values_length)
189
+
190
+ # expand embeddings if needed
191
+ max_pos = self.padding_idx + 1 + seq_len + past_key_values_length
192
+ if max_pos > self.weights.size(0):
193
+ self.make_weights(max_pos + self.offset, self.embedding_dim, self.padding_idx)
194
+
195
+ return self.weights.index_select(0, position_ids.view(-1)).view(bsz, seq_len, self.weights.shape[-1]).detach()
196
+
197
+ def create_position_ids_from_inputs_embeds(self, inputs_embeds, past_key_values_length):
198
+ """
199
+ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
200
+
201
+ Args:
202
+ inputs_embeds: torch.Tensor
203
+
204
+ Returns: torch.Tensor
205
+ """
206
+ input_shape = inputs_embeds.size()[:-1]
207
+ sequence_length = input_shape[1]
208
+
209
+ position_ids = torch.arange(
210
+ self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
211
+ )
212
+ return position_ids.unsqueeze(0).expand(input_shape).contiguous() + past_key_values_length
213
+
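A small shape check for the positional embedding module (sizes are arbitrary, import path is the internal module): the output has shape `(batch, seq_len, embedding_dim)` and padding positions are mapped to the zeroed `padding_idx` row.

```python
import torch

from transformers.models.nllb_moe.modeling_nllb_moe import NllbMoeSinusoidalPositionalEmbedding

embed_positions = NllbMoeSinusoidalPositionalEmbedding(num_positions=64, embedding_dim=16, padding_idx=1)
input_ids = torch.tensor([[5, 6, 7, 1, 1]])  # the last two tokens are padding
pos_embeds = embed_positions(input_ids)
print(pos_embeds.shape)                      # torch.Size([1, 5, 16])
print(pos_embeds[0, -1].abs().sum().item())  # 0.0 -> padding positions get the zero vector
```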
214
+
215
+ class NllbMoeTop2Router(nn.Module):
216
+ """
217
+ Router using tokens choose top-2 experts assignment.
218
+
219
+ This router uses the same mechanism as in NLLB-MoE from the fairseq repository. Items are sorted by router_probs
220
+ and then routed to their choice of expert until the expert's expert_capacity is reached. **There is no guarantee
221
+ that each token is processed by an expert**, or that each expert receives at least one token.
222
+
223
+ The router combining weights are also returned to make sure that the states that are not updated will be masked.
224
+
225
+ """
226
+
227
+ def __init__(self, config: NllbMoeConfig):
228
+ super().__init__()
229
+ self.num_experts = config.num_experts
230
+ self.expert_capacity = config.expert_capacity
231
+ self.classifier = nn.Linear(config.hidden_size, self.num_experts, bias=config.router_bias)
232
+ self.router_ignore_padding_tokens = config.router_ignore_padding_tokens
233
+ self.dtype = getattr(torch, config.router_dtype)
234
+
235
+ self.second_expert_policy = config.second_expert_policy
236
+ self.normalize_router_prob_before_dropping = config.normalize_router_prob_before_dropping
237
+ self.batch_prioritized_routing = config.batch_prioritized_routing
238
+ self.moe_eval_capacity_token_fraction = config.moe_eval_capacity_token_fraction
239
+
240
+ def _cast_classifier(self):
241
+ r"""
242
+ `bitsandbytes` `Linear8bitLt` layers do not support manual casting. Therefore, we need to check whether they are an
243
+ instance of the `Linear8bitLt` class by checking special attributes.
244
+ """
245
+ if not (hasattr(self.classifier, "SCB") or hasattr(self.classifier, "CB")):
246
+ self.classifier = self.classifier.to(self.dtype)
247
+
248
+ def normalize_router_probabilities(self, router_probs, top_1_mask, top_2_mask):
249
+ top_1_max_probs = (router_probs * top_1_mask).sum(dim=1)
250
+ top_2_max_probs = (router_probs * top_2_mask).sum(dim=1)
251
+ denom_s = torch.clamp(top_1_max_probs + top_2_max_probs, min=torch.finfo(router_probs.dtype).eps)
252
+ top_1_max_probs = top_1_max_probs / denom_s
253
+ top_2_max_probs = top_2_max_probs / denom_s
254
+ return top_1_max_probs, top_2_max_probs
255
+
256
+ def route_tokens(
257
+ self,
258
+ router_logits: torch.Tensor,
259
+ input_dtype: torch.dtype = torch.float32,
260
+ padding_mask: Optional[torch.LongTensor] = None,
261
+ ) -> Tuple:
262
+ """
263
+ Computes the `dispatch_mask` and the `dispatch_weights` for each expert. The masks are adapted to the expert
264
+ capacity.
265
+ """
266
+ nb_tokens = router_logits.shape[0]
267
+ # Apply Softmax and cast back to the original `dtype`
268
+ router_probs = nn.functional.softmax(router_logits, dim=-1, dtype=self.dtype).to(input_dtype)
269
+ top_1_expert_index = torch.argmax(router_probs, dim=-1)
270
+ top_1_mask = torch.nn.functional.one_hot(top_1_expert_index, num_classes=self.num_experts)
271
+
272
+ if self.second_expert_policy == "sampling":
273
+ gumbel = torch.distributions.gumbel.Gumbel(0, 1).rsample
274
+ router_logits += gumbel(router_logits.shape).to(router_logits.device)
275
+
276
+ # replace top_1_expert_index with min values
277
+ logits_except_top_1 = router_logits.masked_fill(top_1_mask.bool(), float("-inf"))
278
+ top_2_expert_index = torch.argmax(logits_except_top_1, dim=-1)
279
+ top_2_mask = torch.nn.functional.one_hot(top_2_expert_index, num_classes=self.num_experts)
280
+
281
+ if self.normalize_router_prob_before_dropping:
282
+ top_1_max_probs, top_2_max_probs = self.normalize_router_probabilities(
283
+ router_probs, top_1_mask, top_2_mask
284
+ )
285
+
286
+ if self.second_expert_policy == "random":
287
+ top_2_max_probs = (router_probs * top_2_mask).sum(dim=1)
288
+ sampled = (2 * top_2_max_probs) > torch.rand_like(top_2_max_probs.float())
289
+ top_2_mask = top_2_mask * sampled.repeat(self.num_experts, 1).transpose(1, 0)
290
+
291
+ if padding_mask is not None and not self.router_ignore_padding_tokens:
292
+ if len(padding_mask.shape) == 4:
293
+ # only get the last causal mask
294
+ padding_mask = padding_mask[:, :, -1, :].reshape(-1)[-nb_tokens:]
295
+ non_padding = ~padding_mask.bool()
296
+ top_1_mask = top_1_mask * non_padding.unsqueeze(-1).to(top_1_mask.dtype)
297
+ top_2_mask = top_2_mask * non_padding.unsqueeze(-1).to(top_1_mask.dtype)
298
+
299
+ if self.batch_prioritized_routing:
300
+ # sort tokens based on their routing probability
301
+ # to make sure important tokens are routed, first
302
+ importance_scores = -1 * router_probs.max(dim=1)[0]
303
+ sorted_top_1_mask = top_1_mask[importance_scores.argsort(dim=0)]
304
+ sorted_cumsum1 = (torch.cumsum(sorted_top_1_mask, dim=0) - 1) * sorted_top_1_mask
305
+ locations1 = sorted_cumsum1[importance_scores.argsort(dim=0).argsort(dim=0)]
306
+
307
+ sorted_top_2_mask = top_2_mask[importance_scores.argsort(dim=0)]
308
+ sorted_cumsum2 = (torch.cumsum(sorted_top_2_mask, dim=0) - 1) * sorted_top_2_mask
309
+ locations2 = sorted_cumsum2[importance_scores.argsort(dim=0).argsort(dim=0)]
310
+ # Update 2nd's location by accounting for locations of 1st
311
+ locations2 += torch.sum(top_1_mask, dim=0, keepdim=True)
312
+
313
+ else:
314
+ locations1 = torch.cumsum(top_1_mask, dim=0) - 1
315
+ locations2 = torch.cumsum(top_2_mask, dim=0) - 1
316
+ # Update 2nd's location by accounting for locations of 1st
317
+ locations2 += torch.sum(top_1_mask, dim=0, keepdim=True)
318
+
319
+ if not self.training and self.moe_eval_capacity_token_fraction > 0:
320
+ self.expert_capacity = math.ceil(self.moe_eval_capacity_token_fraction * nb_tokens)
321
+ else:
322
+ capacity = 2 * math.ceil(nb_tokens / self.num_experts)
323
+ self.expert_capacity = capacity if self.expert_capacity is None else self.expert_capacity
324
+
325
+ # Drop tokens whose position within an expert exceeds its capacity (locations >= expert_capacity are not routed)
326
+ top_1_mask = top_1_mask * torch.lt(locations1, self.expert_capacity)
327
+ top_2_mask = top_2_mask * torch.lt(locations2, self.expert_capacity)
328
+
329
+ if not self.normalize_router_prob_before_dropping:
330
+ top_1_max_probs, top_2_max_probs = self.normalize_router_probabilities(
331
+ router_probs, top_1_mask, top_2_mask
332
+ )
333
+
334
+ # Calculate combine_weights and dispatch_mask
335
+ gates1 = top_1_max_probs[:, None] * top_1_mask
336
+ gates2 = top_2_max_probs[:, None] * top_2_mask
337
+ router_probs = gates1 + gates2
338
+
339
+ return top_1_mask, router_probs
340
+
341
+ def forward(self, hidden_states: torch.Tensor, padding_mask: Optional[torch.LongTensor] = None) -> Tuple:
342
+ r"""
343
+ The hidden states are reshaped to simplify the computation of the router probabilities (combining weights for
344
+ each expert).
345
+
346
+ Args:
347
+ hidden_states (`torch.Tensor`):
348
+ (batch_size, sequence_length, hidden_dim) from which router probabilities are computed.
349
+ Returns:
350
+ top_1_mask (`torch.Tensor` of shape (batch_size, sequence_length)):
351
+ Index tensor of shape [batch_size, sequence_length] corresponding to the expert selected for each token
352
+ using the top1 probabilities of the router.
353
+ router_probabilities (`torch.Tensor` of shape (batch_size, sequence_length, num_experts)):
354
+ Tensor of shape (batch_size, sequence_length, num_experts) corresponding to the probabilities for each
355
+ token and expert. Used for routing tokens to experts.
356
+ router_logits (`torch.Tensor` of shape (batch_size, sequence_length, num_experts)):
357
+ Logits tensor of shape (batch_size, sequence_length, num_experts) corresponding to raw router logits.
358
+ This is used later for computing router z-loss.
359
+ """
360
+ self.input_dtype = hidden_states.dtype
361
+ batch_size, sequence_length, hidden_dim = hidden_states.shape
362
+ hidden_states = hidden_states.reshape((batch_size * sequence_length), hidden_dim)
363
+ hidden_states = hidden_states.to(self.dtype)
364
+ self._cast_classifier()
365
+ router_logits = self.classifier(hidden_states)
366
+ top_1_mask, router_probs = self.route_tokens(router_logits, self.input_dtype, padding_mask)
367
+ return top_1_mask, router_probs
368
+
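To see what the router actually returns, here is a toy-sized sketch (sizes chosen arbitrarily; `NllbMoeTop2Router` is an internal class, imported directly from the module): both outputs have one row per flattened token and one column per expert.

```python
import torch

from transformers import NllbMoeConfig
from transformers.models.nllb_moe.modeling_nllb_moe import NllbMoeTop2Router

config = NllbMoeConfig(d_model=16, num_experts=4, expert_capacity=8)
router = NllbMoeTop2Router(config)

hidden_states = torch.randn(2, 6, 16)  # (batch, seq_len, d_model)
top_1_mask, router_probs = router(hidden_states)
print(top_1_mask.shape)    # torch.Size([12, 4]) -> one-hot top-1 choice per token
print(router_probs.shape)  # torch.Size([12, 4]) -> combined top-1/top-2 gate weights per expert
```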
369
+
370
+ class NllbMoeDenseActDense(nn.Module):
371
+ def __init__(self, config: NllbMoeConfig, ffn_dim: int):
372
+ super().__init__()
373
+ self.fc1 = nn.Linear(config.d_model, ffn_dim)
374
+ self.fc2 = nn.Linear(ffn_dim, config.d_model)
375
+ self.dropout = nn.Dropout(config.activation_dropout)
376
+ self.act = ACT2FN[config.activation_function]
377
+
378
+ def forward(self, hidden_states):
379
+ hidden_states = self.fc1(hidden_states)
380
+ hidden_states = self.act(hidden_states)
381
+ hidden_states = self.dropout(hidden_states)
382
+ if (
383
+ isinstance(self.fc2.weight, torch.Tensor)
384
+ and hidden_states.dtype != self.fc2.weight.dtype
385
+ and (self.fc2.weight.dtype != torch.int8 and self.fc2.weight.dtype != torch.uint8)
386
+ ):
387
+ hidden_states = hidden_states.to(self.fc2.weight.dtype)
388
+ hidden_states = self.fc2(hidden_states)
389
+ return hidden_states
390
+
391
+
392
+ class NllbMoeSparseMLP(nn.Module):
393
+ r"""
394
+ Implementation of the NLLB-MoE sparse MLP module.
395
+ """
396
+
397
+ def __init__(self, config: NllbMoeConfig, ffn_dim: int, expert_class: nn.Module = NllbMoeDenseActDense):
398
+ super().__init__()
399
+ self.router = NllbMoeTop2Router(config)
400
+ self.moe_token_dropout = config.moe_token_dropout
401
+ self.token_dropout = nn.Dropout(self.moe_token_dropout)
402
+ self.num_experts = config.num_experts
403
+
404
+ self.experts = nn.ModuleDict()
405
+ for idx in range(self.num_experts):
406
+ self.experts[f"expert_{idx}"] = expert_class(config, ffn_dim)
407
+
408
+ def forward(self, hidden_states: torch.Tensor, padding_mask: Optional[torch.Tensor] = False):
409
+ r"""
410
+ The goal of this forward pass is to have the same number of operations as the equivalent `NllbMoeDenseActDense`
411
+ (MLP) layer. This means that all of the hidden states should be processed at most twice (since we are using a
412
+ top-2 gating mechanism). This keeps the complexity at O(batch_size x sequence_length x hidden_dim)
413
+ instead of O(num_experts x batch_size x sequence_length x hidden_dim).
414
+
415
+ 1- Get the `router_probs` from the `router`. The shape of the `router_mask` is `(batch_size X sequence_length,
416
+ num_expert)` and corresponds to the boolean version of the `router_probs`. The inputs are masked using the
417
+ `router_mask`.
418
+
419
+ 2- Dispatch the hidden_states to their associated experts. The router probabilities are used to weight the
420
+ contribution of each expert when updating the masked hidden states.
421
+
422
+ Args:
423
+ hidden_states (`torch.Tensor` of shape `(batch_size, sequence_length, hidden_dim)`):
424
+ The hidden states
425
+ padding_mask (`torch.Tensor`, *optional*, defaults to `False`):
426
+ Attention mask. Can be in the causal form or not.
427
+
428
+ Returns:
429
+ hidden_states (`torch.Tensor` of shape `(batch_size, sequence_length, hidden_dim)`):
430
+ Updated hidden states
431
+ router_logits (`torch.Tensor` of shape `(batch_size, sequence_length, num_experts)`):
432
+ Needed for computing the loss
433
+
434
+ """
435
+ batch_size, sequence_length, hidden_dim = hidden_states.shape
436
+
437
+ top_1_mask, router_probs = self.router(hidden_states, padding_mask)
438
+ router_mask = router_probs.bool()
439
+ hidden_states = hidden_states.reshape((batch_size * sequence_length), hidden_dim)
440
+ masked_hidden_states = torch.einsum("bm,be->ebm", hidden_states, router_mask)
441
+ for idx, expert in enumerate(self.experts.values()):
442
+ token_indices = router_mask[:, idx]
443
+ combining_weights = router_probs[token_indices, idx]
444
+ expert_output = expert(masked_hidden_states[idx, token_indices])
445
+ if self.moe_token_dropout > 0:
446
+ if self.training:
447
+ expert_output = self.token_dropout(expert_output)
448
+ else:
449
+ expert_output *= 1 - self.moe_token_dropout
450
+ masked_hidden_states[idx, token_indices] = torch.einsum("b,be->be", combining_weights, expert_output)
451
+ hidden_states = masked_hidden_states.sum(dim=0).reshape(batch_size, sequence_length, hidden_dim)
452
+
453
+ top_1_expert_index = torch.argmax(top_1_mask, dim=-1)
454
+ return hidden_states, (router_probs, top_1_expert_index)
455
+
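The sparse MLP can be exercised the same way (another toy sketch with arbitrary sizes, using the same internal import path): the updated hidden states keep their `(batch, seq_len, hidden_dim)` shape, and the second element is the `(router_probs, top_1_expert_index)` tuple that the encoder/decoder layers later expose as router states.

```python
import torch

from transformers import NllbMoeConfig
from transformers.models.nllb_moe.modeling_nllb_moe import NllbMoeSparseMLP

config = NllbMoeConfig(d_model=16, num_experts=4, expert_capacity=8)
sparse_mlp = NllbMoeSparseMLP(config, ffn_dim=32)

hidden_states = torch.randn(2, 6, 16)  # (batch, seq_len, d_model)
out, (router_probs, top_1_expert_index) = sparse_mlp(hidden_states, padding_mask=None)
print(out.shape)                 # torch.Size([2, 6, 16])
print(top_1_expert_index.shape)  # torch.Size([12]) -> chosen top-1 expert per token
```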
456
+
457
+ # Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->NllbMoe,key_value_states->encoder_hidden_states
458
+ class NllbMoeAttention(nn.Module):
459
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
460
+
461
+ def __init__(
462
+ self,
463
+ embed_dim: int,
464
+ num_heads: int,
465
+ dropout: float = 0.0,
466
+ is_decoder: bool = False,
467
+ bias: bool = True,
468
+ is_causal: bool = False,
469
+ config: Optional[NllbMoeConfig] = None,
470
+ ):
471
+ super().__init__()
472
+ self.embed_dim = embed_dim
473
+ self.num_heads = num_heads
474
+ self.dropout = dropout
475
+ self.head_dim = embed_dim // num_heads
476
+ self.config = config
477
+
478
+ if (self.head_dim * num_heads) != self.embed_dim:
479
+ raise ValueError(
480
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
481
+ f" and `num_heads`: {num_heads})."
482
+ )
483
+ self.scaling = self.head_dim**-0.5
484
+ self.is_decoder = is_decoder
485
+ self.is_causal = is_causal
486
+
487
+ self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
488
+ self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
489
+ self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
490
+ self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
491
+
492
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
493
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
494
+
495
+ def forward(
496
+ self,
497
+ hidden_states: torch.Tensor,
498
+ encoder_hidden_states: Optional[torch.Tensor] = None,
499
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
500
+ attention_mask: Optional[torch.Tensor] = None,
501
+ layer_head_mask: Optional[torch.Tensor] = None,
502
+ output_attentions: bool = False,
503
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
504
+ """Input shape: Batch x Time x Channel"""
505
+
506
+ # if encoder_hidden_states are provided this layer is used as a cross-attention layer
507
+ # for the decoder
508
+ is_cross_attention = encoder_hidden_states is not None
509
+
510
+ bsz, tgt_len, _ = hidden_states.size()
511
+
512
+ # get query proj
513
+ query_states = self.q_proj(hidden_states) * self.scaling
514
+ # get key, value proj
515
+ # `past_key_value[0].shape[2] == encoder_hidden_states.shape[1]`
516
+ # is checking that the `sequence_length` of the `past_key_value` is the same as
517
+ # the provided `encoder_hidden_states` to support prefix tuning
518
+ if (
519
+ is_cross_attention
520
+ and past_key_value is not None
521
+ and past_key_value[0].shape[2] == encoder_hidden_states.shape[1]
522
+ ):
523
+ # reuse k,v, cross_attentions
524
+ key_states = past_key_value[0]
525
+ value_states = past_key_value[1]
526
+ elif is_cross_attention:
527
+ # cross_attentions
528
+ key_states = self._shape(self.k_proj(encoder_hidden_states), -1, bsz)
529
+ value_states = self._shape(self.v_proj(encoder_hidden_states), -1, bsz)
530
+ elif past_key_value is not None:
531
+ # reuse k, v, self_attention
532
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
533
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
534
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
535
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
536
+ else:
537
+ # self_attention
538
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
539
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
540
+
541
+ if self.is_decoder:
542
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
543
+ # Further calls to cross_attention layer can then reuse all cross-attention
544
+ # key/value_states (first "if" case)
545
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
546
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
547
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
548
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
549
+ past_key_value = (key_states, value_states)
550
+
551
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
552
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
553
+ key_states = key_states.reshape(*proj_shape)
554
+ value_states = value_states.reshape(*proj_shape)
555
+
556
+ src_len = key_states.size(1)
557
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
558
+
559
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
560
+ raise ValueError(
561
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
562
+ f" {attn_weights.size()}"
563
+ )
564
+
565
+ if attention_mask is not None:
566
+ if attention_mask.size() != (bsz, 1, tgt_len, src_len):
567
+ raise ValueError(
568
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
569
+ )
570
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
571
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
572
+
573
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
574
+
575
+ if layer_head_mask is not None:
576
+ if layer_head_mask.size() != (self.num_heads,):
577
+ raise ValueError(
578
+ f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
579
+ f" {layer_head_mask.size()}"
580
+ )
581
+ attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
582
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
583
+
584
+ if output_attentions:
585
+ # this operation is a bit awkward, but it's required to
586
+ # make sure that attn_weights keeps its gradient.
587
+ # In order to do so, attn_weights have to be reshaped
588
+ # twice and have to be reused in the following
589
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
590
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
591
+ else:
592
+ attn_weights_reshaped = None
593
+
594
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
595
+
596
+ attn_output = torch.bmm(attn_probs, value_states)
597
+
598
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
599
+ raise ValueError(
600
+ f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
601
+ f" {attn_output.size()}"
602
+ )
603
+
604
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
605
+ attn_output = attn_output.transpose(1, 2)
606
+
607
+ # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
608
+ # partitioned across GPUs when using tensor-parallelism.
609
+ attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
610
+
611
+ attn_output = self.out_proj(attn_output)
612
+
613
+ return attn_output, attn_weights_reshaped, past_key_value
614
+
615
+
616
+ class NllbMoeEncoderLayer(nn.Module):
617
+ def __init__(self, config: NllbMoeConfig, is_sparse: bool = False):
618
+ super().__init__()
619
+ self.embed_dim = config.d_model
620
+ self.is_sparse = is_sparse
621
+ self.self_attn = NllbMoeAttention(
622
+ embed_dim=self.embed_dim,
623
+ num_heads=config.encoder_attention_heads,
624
+ dropout=config.attention_dropout,
625
+ )
626
+ self.attn_dropout = nn.Dropout(config.dropout)
627
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
628
+ if not self.is_sparse:
629
+ self.ffn = NllbMoeDenseActDense(config, ffn_dim=config.encoder_ffn_dim)
630
+ else:
631
+ self.ffn = NllbMoeSparseMLP(config, ffn_dim=config.encoder_ffn_dim)
632
+ self.ff_layer_norm = nn.LayerNorm(config.d_model)
633
+ self.ff_dropout = nn.Dropout(config.activation_dropout)
634
+
635
+ def forward(
636
+ self,
637
+ hidden_states: torch.Tensor,
638
+ attention_mask: torch.Tensor,
639
+ layer_head_mask: torch.Tensor,
640
+ output_attentions: bool = False,
641
+ output_router_logits: bool = False,
642
+ ) -> torch.Tensor:
643
+ """
644
+ Args:
645
+ hidden_states (`torch.FloatTensor`):
646
+ input to the layer of shape `(batch, seq_len, embed_dim)`
647
+ attention_mask (`torch.FloatTensor`):
648
+ attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very
649
+ large negative values.
650
+ layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
651
+ `(encoder_attention_heads,)`.
652
+ output_attentions (`bool`, *optional*):
653
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
654
+ returned tensors for more detail.
655
+ """
656
+ residual = hidden_states
657
+ hidden_states = self.self_attn_layer_norm(hidden_states)
658
+ hidden_states, attn_weights, _ = self.self_attn(
659
+ hidden_states=hidden_states,
660
+ attention_mask=attention_mask,
661
+ layer_head_mask=layer_head_mask,
662
+ output_attentions=output_attentions,
663
+ )
664
+ hidden_states = self.attn_dropout(hidden_states)
665
+ hidden_states = residual + hidden_states
666
+
667
+ residual = hidden_states
668
+
669
+ hidden_states = self.ff_layer_norm(hidden_states)
670
+ if self.is_sparse:
671
+ hidden_states, router_states = self.ffn(hidden_states, attention_mask)
672
+ else:
673
+ # router_states set to None to track which layers have None gradients.
674
+ hidden_states, router_states = self.ffn(hidden_states), None
675
+
676
+ hidden_states = self.ff_dropout(hidden_states)
677
+
678
+ hidden_states = residual + hidden_states
679
+
680
+ if hidden_states.dtype == torch.float16 and (
681
+ torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
682
+ ):
683
+ clamp_value = torch.finfo(hidden_states.dtype).max - 1000
684
+ hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
685
+
686
+ outputs = (hidden_states,)
687
+
688
+ if output_attentions:
689
+ outputs += (attn_weights,)
690
+
691
+ if output_router_logits:
692
+ outputs += (router_states,)
693
+
694
+ return outputs
695
+
696
+
697
+ class NllbMoeDecoderLayer(nn.Module):
698
+ def __init__(self, config: NllbMoeConfig, is_sparse: bool = False):
699
+ super().__init__()
700
+ self.embed_dim = config.d_model
701
+ self.is_sparse = is_sparse
702
+ self.self_attn = NllbMoeAttention(
703
+ embed_dim=self.embed_dim,
704
+ num_heads=config.decoder_attention_heads,
705
+ dropout=config.attention_dropout,
706
+ is_decoder=True,
707
+ )
708
+ self.dropout = config.dropout
709
+ self.activation_fn = ACT2FN[config.activation_function]
710
+ self.attn_dropout = nn.Dropout(config.dropout)
711
+
712
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
713
+ self.cross_attention = NllbMoeAttention(
714
+ self.embed_dim, config.decoder_attention_heads, config.attention_dropout, is_decoder=True
715
+ )
716
+ self.cross_attention_layer_norm = nn.LayerNorm(self.embed_dim)
717
+ if not self.is_sparse:
718
+ self.ffn = NllbMoeDenseActDense(config, ffn_dim=config.decoder_ffn_dim)
719
+ else:
720
+ self.ffn = NllbMoeSparseMLP(config, ffn_dim=config.decoder_ffn_dim)
721
+ self.ff_layer_norm = nn.LayerNorm(config.d_model)
722
+ self.ff_dropout = nn.Dropout(config.activation_dropout)
723
+
724
+ def forward(
725
+ self,
726
+ hidden_states: torch.Tensor,
727
+ attention_mask: Optional[torch.Tensor] = None,
728
+ encoder_hidden_states: Optional[torch.Tensor] = None,
729
+ encoder_attention_mask: Optional[torch.Tensor] = None,
730
+ layer_head_mask: Optional[torch.Tensor] = None,
731
+ cross_attn_layer_head_mask: Optional[torch.Tensor] = None,
732
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
733
+ output_attentions: Optional[bool] = False,
734
+ output_router_logits: Optional[bool] = False,
735
+ use_cache: Optional[bool] = True,
736
+ ) -> torch.Tensor:
737
+ """
738
+ Args:
739
+ hidden_states (`torch.FloatTensor`):
740
+ input to the layer of shape `(batch, seq_len, embed_dim)`
741
+ attention_mask (`torch.FloatTensor`):
742
+ attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very
743
+ large negative values.
744
+ encoder_hidden_states (`torch.FloatTensor`):
745
+ cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
746
+ encoder_attention_mask (`torch.FloatTensor`):
747
+ encoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by
748
+ very large negative values.
749
+ layer_head_mask (`torch.FloatTensor`):
750
+ mask for attention heads in a given layer of size `(encoder_attention_heads,)`.
751
+ cross_attn_layer_head_mask (`torch.FloatTensor`):
752
+ mask for cross-attention heads in a given layer of size `(decoder_attention_heads,)`.
753
+ past_key_value (`Tuple(torch.FloatTensor)`):
754
+ cached past key and value projection states
755
+ output_attentions (`bool`, *optional*):
756
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
757
+ returned tensors for more detail.
758
+ """
759
+ residual = hidden_states
760
+ hidden_states = self.self_attn_layer_norm(hidden_states)
761
+
762
+ # Self Attention
763
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
764
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
765
+ # add present self-attn cache to positions 1,2 of present_key_value tuple
766
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
767
+ hidden_states=hidden_states,
768
+ past_key_value=self_attn_past_key_value,
769
+ attention_mask=attention_mask,
770
+ layer_head_mask=layer_head_mask,
771
+ output_attentions=output_attentions,
772
+ )
773
+ hidden_states = self.attn_dropout(hidden_states)
774
+ hidden_states = residual + hidden_states
775
+
776
+ # Cross-Attention Block
777
+ cross_attn_present_key_value = None
778
+ cross_attn_weights = None
779
+ if encoder_hidden_states is not None:
780
+ residual = hidden_states
781
+ hidden_states = self.cross_attention_layer_norm(hidden_states)
782
+
783
+ # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
784
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
785
+ hidden_states, cross_attn_weights, cross_attn_present_key_value = self.cross_attention(
786
+ hidden_states=hidden_states,
787
+ encoder_hidden_states=encoder_hidden_states,
788
+ past_key_value=cross_attn_past_key_value,
789
+ attention_mask=encoder_attention_mask,
790
+ layer_head_mask=cross_attn_layer_head_mask,
791
+ output_attentions=output_attentions,
792
+ )
793
+ hidden_states = self.attn_dropout(hidden_states)
794
+ hidden_states = residual + hidden_states
795
+
796
+ # add cross-attn to positions 3,4 of present_key_value tuple
797
+ present_key_value += cross_attn_present_key_value
798
+
799
+ # Fully Connected
800
+ residual = hidden_states
801
+
802
+ hidden_states = self.ff_layer_norm(hidden_states)
803
+ if self.is_sparse:
804
+ hidden_states, router_states = self.ffn(hidden_states, attention_mask)
805
+ else:
806
+ hidden_states, router_states = self.ffn(hidden_states), None
807
+
808
+ hidden_states = self.ff_dropout(hidden_states)
809
+
810
+ hidden_states = residual + hidden_states
811
+
812
+ # clamp inf values to enable fp16 training
813
+ if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any():
814
+ clamp_value = torch.finfo(hidden_states.dtype).max - 1000
815
+ hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
816
+
817
+ outputs = (hidden_states, present_key_value)
818
+
819
+ if output_attentions:
820
+ outputs += (self_attn_weights, cross_attn_weights)
821
+
822
+ if output_router_logits:
823
+ outputs += (router_states,)
824
+
825
+ return outputs
826
+
827
+
828
+ class NllbMoePreTrainedModel(PreTrainedModel):
829
+ config_class = NllbMoeConfig
830
+ base_model_prefix = "model"
831
+ supports_gradient_checkpointing = True
832
+ _no_split_modules = ["NllbMoeEncoderLayer", "NllbMoeDecoderLayer"]
833
+
834
+ def _init_weights(self, module):
835
+ """Initialize the weights"""
836
+ std = self.config.init_std
837
+ if isinstance(module, nn.Linear):
838
+ module.weight.data.normal_(mean=0.0, std=std)
839
+ if module.bias is not None:
840
+ module.bias.data.zero_()
841
+ elif isinstance(module, nn.Embedding):
842
+ module.weight.data.normal_(mean=0.0, std=std)
843
+ if module.padding_idx is not None:
844
+ module.weight.data[module.padding_idx].zero_()
845
+
846
+
847
+ NLLB_MOE_START_DOCSTRING = r"""
848
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
849
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
850
+ etc.)
851
+
852
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
853
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
854
+ and behavior.
855
+
856
+ Parameters:
857
+ config ([`NllbMoeConfig`]):
858
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
859
+ load the weights associated with the model, only the configuration. Check out the
860
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
861
+ """
862
+
863
+ NLLB_MOE_GENERATION_EXAMPLE = r"""
864
+ Translation example:
865
+
866
+ ```python
867
+ >>> from transformers import AutoTokenizer, NllbMoeForConditionalGeneration
868
+
869
+ >>> model = NllbMoeForConditionalGeneration.from_pretrained("facebook/nllb-moe-54b")
870
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-moe-54b")
871
+
872
+ >>> text_to_translate = "Life is like a box of chocolates"
873
+ >>> model_inputs = tokenizer(text_to_translate, return_tensors="pt")
874
+
875
+ >>> # translate to French
876
+ >>> gen_tokens = model.generate(**model_inputs, forced_bos_token_id=tokenizer.convert_tokens_to_ids("fra_Latn"))
877
+ >>> print(tokenizer.batch_decode(gen_tokens, skip_special_tokens=True))
878
+ ```
879
+ """
880
+
881
+ NLLB_MOE_INPUTS_DOCSTRING = r"""
882
+ Args:
883
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
884
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
885
+ it.
886
+
887
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
888
+ [`PreTrainedTokenizer.__call__`] for details.
889
+
890
+ [What are input IDs?](../glossary#input-ids)
891
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
892
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
893
+
894
+ - 1 for tokens that are **not masked**,
895
+ - 0 for tokens that are **masked**.
896
+
897
+ [What are attention masks?](../glossary#attention-mask)
898
+ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
899
+ Indices of decoder input sequence tokens in the vocabulary.
900
+
901
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
902
+ [`PreTrainedTokenizer.__call__`] for details.
903
+
904
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
905
+
906
+ NllbMoe uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If
907
+ `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
908
+ `past_key_values`).
909
+ decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
910
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
911
+ be used by default.
912
+ head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
913
+ Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
914
+
915
+ - 1 indicates the head is **not masked**,
916
+ - 0 indicates the head is **masked**.
917
+
918
+ decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
919
+ Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
920
+
921
+ - 1 indicates the head is **not masked**,
922
+ - 0 indicates the head is **masked**.
923
+
924
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
925
+ Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0,
926
+ 1]`:
927
+
928
+ - 1 indicates the head is **not masked**,
929
+ - 0 indicates the head is **masked**.
930
+ encoder_outputs (`tuple(tuple(torch.FloatTensor))`, *optional*):
931
+ Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`), where
932
+ `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of
933
+ hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
934
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
935
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
936
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape
937
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
938
+
939
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
940
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
941
+
942
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
943
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
944
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
945
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
946
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
947
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
948
+ than the model's internal embedding lookup matrix.
949
+ decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
950
+ Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
951
+ representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
952
+ input (see `past_key_values`). This is useful if you want more control over how to convert
953
+ `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
954
+
955
+ If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value
956
+ of `inputs_embeds`.
957
+ use_cache (`bool`, *optional*):
958
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
959
+ `past_key_values`).
960
+ output_attentions (`bool`, *optional*):
961
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
962
+ tensors for more detail.
963
+ output_hidden_states (`bool`, *optional*):
964
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
965
+ more detail.
966
+ output_router_logits (`bool`, *optional*):
967
+ Whether or not to return the logits of all the routers. They are useful for computing the router loss, and
968
+ should not be returned during inference.
969
+ return_dict (`bool`, *optional*):
970
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
971
+ """
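As a quick illustration of the `past_key_values` contract described above, the sketch below runs one full decoder step and then feeds back only the newest token together with the cached key/values. This is a minimal, editorial sketch rather than part of the file: the tiny checkpoint name is the one used in the `NllbMoeModel` docstring further down, and `decoder_start_token_id` is assumed to be set on its config.

```python
import torch
from transformers import AutoTokenizer, NllbMoeForConditionalGeneration

# Tiny test checkpoint referenced in the NllbMoeModel example below; illustrative only.
name = "hf-internal-testing/random-nllb-moe-2-experts"
tokenizer = AutoTokenizer.from_pretrained(name)
model = NllbMoeForConditionalGeneration.from_pretrained(name)

encoder_inputs = tokenizer("Life is like a box of chocolates", return_tensors="pt")
decoder_input_ids = torch.tensor([[model.config.decoder_start_token_id]])

# First step: full forward pass, caching self- and cross-attention key/values.
outputs = model(**encoder_inputs, decoder_input_ids=decoder_input_ids, use_cache=True)
next_token = outputs.logits[:, -1:].argmax(-1)

# Second step: only the newly generated token is passed, together with past_key_values.
outputs = model(
    **encoder_inputs,
    decoder_input_ids=next_token,
    past_key_values=outputs.past_key_values,
    use_cache=True,
)
```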
972
+
973
+
974
+ class NllbMoeEncoder(NllbMoePreTrainedModel):
975
+ """
976
+ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
977
+ [`NllbMoeEncoderLayer`].
978
+
979
+ Args:
980
+ config:
981
+ NllbMoeConfig
982
+ embed_tokens (nn.Embedding):
983
+ output embedding
984
+ """
985
+
986
+ def __init__(self, config: NllbMoeConfig, embed_tokens: Optional[nn.Embedding] = None):
987
+ super().__init__(config)
988
+
989
+ self.dropout = config.dropout
990
+ self.layerdrop = config.encoder_layerdrop
991
+
992
+ embed_dim = config.d_model
993
+ self.padding_idx = config.pad_token_id
994
+ self.max_source_positions = config.max_position_embeddings
995
+ self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
996
+
997
+ self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx)
998
+
999
+ if embed_tokens is not None:
1000
+ self.embed_tokens.weight = embed_tokens.weight
1001
+
1002
+ self.embed_positions = NllbMoeSinusoidalPositionalEmbedding(
1003
+ config.max_position_embeddings,
1004
+ embed_dim,
1005
+ self.padding_idx,
1006
+ )
1007
+ sparse_step = config.encoder_sparse_step
1008
+ self.layers = nn.ModuleList()
1009
+ for i in range(config.encoder_layers):
1010
+ is_sparse = (i + 1) % sparse_step == 0 if sparse_step > 0 else False
1011
+ self.layers.append(NllbMoeEncoderLayer(config, is_sparse))
1012
+
1013
+ self.layer_norm = nn.LayerNorm(config.d_model)
1014
+
1015
+ self.gradient_checkpointing = False
1016
+ # Initialize weights and apply final processing
1017
+ self.post_init()
1018
+
1019
+ def forward(
1020
+ self,
1021
+ input_ids: Optional[torch.Tensor] = None,
1022
+ attention_mask: Optional[torch.Tensor] = None,
1023
+ head_mask: Optional[torch.Tensor] = None,
1024
+ inputs_embeds: Optional[torch.Tensor] = None,
1025
+ output_attentions: Optional[bool] = None,
1026
+ output_hidden_states: Optional[bool] = None,
1027
+ output_router_logits: Optional[bool] = None,
1028
+ return_dict: Optional[bool] = None,
1029
+ ):
1030
+ r"""
1031
+ Args:
1032
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
1033
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
1034
+ provide it.
1035
+
1036
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
1037
+ [`PreTrainedTokenizer.__call__`] for details.
1038
+
1039
+ [What are input IDs?](../glossary#input-ids)
1040
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1041
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1042
+
1043
+ - 1 for tokens that are **not masked**,
1044
+ - 0 for tokens that are **masked**.
1045
+
1046
+ [What are attention masks?](../glossary#attention-mask)
1047
+ head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
1048
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
1049
+
1050
+ - 1 indicates the head is **not masked**,
1051
+ - 0 indicates the head is **masked**.
1052
+
1053
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1054
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
1055
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
1056
+ than the model's internal embedding lookup matrix.
1057
+ output_attentions (`bool`, *optional*):
1058
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
1059
+ returned tensors for more detail.
1060
+ output_hidden_states (`bool`, *optional*):
1061
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
1062
+ for more detail.
1063
+ output_router_logits (`bool`, *optional*):
1064
+ Whether or not to return the logits of all the routers. They are useful for computing the router loss,
1065
+ and should not be returned during inference.
1066
+ return_dict (`bool`, *optional*):
1067
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
1068
+ """
1069
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1070
+ output_hidden_states = (
1071
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1072
+ )
1073
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
1074
+
1075
+ # retrieve input_ids and inputs_embeds
1076
+ if input_ids is not None and inputs_embeds is not None:
1077
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
1078
+ elif input_ids is not None:
1079
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
1080
+ input_shape = input_ids.size()
1081
+ input_ids = input_ids.view(-1, input_shape[-1])
1082
+ elif inputs_embeds is not None:
1083
+ input_shape = inputs_embeds.size()[:-1]
1084
+ else:
1085
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
1086
+
1087
+ if inputs_embeds is None:
1088
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
1089
+
1090
+ embed_pos = self.embed_positions(input_ids, inputs_embeds)
1091
+ embed_pos = embed_pos.to(inputs_embeds.device)
1092
+
1093
+ hidden_states = inputs_embeds + embed_pos
1094
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
1095
+
1096
+ # expand attention_mask
1097
+ if attention_mask is not None:
1098
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
1099
+ attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
1100
+
1101
+ encoder_states = () if output_hidden_states else None
1102
+ all_router_probs = () if output_router_logits else None
1103
+ all_attentions = () if output_attentions else None
1104
+
1105
+ # check if head_mask has a correct number of layers specified if desired
1106
+ if head_mask is not None:
1107
+ if head_mask.size()[0] != len(self.layers):
1108
+ raise ValueError(
1109
+ f"The head_mask should be specified for {len(self.layers)} layers, but it is for"
1110
+ f" {head_mask.size()[0]}."
1111
+ )
1112
+
1113
+ for idx, encoder_layer in enumerate(self.layers):
1114
+ if output_hidden_states:
1115
+ encoder_states = encoder_states + (hidden_states,)
1116
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
1117
+ dropout_probability = torch.rand([])
1118
+ if self.training and (dropout_probability < self.layerdrop): # skip the layer
1119
+ layer_outputs = (None, None, None)
1120
+ else:
1121
+ if self.gradient_checkpointing and self.training:
1122
+ layer_outputs = self._gradient_checkpointing_func(
1123
+ encoder_layer.__call__,
1124
+ hidden_states,
1125
+ attention_mask,
1126
+ (head_mask[idx] if head_mask is not None else None),
1127
+ output_attentions,
1128
+ )
1129
+ else:
1130
+ layer_outputs = encoder_layer(
1131
+ hidden_states,
1132
+ attention_mask,
1133
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
1134
+ output_attentions=output_attentions,
1135
+ output_router_logits=output_router_logits,
1136
+ )
1137
+
1138
+ hidden_states = layer_outputs[0]
1139
+
1140
+ if output_attentions:
1141
+ all_attentions += (layer_outputs[1],)
1142
+
1143
+ if output_router_logits:
1144
+ all_router_probs += (layer_outputs[-1],)
1145
+
1146
+ last_hidden_state = self.layer_norm(hidden_states)
1147
+
1148
+ if output_hidden_states:
1149
+ encoder_states += (last_hidden_state,)
1150
+
1151
+ if not return_dict:
1152
+ return tuple(
1153
+ v for v in [last_hidden_state, encoder_states, all_attentions, all_router_probs] if v is not None
1154
+ )
1155
+
1156
+ return MoEModelOutput(
1157
+ last_hidden_state=last_hidden_state,
1158
+ hidden_states=encoder_states,
1159
+ attentions=all_attentions,
1160
+ router_probs=all_router_probs,
1161
+ )
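The `is_sparse = (i + 1) % sparse_step == 0` rule in the constructor above places a sparse (MoE) block every `sparse_step` layers. A small stand-alone check of that placement, with assumed values of 4 for the sparse step and 12 layers:

```python
# Stand-alone check of the sparse-layer placement rule used in the constructor above.
sparse_step = 4   # assumed value; stands in for config.encoder_sparse_step / decoder_sparse_step
num_layers = 12   # assumed value; stands in for config.encoder_layers / decoder_layers

is_sparse = [(i + 1) % sparse_step == 0 if sparse_step > 0 else False for i in range(num_layers)]
print([i for i, sparse in enumerate(is_sparse) if sparse])  # [3, 7, 11] -> every 4th layer is MoE
```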
1162
+
1163
+
1164
+ class NllbMoeDecoder(NllbMoePreTrainedModel):
1165
+ """
1166
+ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`NllbMoeDecoderLayer`]
1167
+
1168
+ Args:
1169
+ config:
1170
+ NllbMoeConfig
1171
+ embed_tokens (nn.Embedding):
1172
+ output embedding
1173
+ """
1174
+
1175
+ def __init__(self, config: NllbMoeConfig, embed_tokens: Optional[nn.Embedding] = None):
1176
+ super().__init__(config)
1177
+ self.dropout = config.dropout
1178
+ self.layerdrop = config.decoder_layerdrop
1179
+ self.padding_idx = config.pad_token_id
1180
+ self.max_target_positions = config.max_position_embeddings
1181
+ self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
1182
+
1183
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)
1184
+
1185
+ if embed_tokens is not None:
1186
+ self.embed_tokens.weight = embed_tokens.weight
1187
+
1188
+ self.embed_positions = NllbMoeSinusoidalPositionalEmbedding(
1189
+ config.max_position_embeddings,
1190
+ config.d_model,
1191
+ self.padding_idx,
1192
+ )
1193
+
1194
+ sparse_step = config.decoder_sparse_step
1195
+ self.layers = nn.ModuleList()
1196
+ for i in range(config.decoder_layers):
1197
+ is_sparse = (i + 1) % sparse_step == 0 if sparse_step > 0 else False
1198
+ self.layers.append(NllbMoeDecoderLayer(config, is_sparse))
1199
+
1200
+ self.layer_norm = nn.LayerNorm(config.d_model)
1201
+
1202
+ self.gradient_checkpointing = False
1203
+ # Initialize weights and apply final processing
1204
+ self.post_init()
1205
+
1206
+ def forward(
1207
+ self,
1208
+ input_ids: Optional[torch.Tensor] = None,
1209
+ attention_mask: Optional[torch.Tensor] = None,
1210
+ encoder_hidden_states: Optional[torch.Tensor] = None,
1211
+ encoder_attention_mask: Optional[torch.Tensor] = None,
1212
+ head_mask: Optional[torch.Tensor] = None,
1213
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
1214
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1215
+ inputs_embeds: Optional[torch.Tensor] = None,
1216
+ use_cache: Optional[bool] = None,
1217
+ output_attentions: Optional[bool] = None,
1218
+ output_hidden_states: Optional[bool] = None,
1219
+ output_router_logits: Optional[bool] = None,
1220
+ return_dict: Optional[bool] = None,
1221
+ ):
1222
+ r"""
1223
+ Args:
1224
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
1225
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
1226
+ provide it.
1227
+
1228
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
1229
+ [`PreTrainedTokenizer.__call__`] for details.
1230
+
1231
+ [What are input IDs?](../glossary#input-ids)
1232
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1233
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1234
+
1235
+ - 1 for tokens that are **not masked**,
1236
+ - 0 for tokens that are **masked**.
1237
+
1238
+ [What are attention masks?](../glossary#attention-mask)
1239
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
1240
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
1241
+ of the decoder.
1242
+ encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
1243
+ Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
1244
+ selected in `[0, 1]`:
1245
+
1246
+ - 1 for tokens that are **not masked**,
1247
+ - 0 for tokens that are **masked**.
1248
+
1249
+ [What are attention masks?](../glossary#attention-mask)
1250
+ head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
1251
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
1252
+
1253
+ - 1 indicates the head is **not masked**,
1254
+ - 0 indicates the head is **masked**.
1255
+
1256
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
1257
+ Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing
1258
+ cross-attention on hidden heads. Mask values selected in `[0, 1]`:
1259
+
1260
+ - 1 indicates the head is **not masked**,
1261
+ - 0 indicates the head is **masked**.
1262
+
1263
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
1264
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
1265
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of
1266
+ shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
1267
+
1268
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
1269
+ cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
1270
+
1271
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
1272
+ that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
1273
+ all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1274
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1275
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
1276
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
1277
+ than the model's internal embedding lookup matrix.
1278
+ output_attentions (`bool`, *optional*):
1279
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
1280
+ returned tensors for more detail.
1281
+ output_hidden_states (`bool`, *optional*):
1282
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
1283
+ for more detail.
1284
+ output_router_logits (`bool`, *optional*):
1285
+ Whether or not to return the logits of all the routers. They are useful for computing the router loss,
1286
+ and should not be returned during inference.
1287
+ return_dict (`bool`, *optional*):
1288
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
1289
+ """
1290
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1291
+ output_hidden_states = (
1292
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1293
+ )
1294
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1295
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
1296
+
1297
+ # retrieve input_ids and inputs_embeds
1298
+ if input_ids is not None and inputs_embeds is not None:
1299
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
1300
+ elif input_ids is not None:
1301
+ input_shape = input_ids.size()
1302
+ input_ids = input_ids.view(-1, input_shape[-1])
1303
+ elif inputs_embeds is not None:
1304
+ input_shape = inputs_embeds.size()[:-1]
1305
+ else:
1306
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
1307
+
1308
+ # past_key_values_length
1309
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
1310
+
1311
+ if inputs_embeds is None:
1312
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
1313
+
1314
+ # create causal mask
1315
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
1316
+ combined_attention_mask = _prepare_4d_causal_attention_mask(
1317
+ attention_mask, input_shape, inputs_embeds, past_key_values_length
1318
+ )
1319
+
1320
+ # expand encoder attention mask
1321
+ if encoder_hidden_states is not None and encoder_attention_mask is not None:
1322
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
1323
+ encoder_attention_mask = _prepare_4d_attention_mask(
1324
+ encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
1325
+ )
1326
+
1327
+ # embed positions
1328
+ positions = self.embed_positions(input_ids, inputs_embeds, past_key_values_length)
1329
+ positions = positions.to(inputs_embeds.device)
1330
+
1331
+ hidden_states = inputs_embeds + positions
1332
+
1333
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
1334
+
1335
+ if self.gradient_checkpointing and self.training:
1336
+ if use_cache:
1337
+ logger.warning_once(
1338
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting" " `use_cache=False`..."
1339
+ )
1340
+ use_cache = False
1341
+
1342
+ # decoder layers
1343
+ all_hidden_states = () if output_hidden_states else None
1344
+ all_self_attns = () if output_attentions else None
1345
+ all_router_probs = () if output_router_logits else None
1346
+ all_cross_attentions = () if output_attentions else None
1347
+ present_key_value_states = () if use_cache else None
1348
+
1349
+ # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
1350
+ for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
1351
+ if attn_mask is not None:
1352
+ if attn_mask.size()[0] != len(self.layers):
1353
+ raise ValueError(
1354
+ f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
1355
+ f" {attn_mask.size()[0]}."
1356
+ )
1357
+ deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()
1358
+
1359
+ for idx, decoder_layer in enumerate(self.layers):
1360
+ if output_hidden_states:
1361
+ all_hidden_states += (hidden_states,)
1362
+
1363
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
1364
+ dropout_probability = torch.rand([])
1365
+
1366
+ skip_the_layer = True if self.training and (dropout_probability < self.layerdrop) else False
1367
+ if not skip_the_layer or deepspeed_zero3_is_enabled:
1368
+ layer_head_mask = head_mask[idx] if head_mask is not None else None
1369
+ cross_attn_layer_head_mask = cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
1370
+
1371
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
1372
+
1373
+ # under deepspeed zero3 all gpus must run in sync
1374
+ if self.gradient_checkpointing and self.training:
1375
+ if use_cache:
1376
+ logger.warning_once(
1377
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
1378
+ )
1379
+ use_cache = False
1380
+ layer_outputs = self._gradient_checkpointing_func(
1381
+ decoder_layer.forward,
1382
+ hidden_states,
1383
+ combined_attention_mask,
1384
+ encoder_hidden_states,
1385
+ encoder_attention_mask,
1386
+ layer_head_mask,
1387
+ cross_attn_layer_head_mask,
1388
+ None, # past_key_value is always None with gradient checkpointing
1389
+ use_cache,
1390
+ output_attentions,
1391
+ )
1392
+ else:
1393
+ layer_outputs = decoder_layer(
1394
+ hidden_states,
1395
+ attention_mask=combined_attention_mask,
1396
+ encoder_hidden_states=encoder_hidden_states,
1397
+ encoder_attention_mask=encoder_attention_mask,
1398
+ layer_head_mask=layer_head_mask,
1399
+ cross_attn_layer_head_mask=cross_attn_layer_head_mask,
1400
+ past_key_value=past_key_value,
1401
+ use_cache=use_cache,
1402
+ output_attentions=output_attentions,
1403
+ output_router_logits=output_router_logits,
1404
+ )
1405
+
1406
+ hidden_states = layer_outputs[0]
1407
+
1408
+ if skip_the_layer:
1409
+ continue
1410
+
1411
+ if use_cache:
1412
+ present_key_value_states += (layer_outputs[1],)
1413
+
1414
+ if output_attentions:
1415
+ all_self_attns += (layer_outputs[2],)
1416
+ all_cross_attentions += (layer_outputs[3],)
1417
+
1418
+ if output_router_logits:
1419
+ all_router_probs += (layer_outputs[-1],)
1420
+
1421
+ hidden_states = self.layer_norm(hidden_states)
1422
+
1423
+ # Add last layer
1424
+ if output_hidden_states:
1425
+ all_hidden_states += (hidden_states,)
1426
+
1427
+ if not return_dict:
1428
+ return tuple(
1429
+ v
1430
+ for v in [
1431
+ hidden_states,
1432
+ present_key_value_states,
1433
+ all_hidden_states,
1434
+ all_self_attns,
1435
+ all_cross_attentions,
1436
+ all_router_probs,
1437
+ ]
1438
+ if v is not None
1439
+ )
1440
+ return MoEModelOutputWithPastAndCrossAttentions(
1441
+ last_hidden_state=hidden_states,
1442
+ past_key_values=present_key_value_states,
1443
+ hidden_states=all_hidden_states,
1444
+ attentions=all_self_attns,
1445
+ cross_attentions=all_cross_attentions,
1446
+ router_probs=all_router_probs,
1447
+ )
1448
+
1449
+
1450
+ @add_start_docstrings(
1451
+ "The bare NllbMoe Model outputting raw hidden-states without any specific head on top.",
1452
+ NLLB_MOE_START_DOCSTRING,
1453
+ )
1454
+ class NllbMoeModel(NllbMoePreTrainedModel):
1455
+ _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"]
1456
+
1457
+ def __init__(self, config: NllbMoeConfig):
1458
+ super().__init__(config)
1459
+
1460
+ padding_idx, vocab_size = config.pad_token_id, config.vocab_size
1461
+ self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx)
1462
+
1463
+ self.encoder = NllbMoeEncoder(config, self.shared)
1464
+ self.decoder = NllbMoeDecoder(config, self.shared)
1465
+
1466
+ # Initialize weights and apply final processing
1467
+ self.post_init()
1468
+
1469
+ def get_input_embeddings(self):
1470
+ return self.shared
1471
+
1472
+ def set_input_embeddings(self, value):
1473
+ self.shared = value
1474
+ self.encoder.embed_tokens = self.shared
1475
+ self.decoder.embed_tokens = self.shared
1476
+
1477
+ def _tie_weights(self):
1478
+ if self.config.tie_word_embeddings:
1479
+ self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared)
1480
+ self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared)
1481
+
1482
+ def get_encoder(self):
1483
+ return self.encoder
1484
+
1485
+ def get_decoder(self):
1486
+ return self.decoder
1487
+
1488
+ @add_start_docstrings_to_model_forward(NLLB_MOE_INPUTS_DOCSTRING)
1490
+ @replace_return_docstrings(output_type=Seq2SeqMoEModelOutput, config_class=_CONFIG_FOR_DOC)
1491
+ def forward(
1492
+ self,
1493
+ input_ids: Optional[torch.LongTensor] = None,
1494
+ attention_mask: Optional[torch.Tensor] = None,
1495
+ decoder_input_ids: Optional[torch.LongTensor] = None,
1496
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
1497
+ head_mask: Optional[torch.Tensor] = None,
1498
+ decoder_head_mask: Optional[torch.Tensor] = None,
1499
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
1500
+ encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
1501
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
1502
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1503
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
1504
+ use_cache: Optional[bool] = None,
1505
+ output_attentions: Optional[bool] = None,
1506
+ output_hidden_states: Optional[bool] = None,
1507
+ output_router_logits: Optional[bool] = None,
1508
+ return_dict: Optional[bool] = None,
1509
+ ) -> Union[Tuple[torch.Tensor], Seq2SeqMoEModelOutput]:
1510
+ r"""
1511
+ Returns:
1512
+
1513
+ Example:
1514
+
1515
+ ```python
1516
+ >>> from transformers import AutoTokenizer, NllbMoeModel
1517
+
1518
+ >>> tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/random-nllb-moe-2-experts")
1519
+ >>> model = NllbMoeModel.from_pretrained("hf-internal-testing/random-nllb-moe-2-experts")
1520
+
1521
+ >>> input_ids = tokenizer(
1522
+ ... "Studies have been shown that owning a dog is good for you", return_tensors="pt"
1523
+ ... ).input_ids # Batch size 1
1524
+ >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1
1525
+
1526
+ >>> # preprocess: Prepend decoder_input_ids with the decoder start token, which is the eos token for NllbMoeModel
1527
+ >>> decoder_input_ids = model._shift_right(decoder_input_ids)
1528
+
1529
+ >>> # forward pass
1530
+ >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
1531
+ >>> last_hidden_states = outputs.last_hidden_state
1532
+ ```"""
1533
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
1534
+ if encoder_outputs is None:
1535
+ encoder_outputs = self.encoder(
1536
+ input_ids=input_ids,
1537
+ attention_mask=attention_mask,
1538
+ head_mask=head_mask,
1539
+ inputs_embeds=inputs_embeds,
1540
+ output_attentions=output_attentions,
1541
+ output_hidden_states=output_hidden_states,
1542
+ output_router_logits=output_router_logits,
1543
+ return_dict=return_dict,
1544
+ )
1545
+ # If the user passed a tuple for encoder_outputs, we wrap it in a MoEModelOutput when return_dict=True
1546
+ elif return_dict and not isinstance(encoder_outputs, MoEModelOutput):
1547
+ encoder_outputs = MoEModelOutput(
1548
+ last_hidden_state=encoder_outputs[0],
1549
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
1550
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
1551
+ router_probs=encoder_outputs[3] if len(encoder_outputs) > 3 else None,
1552
+ )
1553
+
1554
+ # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
1555
+ decoder_outputs = self.decoder(
1556
+ input_ids=decoder_input_ids,
1557
+ attention_mask=decoder_attention_mask,
1558
+ encoder_hidden_states=encoder_outputs[0],
1559
+ encoder_attention_mask=attention_mask,
1560
+ head_mask=decoder_head_mask,
1561
+ cross_attn_head_mask=cross_attn_head_mask,
1562
+ past_key_values=past_key_values,
1563
+ inputs_embeds=decoder_inputs_embeds,
1564
+ use_cache=use_cache,
1565
+ output_attentions=output_attentions,
1566
+ output_hidden_states=output_hidden_states,
1567
+ output_router_logits=output_router_logits,
1568
+ return_dict=return_dict,
1569
+ )
1570
+
1571
+ if not return_dict:
1572
+ return decoder_outputs + encoder_outputs
1573
+
1574
+ return Seq2SeqMoEModelOutput(
1575
+ past_key_values=decoder_outputs.past_key_values,
1576
+ cross_attentions=decoder_outputs.cross_attentions,
1577
+ last_hidden_state=decoder_outputs.last_hidden_state,
1578
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
1579
+ encoder_hidden_states=encoder_outputs.hidden_states,
1580
+ decoder_hidden_states=decoder_outputs.hidden_states,
1581
+ encoder_attentions=encoder_outputs.attentions,
1582
+ decoder_attentions=decoder_outputs.attentions,
1583
+ encoder_router_logits=encoder_outputs.router_probs,
1584
+ decoder_router_logits=decoder_outputs.router_probs,
1585
+ )
1586
+
1587
+
1588
+ @add_start_docstrings(
1589
+ "The NllbMoe Model with a language modeling head. Can be used for translation.", NLLB_MOE_START_DOCSTRING
1590
+ )
1591
+ class NllbMoeForConditionalGeneration(NllbMoePreTrainedModel):
1592
+ base_model_prefix = "model"
1593
+ _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight", "lm_head.weight"]
1594
+
1595
+ def __init__(self, config: NllbMoeConfig):
1596
+ super().__init__(config)
1597
+ self.model = NllbMoeModel(config)
1598
+ self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False)
1599
+
1600
+ self.router_z_loss_coef = config.router_z_loss_coef
1601
+ self.router_aux_loss_coef = config.router_aux_loss_coef
1602
+ # Initialize weights and apply final processing
1603
+ self.post_init()
1604
+
1605
+ def get_encoder(self):
1606
+ return self.model.get_encoder()
1607
+
1608
+ def get_decoder(self):
1609
+ return self.model.get_decoder()
1610
+
1611
+ def get_output_embeddings(self):
1612
+ return self.lm_head
1613
+
1614
+ def set_output_embeddings(self, new_embeddings):
1615
+ self.lm_head = new_embeddings
1616
+
1617
+ @add_start_docstrings_to_model_forward(NLLB_MOE_INPUTS_DOCSTRING)
1618
+ @replace_return_docstrings(output_type=Seq2SeqMoEOutput, config_class=_CONFIG_FOR_DOC)
1619
+ @add_end_docstrings(NLLB_MOE_GENERATION_EXAMPLE)
1620
+ def forward(
1621
+ self,
1622
+ input_ids: Optional[torch.LongTensor] = None,
1623
+ attention_mask: Optional[torch.Tensor] = None,
1624
+ decoder_input_ids: Optional[torch.LongTensor] = None,
1625
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
1626
+ head_mask: Optional[torch.Tensor] = None,
1627
+ decoder_head_mask: Optional[torch.Tensor] = None,
1628
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
1629
+ encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
1630
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
1631
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1632
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
1633
+ labels: Optional[torch.LongTensor] = None,
1634
+ use_cache: Optional[bool] = None,
1635
+ output_attentions: Optional[bool] = None,
1636
+ output_hidden_states: Optional[bool] = None,
1637
+ output_router_logits: Optional[bool] = None,
1638
+ return_dict: Optional[bool] = None,
1639
+ ) -> Union[Tuple[torch.Tensor], Seq2SeqMoEOutput]:
1640
+ r"""
1641
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1642
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1643
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1644
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1645
+
1646
+ Returns:
1647
+ """
1648
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
1649
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1650
+ output_router_logits = (
1651
+ output_router_logits if output_router_logits is not None else self.config.output_router_logits
1652
+ )
1653
+ if labels is not None:
1654
+ if decoder_input_ids is None:
1655
+ decoder_input_ids = shift_tokens_right(
1656
+ labels, self.config.pad_token_id, self.config.decoder_start_token_id
1657
+ )
1658
+
1659
+ outputs = self.model(
1660
+ input_ids,
1661
+ attention_mask=attention_mask,
1662
+ decoder_input_ids=decoder_input_ids,
1663
+ encoder_outputs=encoder_outputs,
1664
+ decoder_attention_mask=decoder_attention_mask,
1665
+ head_mask=head_mask,
1666
+ decoder_head_mask=decoder_head_mask,
1667
+ cross_attn_head_mask=cross_attn_head_mask,
1668
+ past_key_values=past_key_values,
1669
+ inputs_embeds=inputs_embeds,
1670
+ decoder_inputs_embeds=decoder_inputs_embeds,
1671
+ use_cache=use_cache,
1672
+ output_attentions=output_attentions,
1673
+ output_hidden_states=output_hidden_states,
1674
+ output_router_logits=output_router_logits,
1675
+ return_dict=return_dict,
1676
+ )
1677
+ lm_logits = self.lm_head(outputs[0])
1678
+
1679
+ loss = None
1680
+ encoder_aux_loss = None
1681
+ decoder_aux_loss = None
1682
+
1683
+ if labels is not None:
1684
+ loss_fct = CrossEntropyLoss(ignore_index=-100)
1685
+ # TODO: check in the config whether the router loss is enabled
1686
+
1687
+ if output_router_logits:
1688
+ encoder_router_logits = outputs[-1]
1689
+ decoder_router_logits = outputs[3 if output_attentions else 4]
1690
+
1691
+ # Compute the router loss (z_loss + auxiliary loss) for each router in the encoder and decoder
1692
+ encoder_router_logits, encoder_expert_indexes = self._unpack_router_logits(encoder_router_logits)
1693
+ encoder_aux_loss = load_balancing_loss_func(encoder_router_logits, encoder_expert_indexes)
1694
+
1695
+ decoder_router_logits, decoder_expert_indexes = self._unpack_router_logits(decoder_router_logits)
1696
+ decoder_aux_loss = load_balancing_loss_func(decoder_router_logits, decoder_expert_indexes)
1697
+
1698
+ loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1))
1699
+
1700
+ if output_router_logits and labels is not None:
1701
+ aux_loss = self.router_aux_loss_coef * (encoder_aux_loss + decoder_aux_loss)
1702
+ loss = loss + aux_loss
1703
+
1704
+ output = (loss,) if loss is not None else ()
1705
+ if not return_dict:
1706
+ output += (lm_logits,)
1707
+ if output_router_logits: # only return the loss if they are not None
1708
+ output += (
1709
+ encoder_aux_loss,
1710
+ decoder_aux_loss,
1711
+ *outputs[1:],
1712
+ )
1713
+ else:
1714
+ output += outputs[1:]
1715
+
1716
+ return output
1717
+
1718
+ return Seq2SeqMoEOutput(
1719
+ loss=loss,
1720
+ logits=lm_logits,
1721
+ past_key_values=outputs.past_key_values,
1722
+ cross_attentions=outputs.cross_attentions,
1723
+ encoder_aux_loss=encoder_aux_loss,
1724
+ decoder_aux_loss=decoder_aux_loss,
1725
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
1726
+ encoder_hidden_states=outputs.encoder_hidden_states,
1727
+ decoder_hidden_states=outputs.decoder_hidden_states,
1728
+ encoder_attentions=outputs.encoder_attentions,
1729
+ decoder_attentions=outputs.decoder_attentions,
1730
+ encoder_router_logits=outputs.encoder_router_logits,
1731
+ decoder_router_logits=outputs.decoder_router_logits,
1732
+ )
1733
+
1734
+ def _unpack_router_logits(self, router_outputs):
1735
+ total_router_logits = []
1736
+ total_expert_indexes = []
1737
+ for router_output in router_outputs:
1738
+ if router_output is not None:
1739
+ router_logits, expert_indexes = router_output
1740
+ total_router_logits.append(router_logits)
1741
+ total_expert_indexes.append(expert_indexes)
1742
+
1743
+ total_router_logits = torch.cat(total_router_logits, dim=1) if len(total_router_logits) > 0 else None
1744
+ total_expert_indexes = torch.stack(total_expert_indexes, dim=1) if len(total_expert_indexes) > 0 else None
1745
+ return total_router_logits, total_expert_indexes
1746
+
1747
+ # Copied from transformers.models.switch_transformers.SwitchTransformersForConditionalGeneration.prepare_inputs_for_generation
1748
+ def prepare_inputs_for_generation(
1749
+ self,
1750
+ decoder_input_ids,
1751
+ past_key_values=None,
1752
+ attention_mask=None,
1753
+ head_mask=None,
1754
+ decoder_head_mask=None,
1755
+ cross_attn_head_mask=None,
1756
+ use_cache=None,
1757
+ encoder_outputs=None,
1758
+ **kwargs,
1759
+ ):
1760
+ # cut decoder_input_ids if past is used
1761
+ if past_key_values is not None:
1762
+ past_length = past_key_values[0][0].shape[2]
1763
+
1764
+ # Some generation methods already pass only the last input ID
1765
+ if decoder_input_ids.shape[1] > past_length:
1766
+ remove_prefix_length = past_length
1767
+ else:
1768
+ # Default to old behavior: keep only final ID
1769
+ remove_prefix_length = decoder_input_ids.shape[1] - 1
1770
+
1771
+ decoder_input_ids = decoder_input_ids[:, remove_prefix_length:]
1772
+
1773
+ return {
1774
+ "input_ids": None, # encoder_outputs is defined. input_ids not needed
1775
+ "encoder_outputs": encoder_outputs,
1776
+ "past_key_values": past_key_values,
1777
+ "decoder_input_ids": decoder_input_ids,
1778
+ "attention_mask": attention_mask,
1779
+ "head_mask": head_mask,
1780
+ "decoder_head_mask": decoder_head_mask,
1781
+ "cross_attn_head_mask": cross_attn_head_mask,
1782
+ "use_cache": use_cache, # change this to avoid caching (presumably for debugging)
1783
+ }
1784
+
1785
+ @staticmethod
1786
+ def _reorder_cache(past_key_values, beam_idx):
1787
+ reordered_past = ()
1788
+ for layer_past in past_key_values:
1789
+ reordered_past += (
1790
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
1791
+ )
1792
+ return reordered_past
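To make the loss assembly in `NllbMoeForConditionalGeneration.forward` above easier to follow: the total training loss is the token-level cross-entropy plus the encoder and decoder router auxiliary losses scaled by `router_aux_loss_coef`. Below is a minimal sketch with dummy tensors; shapes, values and the coefficient are placeholders, not taken from a real checkpoint.

```python
import torch
from torch.nn import CrossEntropyLoss

batch_size, seq_len, vocab_size = 2, 5, 32     # dummy shapes
router_aux_loss_coef = 0.001                   # stands in for config.router_aux_loss_coef

lm_logits = torch.randn(batch_size, seq_len, vocab_size)
labels = torch.randint(0, vocab_size, (batch_size, seq_len))

loss_fct = CrossEntropyLoss(ignore_index=-100)
ce_loss = loss_fct(lm_logits.view(-1, vocab_size), labels.view(-1))

# Stand-ins for the encoder/decoder load-balancing losses returned by load_balancing_loss_func.
encoder_aux_loss = torch.tensor(0.02)
decoder_aux_loss = torch.tensor(0.03)

total_loss = ce_loss + router_aux_loss_coef * (encoder_aux_loss + decoder_aux_loss)
print(total_loss.item())
```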
venv/lib/python3.10/site-packages/transformers/models/owlvit/__init__.py ADDED
@@ -0,0 +1,100 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import (
17
+ OptionalDependencyNotAvailable,
18
+ _LazyModule,
19
+ is_flax_available,
20
+ is_tf_available,
21
+ is_tokenizers_available,
22
+ is_torch_available,
23
+ is_vision_available,
24
+ )
25
+
26
+
27
+ _import_structure = {
28
+ "configuration_owlvit": [
29
+ "OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
30
+ "OwlViTConfig",
31
+ "OwlViTOnnxConfig",
32
+ "OwlViTTextConfig",
33
+ "OwlViTVisionConfig",
34
+ ],
35
+ "processing_owlvit": ["OwlViTProcessor"],
36
+ }
37
+
38
+
39
+ try:
40
+ if not is_vision_available():
41
+ raise OptionalDependencyNotAvailable()
42
+ except OptionalDependencyNotAvailable:
43
+ pass
44
+ else:
45
+ _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
46
+ _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]
47
+
48
+ try:
49
+ if not is_torch_available():
50
+ raise OptionalDependencyNotAvailable()
51
+ except OptionalDependencyNotAvailable:
52
+ pass
53
+ else:
54
+ _import_structure["modeling_owlvit"] = [
55
+ "OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
56
+ "OwlViTModel",
57
+ "OwlViTPreTrainedModel",
58
+ "OwlViTTextModel",
59
+ "OwlViTVisionModel",
60
+ "OwlViTForObjectDetection",
61
+ ]
62
+
63
+ if TYPE_CHECKING:
64
+ from .configuration_owlvit import (
65
+ OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
66
+ OwlViTConfig,
67
+ OwlViTOnnxConfig,
68
+ OwlViTTextConfig,
69
+ OwlViTVisionConfig,
70
+ )
71
+ from .processing_owlvit import OwlViTProcessor
72
+
73
+ try:
74
+ if not is_vision_available():
75
+ raise OptionalDependencyNotAvailable()
76
+ except OptionalDependencyNotAvailable:
77
+ pass
78
+ else:
79
+ from .feature_extraction_owlvit import OwlViTFeatureExtractor
80
+ from .image_processing_owlvit import OwlViTImageProcessor
81
+
82
+ try:
83
+ if not is_torch_available():
84
+ raise OptionalDependencyNotAvailable()
85
+ except OptionalDependencyNotAvailable:
86
+ pass
87
+ else:
88
+ from .modeling_owlvit import (
89
+ OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
90
+ OwlViTForObjectDetection,
91
+ OwlViTModel,
92
+ OwlViTPreTrainedModel,
93
+ OwlViTTextModel,
94
+ OwlViTVisionModel,
95
+ )
96
+
97
+ else:
98
+ import sys
99
+
100
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
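The `_LazyModule` registration at the end of this `__init__.py` keeps the package import cheap: the torch-dependent `modeling_owlvit` module is only imported when one of its exported names is first accessed. A small sketch of that behaviour, assuming `transformers` is installed with torch and vision dependencies available:

```python
import importlib

# Importing the package returns the _LazyModule instance registered above.
owlvit = importlib.import_module("transformers.models.owlvit")
print(type(owlvit).__name__)  # _LazyModule

# Attribute access is what triggers the actual import of modeling_owlvit.
detector_cls = owlvit.OwlViTForObjectDetection
print(detector_cls.__name__)  # OwlViTForObjectDetection
```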
venv/lib/python3.10/site-packages/transformers/models/owlvit/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.55 kB).

venv/lib/python3.10/site-packages/transformers/models/owlvit/__pycache__/configuration_owlvit.cpython-310.pyc ADDED
Binary file (14.6 kB).

venv/lib/python3.10/site-packages/transformers/models/owlvit/__pycache__/convert_owlvit_original_flax_to_hf.cpython-310.pyc ADDED
Binary file (9.53 kB).

venv/lib/python3.10/site-packages/transformers/models/owlvit/__pycache__/feature_extraction_owlvit.cpython-310.pyc ADDED
Binary file (1.02 kB).

venv/lib/python3.10/site-packages/transformers/models/owlvit/__pycache__/image_processing_owlvit.cpython-310.pyc ADDED
Binary file (23 kB).

venv/lib/python3.10/site-packages/transformers/models/owlvit/__pycache__/modeling_owlvit.cpython-310.pyc ADDED
Binary file (55.7 kB).

venv/lib/python3.10/site-packages/transformers/models/owlvit/__pycache__/processing_owlvit.cpython-310.pyc ADDED
Binary file (9.52 kB).
 
venv/lib/python3.10/site-packages/transformers/models/owlvit/configuration_owlvit.py ADDED
@@ -0,0 +1,383 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ OWL-ViT model configuration"""
16
+
17
+ import os
18
+ from collections import OrderedDict
19
+ from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
20
+
21
+
22
+ if TYPE_CHECKING:
23
+ from ...processing_utils import ProcessorMixin
24
+ from ...utils import TensorType
25
+
26
+ from ...configuration_utils import PretrainedConfig
27
+ from ...onnx import OnnxConfig
28
+ from ...utils import logging
29
+
30
+
31
+ logger = logging.get_logger(__name__)
32
+
33
+
34
+ from ..deprecated._archive_maps import OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
35
+
36
+
37
+ class OwlViTTextConfig(PretrainedConfig):
38
+ r"""
39
+ This is the configuration class to store the configuration of an [`OwlViTTextModel`]. It is used to instantiate an
40
+ OwlViT text encoder according to the specified arguments, defining the model architecture. Instantiating a
41
+ configuration with the defaults will yield a similar configuration to that of the OwlViT
42
+ [google/owlvit-base-patch32](https://huggingface.co/google/owlvit-base-patch32) architecture.
43
+
44
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
45
+ documentation from [`PretrainedConfig`] for more information.
46
+
47
+
48
+ Args:
49
+ vocab_size (`int`, *optional*, defaults to 49408):
50
+ Vocabulary size of the OWL-ViT text model. Defines the number of different tokens that can be represented
51
+ by the `input_ids` passed when calling [`OwlViTTextModel`].
52
+ hidden_size (`int`, *optional*, defaults to 512):
53
+ Dimensionality of the encoder layers and the pooler layer.
54
+ intermediate_size (`int`, *optional*, defaults to 2048):
55
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
56
+ num_hidden_layers (`int`, *optional*, defaults to 12):
57
+ Number of hidden layers in the Transformer encoder.
58
+ num_attention_heads (`int`, *optional*, defaults to 8):
59
+ Number of attention heads for each attention layer in the Transformer encoder.
60
+ max_position_embeddings (`int`, *optional*, defaults to 16):
61
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
62
+ just in case (e.g., 512 or 1024 or 2048).
63
+ hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
64
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
65
+ `"relu"`, `"selu"`, `"gelu_new"` and `"quick_gelu"` are supported.
66
+ layer_norm_eps (`float`, *optional*, defaults to 1e-05):
67
+ The epsilon used by the layer normalization layers.
68
+ attention_dropout (`float`, *optional*, defaults to 0.0):
69
+ The dropout ratio for the attention probabilities.
70
+ initializer_range (`float`, *optional*, defaults to 0.02):
71
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
72
+ initializer_factor (`float`, *optional*, defaults to 1.0):
73
+ A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
74
+ testing).
75
+ pad_token_id (`int`, *optional*, defaults to 0):
76
+ The id of the padding token in the input sequences.
77
+ bos_token_id (`int`, *optional*, defaults to 49406):
78
+ The id of the beginning-of-sequence token in the input sequences.
79
+ eos_token_id (`int`, *optional*, defaults to 49407):
80
+ The id of the end-of-sequence token in the input sequences.
81
+
82
+ Example:
83
+
84
+ ```python
85
+ >>> from transformers import OwlViTTextConfig, OwlViTTextModel
86
+
87
+ >>> # Initializing an OwlViTTextConfig with the google/owlvit-base-patch32 style configuration
88
+ >>> configuration = OwlViTTextConfig()
89
+
90
+ >>> # Initializing an OwlViTTextModel from the google/owlvit-base-patch32 style configuration
91
+ >>> model = OwlViTTextModel(configuration)
92
+
93
+ >>> # Accessing the model configuration
94
+ >>> configuration = model.config
95
+ ```"""
96
+
97
+ model_type = "owlvit_text_model"
98
+
99
+ def __init__(
100
+ self,
101
+ vocab_size=49408,
102
+ hidden_size=512,
103
+ intermediate_size=2048,
104
+ num_hidden_layers=12,
105
+ num_attention_heads=8,
106
+ max_position_embeddings=16,
107
+ hidden_act="quick_gelu",
108
+ layer_norm_eps=1e-5,
109
+ attention_dropout=0.0,
110
+ initializer_range=0.02,
111
+ initializer_factor=1.0,
112
+ pad_token_id=0,
113
+ bos_token_id=49406,
114
+ eos_token_id=49407,
115
+ **kwargs,
116
+ ):
117
+ super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
118
+
119
+ self.vocab_size = vocab_size
120
+ self.hidden_size = hidden_size
121
+ self.intermediate_size = intermediate_size
122
+ self.num_hidden_layers = num_hidden_layers
123
+ self.num_attention_heads = num_attention_heads
124
+ self.max_position_embeddings = max_position_embeddings
125
+ self.hidden_act = hidden_act
126
+ self.layer_norm_eps = layer_norm_eps
127
+ self.attention_dropout = attention_dropout
128
+ self.initializer_range = initializer_range
129
+ self.initializer_factor = initializer_factor
130
+
131
+ @classmethod
132
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
133
+ cls._set_token_in_kwargs(kwargs)
134
+
135
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
136
+
137
+ # get the text config dict if we are loading from OwlViTConfig
138
+ if config_dict.get("model_type") == "owlvit":
139
+ config_dict = config_dict["text_config"]
140
+
141
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
142
+ logger.warning(
143
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
144
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
145
+ )
146
+
147
+ return cls.from_dict(config_dict, **kwargs)
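The `from_pretrained` override above lets the text config be loaded directly from a full OWL-ViT checkpoint: when the loaded dict has `model_type == "owlvit"`, the nested `text_config` is extracted. A minimal sketch, assuming Hub access and using the checkpoint named in the docstrings above:

```python
from transformers import OwlViTTextConfig

# Loading from the full OWL-ViT checkpoint picks out its nested text_config.
text_config = OwlViTTextConfig.from_pretrained("google/owlvit-base-patch32")
print(text_config.model_type)   # owlvit_text_model
print(text_config.hidden_size)  # 512, matching the documented default
```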
148
+
149
+
150
+ class OwlViTVisionConfig(PretrainedConfig):
151
+ r"""
152
+ This is the configuration class to store the configuration of an [`OwlViTVisionModel`]. It is used to instantiate
153
+ an OWL-ViT image encoder according to the specified arguments, defining the model architecture. Instantiating a
154
+ configuration with the defaults will yield a similar configuration to that of the OWL-ViT
155
+ [google/owlvit-base-patch32](https://huggingface.co/google/owlvit-base-patch32) architecture.
156
+
157
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
158
+ documentation from [`PretrainedConfig`] for more information.
159
+
160
+ Args:
161
+ hidden_size (`int`, *optional*, defaults to 768):
162
+ Dimensionality of the encoder layers and the pooler layer.
163
+ intermediate_size (`int`, *optional*, defaults to 3072):
164
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
165
+ num_hidden_layers (`int`, *optional*, defaults to 12):
166
+ Number of hidden layers in the Transformer encoder.
167
+ num_attention_heads (`int`, *optional*, defaults to 12):
168
+ Number of attention heads for each attention layer in the Transformer encoder.
169
+ num_channels (`int`, *optional*, defaults to 3):
170
+ Number of channels in the input images.
171
+ image_size (`int`, *optional*, defaults to 768):
172
+ The size (resolution) of each image.
173
+ patch_size (`int`, *optional*, defaults to 32):
174
+ The size (resolution) of each patch.
175
+ hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
176
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
177
+ `"relu"`, `"selu"`, `"gelu_new"` and `"quick_gelu"` are supported.
178
+ layer_norm_eps (`float`, *optional*, defaults to 1e-05):
179
+ The epsilon used by the layer normalization layers.
180
+ attention_dropout (`float`, *optional*, defaults to 0.0):
181
+ The dropout ratio for the attention probabilities.
182
+ initializer_range (`float`, *optional*, defaults to 0.02):
183
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
184
+ initializer_factor (`float`, *optional*, defaults to 1.0):
185
+ A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
186
+ testing).
187
+
188
+ Example:
189
+
190
+ ```python
191
+ >>> from transformers import OwlViTVisionConfig, OwlViTVisionModel
192
+
193
+ >>> # Initializing an OwlViTVisionConfig with the google/owlvit-base-patch32 style configuration
194
+ >>> configuration = OwlViTVisionConfig()
195
+
196
+ >>> # Initializing an OwlViTVisionModel from the google/owlvit-base-patch32 style configuration
197
+ >>> model = OwlViTVisionModel(configuration)
198
+
199
+ >>> # Accessing the model configuration
200
+ >>> configuration = model.config
201
+ ```"""
202
+
+     model_type = "owlvit_vision_model"
+
+     def __init__(
+         self,
+         hidden_size=768,
+         intermediate_size=3072,
+         num_hidden_layers=12,
+         num_attention_heads=12,
+         num_channels=3,
+         image_size=768,
+         patch_size=32,
+         hidden_act="quick_gelu",
+         layer_norm_eps=1e-5,
+         attention_dropout=0.0,
+         initializer_range=0.02,
+         initializer_factor=1.0,
+         **kwargs,
+     ):
+         super().__init__(**kwargs)
+
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+         self.num_channels = num_channels
+         self.image_size = image_size
+         self.patch_size = patch_size
+         self.hidden_act = hidden_act
+         self.layer_norm_eps = layer_norm_eps
+         self.attention_dropout = attention_dropout
+         self.initializer_range = initializer_range
+         self.initializer_factor = initializer_factor
+
236
+     @classmethod
+     def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
+         cls._set_token_in_kwargs(kwargs)
+
+         config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
+
+         # get the vision config dict if we are loading from OwlViTConfig
+         if config_dict.get("model_type") == "owlvit":
+             config_dict = config_dict["vision_config"]
+
+         if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
+             logger.warning(
+                 f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
+                 f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
+             )
+
+         return cls.from_dict(config_dict, **kwargs)
+
+
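# --- Editor's note: illustrative sketch, not part of the diff above. ---
# The `from_pretrained` override above pulls the nested "vision_config" dict
# out of a full "owlvit" checkpoint before building the class. A minimal usage
# sketch (checkpoint name taken from the docstring; requires network access):
from transformers import OwlViTVisionConfig

vision_config = OwlViTVisionConfig.from_pretrained("google/owlvit-base-patch32")
print(vision_config.image_size, vision_config.patch_size)  # expected: 768 32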
255
+ class OwlViTConfig(PretrainedConfig):
+     r"""
+     [`OwlViTConfig`] is the configuration class to store the configuration of an [`OwlViTModel`]. It is used to
+     instantiate an OWL-ViT model according to the specified arguments, defining the text model and vision model
+     configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the OWL-ViT
+     [google/owlvit-base-patch32](https://huggingface.co/google/owlvit-base-patch32) architecture.
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+     Args:
+         text_config (`dict`, *optional*):
+             Dictionary of configuration options used to initialize [`OwlViTTextConfig`].
+         vision_config (`dict`, *optional*):
+             Dictionary of configuration options used to initialize [`OwlViTVisionConfig`].
+         projection_dim (`int`, *optional*, defaults to 512):
+             Dimensionality of text and vision projection layers.
+         logit_scale_init_value (`float`, *optional*, defaults to 2.6592):
+             The initial value of the *logit_scale* parameter. Default is used as per the original OWL-ViT
+             implementation.
+         return_dict (`bool`, *optional*, defaults to `True`):
+             Whether or not the model should return a dictionary. If `False`, returns a tuple.
+         kwargs (*optional*):
+             Dictionary of keyword arguments.
+     """
280
+
+     model_type = "owlvit"
+
+     def __init__(
+         self,
+         text_config=None,
+         vision_config=None,
+         projection_dim=512,
+         logit_scale_init_value=2.6592,
+         return_dict=True,
+         **kwargs,
+     ):
+         super().__init__(**kwargs)
+
+         if text_config is None:
+             text_config = {}
+             logger.info("text_config is None. Initializing the OwlViTTextConfig with default values.")
+
+         if vision_config is None:
+             vision_config = {}
+             logger.info("vision_config is None. Initializing the OwlViTVisionConfig with default values.")
+
+         self.text_config = OwlViTTextConfig(**text_config)
+         self.vision_config = OwlViTVisionConfig(**vision_config)
+
+         self.projection_dim = projection_dim
+         self.logit_scale_init_value = logit_scale_init_value
+         self.return_dict = return_dict
+         self.initializer_factor = 1.0
+
310
+     @classmethod
+     def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
+         cls._set_token_in_kwargs(kwargs)
+
+         config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
+
+         if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
+             logger.warning(
+                 f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
+                 f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
+             )
+
+         return cls.from_dict(config_dict, **kwargs)
+
+     @classmethod
+     def from_text_vision_configs(cls, text_config: Dict, vision_config: Dict, **kwargs):
+         r"""
+         Instantiate an [`OwlViTConfig`] (or a derived class) from an OWL-ViT text model configuration and an OWL-ViT
+         vision model configuration.
+
+         Returns:
+             [`OwlViTConfig`]: An instance of a configuration object
+         """
+         config_dict = {}
+         config_dict["text_config"] = text_config
+         config_dict["vision_config"] = vision_config
+
+         return cls.from_dict(config_dict, **kwargs)
+
+
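# --- Editor's note: illustrative sketch, not part of the diff above. ---
# `OwlViTConfig.from_text_vision_configs` (defined above) simply wraps the two
# sub-config dicts and defers to `from_dict`. A minimal sketch; the extra
# keyword (projection_dim) just shows that kwargs are forwarded to `from_dict`.
from transformers import OwlViTConfig, OwlViTTextConfig, OwlViTVisionConfig

text_config = OwlViTTextConfig()      # default text sub-config
vision_config = OwlViTVisionConfig()  # default vision sub-config

config = OwlViTConfig.from_text_vision_configs(
    text_config.to_dict(), vision_config.to_dict(), projection_dim=512
)
print(config.projection_dim, config.logit_scale_init_value)  # expected: 512 2.6592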
340
+ class OwlViTOnnxConfig(OnnxConfig):
+     @property
+     def inputs(self) -> Mapping[str, Mapping[int, str]]:
+         return OrderedDict(
+             [
+                 ("input_ids", {0: "batch", 1: "sequence"}),
+                 ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
+                 ("attention_mask", {0: "batch", 1: "sequence"}),
+             ]
+         )
+
+     @property
+     def outputs(self) -> Mapping[str, Mapping[int, str]]:
+         return OrderedDict(
+             [
+                 ("logits_per_image", {0: "batch"}),
+                 ("logits_per_text", {0: "batch"}),
+                 ("text_embeds", {0: "batch"}),
+                 ("image_embeds", {0: "batch"}),
+             ]
+         )
+
+     @property
+     def atol_for_validation(self) -> float:
+         return 1e-4
+
+     def generate_dummy_inputs(
+         self,
+         processor: "ProcessorMixin",
+         batch_size: int = -1,
+         seq_length: int = -1,
+         framework: Optional["TensorType"] = None,
+     ) -> Mapping[str, Any]:
+         text_input_dict = super().generate_dummy_inputs(
+             processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework
+         )
+         image_input_dict = super().generate_dummy_inputs(
+             processor.image_processor, batch_size=batch_size, framework=framework
+         )
+         return {**text_input_dict, **image_input_dict}
+
+     @property
+     def default_onnx_opset(self) -> int:
+         return 14
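# --- Editor's note: illustrative sketch, not part of the diff above. ---
# `OwlViTOnnxConfig.generate_dummy_inputs` merges tokenizer and image-processor
# dummies so an exported graph can be traced/validated with both modalities.
# The import path below assumes this diff is transformers'
# models/owlvit/configuration_owlvit.py; downloading the processor needs network access.
from transformers import OwlViTConfig, OwlViTProcessor
from transformers.models.owlvit.configuration_owlvit import OwlViTOnnxConfig

onnx_config = OwlViTOnnxConfig(OwlViTConfig())
processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")

dummy_inputs = onnx_config.generate_dummy_inputs(processor)
print(sorted(dummy_inputs.keys()))  # expected to include attention_mask, input_ids, pixel_values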