applied-ai-018 committed
Commit 19c111d · verified · 1 Parent(s): c75e8d3

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. ckpts/universal/global_step20/zero/15.attention.query_key_value.weight/exp_avg_sq.pt +3 -0
  2. ckpts/universal/global_step20/zero/19.mlp.dense_4h_to_h.weight/exp_avg.pt +3 -0
  3. lm-evaluation-harness/tests/testdata/lambada_mt_en-v0-res.json +1 -0
  4. lm-evaluation-harness/tests/testdata/lambada_standard_cloze-v0-res.json +1 -0
  5. lm-evaluation-harness/tests/testdata/math_counting_and_prob-v0-greedy_until +1 -0
  6. lm-evaluation-harness/tests/testdata/math_prealgebra-v1-greedy_until +1 -0
  7. lm-evaluation-harness/tests/testdata/math_precalc-v1-res.json +1 -0
  8. lm-evaluation-harness/tests/testdata/pile_freelaw-v1-loglikelihood_rolling +1 -0
  9. lm-evaluation-harness/tests/testdata/wmt20-en-ps-v0-res.json +1 -0
  10. venv/lib/python3.10/site-packages/transformers/models/convnext/__init__.py +102 -0
  11. venv/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/__init__.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/configuration_convnext.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/convert_convnext_to_pytorch.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/feature_extraction_convnext.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/image_processing_convnext.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/modeling_convnext.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/modeling_tf_convnext.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/transformers/models/convnext/configuration_convnext.py +142 -0
  19. venv/lib/python3.10/site-packages/transformers/models/convnext/convert_convnext_to_pytorch.py +243 -0
  20. venv/lib/python3.10/site-packages/transformers/models/convnext/feature_extraction_convnext.py +33 -0
  21. venv/lib/python3.10/site-packages/transformers/models/convnext/image_processing_convnext.py +338 -0
  22. venv/lib/python3.10/site-packages/transformers/models/convnext/modeling_convnext.py +551 -0
  23. venv/lib/python3.10/site-packages/transformers/models/convnext/modeling_tf_convnext.py +667 -0
  24. venv/lib/python3.10/site-packages/transformers/models/gpt_sw3/__pycache__/convert_megatron_to_pytorch.cpython-310.pyc +0 -0
  25. venv/lib/python3.10/site-packages/transformers/models/mbart50/__init__.py +58 -0
  26. venv/lib/python3.10/site-packages/transformers/models/mbart50/__pycache__/__init__.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/transformers/models/mbart50/__pycache__/tokenization_mbart50.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/transformers/models/mbart50/__pycache__/tokenization_mbart50_fast.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/transformers/models/mbart50/tokenization_mbart50.py +354 -0
  30. venv/lib/python3.10/site-packages/transformers/models/mbart50/tokenization_mbart50_fast.py +259 -0
  31. venv/lib/python3.10/site-packages/transformers/models/pegasus/__init__.py +140 -0
  32. venv/lib/python3.10/site-packages/transformers/models/pegasus/__pycache__/modeling_tf_pegasus.cpython-310.pyc +0 -0
  33. venv/lib/python3.10/site-packages/transformers/models/pegasus/configuration_pegasus.py +164 -0
  34. venv/lib/python3.10/site-packages/transformers/models/pegasus/convert_pegasus_tf_to_pytorch.py +131 -0
  35. venv/lib/python3.10/site-packages/transformers/models/pegasus/modeling_flax_pegasus.py +1530 -0
  36. venv/lib/python3.10/site-packages/transformers/models/pegasus/modeling_pegasus.py +1693 -0
  37. venv/lib/python3.10/site-packages/transformers/models/pegasus/modeling_tf_pegasus.py +1572 -0
  38. venv/lib/python3.10/site-packages/transformers/models/pegasus/tokenization_pegasus.py +285 -0
  39. venv/lib/python3.10/site-packages/transformers/models/pegasus/tokenization_pegasus_fast.py +217 -0
  40. venv/lib/python3.10/site-packages/transformers/models/poolformer/__init__.py +83 -0
  41. venv/lib/python3.10/site-packages/transformers/models/poolformer/__pycache__/__init__.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/transformers/models/poolformer/__pycache__/configuration_poolformer.cpython-310.pyc +0 -0
  43. venv/lib/python3.10/site-packages/transformers/models/poolformer/__pycache__/convert_poolformer_original_to_pytorch.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/transformers/models/poolformer/__pycache__/feature_extraction_poolformer.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/transformers/models/poolformer/__pycache__/image_processing_poolformer.cpython-310.pyc +0 -0
  46. venv/lib/python3.10/site-packages/transformers/models/poolformer/__pycache__/modeling_poolformer.cpython-310.pyc +0 -0
  47. venv/lib/python3.10/site-packages/transformers/models/poolformer/configuration_poolformer.py +147 -0
  48. venv/lib/python3.10/site-packages/transformers/models/poolformer/convert_poolformer_original_to_pytorch.py +214 -0
  49. venv/lib/python3.10/site-packages/transformers/models/poolformer/feature_extraction_poolformer.py +33 -0
  50. venv/lib/python3.10/site-packages/transformers/models/poolformer/image_processing_poolformer.py +377 -0
ckpts/universal/global_step20/zero/15.attention.query_key_value.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e2797d15dd0a3fdf23677f17138c83cad3de3fdcec0f84d272961324501a54c3
+ size 50332843
ckpts/universal/global_step20/zero/19.mlp.dense_4h_to_h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:63b97f36159ab0725204bc620192509cca489c08622fe379cc7b3ee164fcd9ac
+ size 33555612
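
The two `.pt` entries above are Git LFS pointer files, not the optimizer tensors themselves: each pointer records the spec version, the SHA-256 of the real blob, and its size in bytes. A minimal sketch of how such a pointer could be parsed and checked against a downloaded blob (the helper names and file paths are illustrative, not part of this commit):

```python
import hashlib
from pathlib import Path


def parse_lfs_pointer(text: str) -> dict:
    # A Git LFS pointer is three "key value" lines: version, oid, size.
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {
        "version": fields["version"],
        "sha256": fields["oid"].removeprefix("sha256:"),
        "size": int(fields["size"]),
    }


def verify_blob(pointer_path: str, blob_path: str) -> bool:
    # Check that a locally downloaded blob matches the pointer's size and digest.
    pointer = parse_lfs_pointer(Path(pointer_path).read_text())
    blob = Path(blob_path).read_bytes()
    return len(blob) == pointer["size"] and hashlib.sha256(blob).hexdigest() == pointer["sha256"]
```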
lm-evaluation-harness/tests/testdata/lambada_mt_en-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"lambada_mt_en": {"acc": 0.0, "acc_stderr": 0.0, "ppl": 1.6479047769869253, "ppl_stderr": 0.006497321146240192}}, "versions": {"lambada_mt_en": 0}}
lm-evaluation-harness/tests/testdata/lambada_standard_cloze-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"lambada_standard_cloze": {"acc": 0.0, "acc_stderr": 0.0, "ppl": 1.6479047769869253, "ppl_stderr": 0.006497321146240192}}, "versions": {"lambada_standard_cloze": 0}}
lm-evaluation-harness/tests/testdata/math_counting_and_prob-v0-greedy_until ADDED
@@ -0,0 +1 @@
+ 2aa9ae43ee9dbb2457525247d7b65358632c5eaa9cbfc40cf95a4f17f5d942ad
lm-evaluation-harness/tests/testdata/math_prealgebra-v1-greedy_until ADDED
@@ -0,0 +1 @@
+ 752cdf343d7152e476b0273065024f6ea0e0f47ea385c6bdf9067736cb39724a
lm-evaluation-harness/tests/testdata/math_precalc-v1-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"math_precalc": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"math_precalc": 1}}
lm-evaluation-harness/tests/testdata/pile_freelaw-v1-loglikelihood_rolling ADDED
@@ -0,0 +1 @@
+ d77f3f68aadd6cbf1290c2f6737b2ed5d5c2a60e4c81a65c280f207783caabe1
lm-evaluation-harness/tests/testdata/wmt20-en-ps-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"wmt20-en-ps": {"bleu": 0.0, "bleu_stderr": 0.0, "chrf": 2.1193813610582323e-06, "chrf_stderr": 2.113911466119111e-06, "ter": 1.0, "ter_stderr": 0.0}}, "versions": {"wmt20-en-ps": 0}}
venv/lib/python3.10/site-packages/transformers/models/convnext/__init__.py ADDED
@@ -0,0 +1,102 @@
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ from typing import TYPE_CHECKING
+
+ from ...utils import (
+     OptionalDependencyNotAvailable,
+     _LazyModule,
+     is_tf_available,
+     is_torch_available,
+     is_vision_available,
+ )
+
+
+ _import_structure = {
+     "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
+ }
+
+ try:
+     if not is_vision_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
+     _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]
+
+ try:
+     if not is_torch_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_convnext"] = [
+         "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
+         "ConvNextForImageClassification",
+         "ConvNextModel",
+         "ConvNextPreTrainedModel",
+         "ConvNextBackbone",
+     ]
+
+ try:
+     if not is_tf_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_tf_convnext"] = [
+         "TFConvNextForImageClassification",
+         "TFConvNextModel",
+         "TFConvNextPreTrainedModel",
+     ]
+
+ if TYPE_CHECKING:
+     from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
+
+     try:
+         if not is_vision_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .feature_extraction_convnext import ConvNextFeatureExtractor
+         from .image_processing_convnext import ConvNextImageProcessor
+
+     try:
+         if not is_torch_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_convnext import (
+             CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
+             ConvNextBackbone,
+             ConvNextForImageClassification,
+             ConvNextModel,
+             ConvNextPreTrainedModel,
+         )
+
+     try:
+         if not is_tf_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
+
+
+ else:
+     import sys
+
+     sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
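
The `__init__.py` above wires ConvNeXT into the library's lazy-import machinery: at runtime the module replaces itself with a `_LazyModule`, so the torch/TF/vision submodules are only imported when one of the exported names is first accessed, while the `TYPE_CHECKING` branch gives static analyzers real imports. A small usage sketch (the printed value is indicative only):

```python
# Thanks to the lazy __init__ above, this import does not immediately pull in
# modeling_convnext (and therefore torch); submodules load on first attribute access.
from transformers import ConvNextConfig

config = ConvNextConfig()
print(config.model_type)  # "convnext"
```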
venv/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.59 kB).
 
venv/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/configuration_convnext.cpython-310.pyc ADDED
Binary file (6.02 kB).
 
venv/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/convert_convnext_to_pytorch.cpython-310.pyc ADDED
Binary file (7.14 kB).
 
venv/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/feature_extraction_convnext.cpython-310.pyc ADDED
Binary file (1.03 kB).
 
venv/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/image_processing_convnext.cpython-310.pyc ADDED
Binary file (13.1 kB).
 
venv/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/modeling_convnext.cpython-310.pyc ADDED
Binary file (17.9 kB).
 
venv/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/modeling_tf_convnext.cpython-310.pyc ADDED
Binary file (22.1 kB).
 
venv/lib/python3.10/site-packages/transformers/models/convnext/configuration_convnext.py ADDED
@@ -0,0 +1,142 @@
+ # coding=utf-8
+ # Copyright 2022 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """ ConvNeXT model configuration"""
+
+ from collections import OrderedDict
+ from typing import Mapping
+
+ from packaging import version
+
+ from ...configuration_utils import PretrainedConfig
+ from ...onnx import OnnxConfig
+ from ...utils import logging
+ from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
+
+
+ logger = logging.get_logger(__name__)
+
+
+ from ..deprecated._archive_maps import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402
+
+
+ class ConvNextConfig(BackboneConfigMixin, PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`ConvNextModel`]. It is used to instantiate an
+     ConvNeXT model according to the specified arguments, defining the model architecture. Instantiating a configuration
+     with the defaults will yield a similar configuration to that of the ConvNeXT
+     [facebook/convnext-tiny-224](https://huggingface.co/facebook/convnext-tiny-224) architecture.
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+     Args:
+         num_channels (`int`, *optional*, defaults to 3):
+             The number of input channels.
+         patch_size (`int`, optional, defaults to 4):
+             Patch size to use in the patch embedding layer.
+         num_stages (`int`, optional, defaults to 4):
+             The number of stages in the model.
+         hidden_sizes (`List[int]`, *optional*, defaults to [96, 192, 384, 768]):
+             Dimensionality (hidden size) at each stage.
+         depths (`List[int]`, *optional*, defaults to [3, 3, 9, 3]):
+             Depth (number of blocks) for each stage.
+         hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+             The non-linear activation function (function or string) in each block. If string, `"gelu"`, `"relu"`,
+             `"selu"` and `"gelu_new"` are supported.
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+             The epsilon used by the layer normalization layers.
+         layer_scale_init_value (`float`, *optional*, defaults to 1e-6):
+             The initial value for the layer scale.
+         drop_path_rate (`float`, *optional*, defaults to 0.0):
+             The drop rate for stochastic depth.
+         out_features (`List[str]`, *optional*):
+             If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
+             (depending on how many stages the model has). If unset and `out_indices` is set, will default to the
+             corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
+             same order as defined in the `stage_names` attribute.
+         out_indices (`List[int]`, *optional*):
+             If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
+             many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
+             If unset and `out_features` is unset, will default to the last stage. Must be in the
+             same order as defined in the `stage_names` attribute.
+
+     Example:
+     ```python
+     >>> from transformers import ConvNextConfig, ConvNextModel
+
+     >>> # Initializing a ConvNext convnext-tiny-224 style configuration
+     >>> configuration = ConvNextConfig()
+
+     >>> # Initializing a model (with random weights) from the convnext-tiny-224 style configuration
+     >>> model = ConvNextModel(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+
+     model_type = "convnext"
+
+     def __init__(
+         self,
+         num_channels=3,
+         patch_size=4,
+         num_stages=4,
+         hidden_sizes=None,
+         depths=None,
+         hidden_act="gelu",
+         initializer_range=0.02,
+         layer_norm_eps=1e-12,
+         layer_scale_init_value=1e-6,
+         drop_path_rate=0.0,
+         image_size=224,
+         out_features=None,
+         out_indices=None,
+         **kwargs,
+     ):
+         super().__init__(**kwargs)
+
+         self.num_channels = num_channels
+         self.patch_size = patch_size
+         self.num_stages = num_stages
+         self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
+         self.depths = [3, 3, 9, 3] if depths is None else depths
+         self.hidden_act = hidden_act
+         self.initializer_range = initializer_range
+         self.layer_norm_eps = layer_norm_eps
+         self.layer_scale_init_value = layer_scale_init_value
+         self.drop_path_rate = drop_path_rate
+         self.image_size = image_size
+         self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
+         self._out_features, self._out_indices = get_aligned_output_features_output_indices(
+             out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
+         )
+
+
+ class ConvNextOnnxConfig(OnnxConfig):
+     torch_onnx_minimum_version = version.parse("1.11")
+
+     @property
+     def inputs(self) -> Mapping[str, Mapping[int, str]]:
+         return OrderedDict(
+             [
+                 ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
+             ]
+         )
+
+     @property
+     def atol_for_validation(self) -> float:
+         return 1e-5
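
The configuration docstring above already shows basic instantiation; the `out_features`/`out_indices` arguments are what the backbone path consumes. A short sketch of that usage (the printed values are indicative of the default five stage names):

```python
from transformers import ConvNextConfig

# Defaults reproduce the convnext-tiny layout: depths [3, 3, 9, 3], hidden sizes [96, 192, 384, 768].
config = ConvNextConfig(out_features=["stage2", "stage4"])

# get_aligned_output_features_output_indices keeps names and indices consistent,
# so out_indices is derived from out_features here.
print(config.stage_names)   # e.g. ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
print(config.out_features)  # e.g. ['stage2', 'stage4']
print(config.out_indices)   # e.g. [2, 4]
```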
venv/lib/python3.10/site-packages/transformers/models/convnext/convert_convnext_to_pytorch.py ADDED
@@ -0,0 +1,243 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Inc. team.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Convert ConvNext checkpoints from the original repository.
+
+ URL: https://github.com/facebookresearch/ConvNeXt"""
+
+
+ import argparse
+ import json
+ from pathlib import Path
+
+ import requests
+ import torch
+ from huggingface_hub import hf_hub_download
+ from PIL import Image
+
+ from transformers import ConvNextConfig, ConvNextForImageClassification, ConvNextImageProcessor
+ from transformers.utils import logging
+
+
+ logging.set_verbosity_info()
+ logger = logging.get_logger(__name__)
+
+
+ def get_convnext_config(checkpoint_url):
+     config = ConvNextConfig()
+
+     if "tiny" in checkpoint_url:
+         depths = [3, 3, 9, 3]
+         hidden_sizes = [96, 192, 384, 768]
+     if "small" in checkpoint_url:
+         depths = [3, 3, 27, 3]
+         hidden_sizes = [96, 192, 384, 768]
+     if "base" in checkpoint_url:
+         depths = [3, 3, 27, 3]
+         hidden_sizes = [128, 256, 512, 1024]
+     if "large" in checkpoint_url:
+         depths = [3, 3, 27, 3]
+         hidden_sizes = [192, 384, 768, 1536]
+     if "xlarge" in checkpoint_url:
+         depths = [3, 3, 27, 3]
+         hidden_sizes = [256, 512, 1024, 2048]
+
+     if "1k" in checkpoint_url:
+         num_labels = 1000
+         filename = "imagenet-1k-id2label.json"
+         expected_shape = (1, 1000)
+     else:
+         num_labels = 21841
+         filename = "imagenet-22k-id2label.json"
+         expected_shape = (1, 21841)
+
+     repo_id = "huggingface/label-files"
+     config.num_labels = num_labels
+     id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
+     id2label = {int(k): v for k, v in id2label.items()}
+     if "1k" not in checkpoint_url:
+         # this dataset contains 21843 labels but the model only has 21841
+         # we delete the classes as mentioned in https://github.com/google-research/big_transfer/issues/18
+         del id2label[9205]
+         del id2label[15027]
+     config.id2label = id2label
+     config.label2id = {v: k for k, v in id2label.items()}
+     config.hidden_sizes = hidden_sizes
+     config.depths = depths
+
+     return config, expected_shape
+
+
+ def rename_key(name):
+     if "downsample_layers.0.0" in name:
+         name = name.replace("downsample_layers.0.0", "embeddings.patch_embeddings")
+     if "downsample_layers.0.1" in name:
+         name = name.replace("downsample_layers.0.1", "embeddings.norm")  # we rename to layernorm later on
+     if "downsample_layers.1.0" in name:
+         name = name.replace("downsample_layers.1.0", "stages.1.downsampling_layer.0")
+     if "downsample_layers.1.1" in name:
+         name = name.replace("downsample_layers.1.1", "stages.1.downsampling_layer.1")
+     if "downsample_layers.2.0" in name:
+         name = name.replace("downsample_layers.2.0", "stages.2.downsampling_layer.0")
+     if "downsample_layers.2.1" in name:
+         name = name.replace("downsample_layers.2.1", "stages.2.downsampling_layer.1")
+     if "downsample_layers.3.0" in name:
+         name = name.replace("downsample_layers.3.0", "stages.3.downsampling_layer.0")
+     if "downsample_layers.3.1" in name:
+         name = name.replace("downsample_layers.3.1", "stages.3.downsampling_layer.1")
+     if "stages" in name and "downsampling_layer" not in name:
+         # stages.0.0. for instance should be renamed to stages.0.layers.0.
+         name = name[: len("stages.0")] + ".layers" + name[len("stages.0") :]
+     if "stages" in name:
+         name = name.replace("stages", "encoder.stages")
+     if "norm" in name:
+         name = name.replace("norm", "layernorm")
+     if "gamma" in name:
+         name = name.replace("gamma", "layer_scale_parameter")
+     if "head" in name:
+         name = name.replace("head", "classifier")
+
+     return name
+
+
+ # We will verify our results on an image of cute cats
+ def prepare_img():
+     url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+     im = Image.open(requests.get(url, stream=True).raw)
+     return im
+
+
+ @torch.no_grad()
+ def convert_convnext_checkpoint(checkpoint_url, pytorch_dump_folder_path):
+     """
+     Copy/paste/tweak model's weights to our ConvNext structure.
+     """
+
+     # define ConvNext configuration based on URL
+     config, expected_shape = get_convnext_config(checkpoint_url)
+     # load original state_dict from URL
+     state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]
+     # rename keys
+     for key in state_dict.copy().keys():
+         val = state_dict.pop(key)
+         state_dict[rename_key(key)] = val
+     # add prefix to all keys expect classifier head
+     for key in state_dict.copy().keys():
+         val = state_dict.pop(key)
+         if not key.startswith("classifier"):
+             key = "convnext." + key
+         state_dict[key] = val
+
+     # load HuggingFace model
+     model = ConvNextForImageClassification(config)
+     model.load_state_dict(state_dict)
+     model.eval()
+
+     # Check outputs on an image, prepared by ConvNextImageProcessor
+     size = 224 if "224" in checkpoint_url else 384
+     image_processor = ConvNextImageProcessor(size=size)
+     pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values
+
+     logits = model(pixel_values).logits
+
+     # note: the logits below were obtained without center cropping
+     if checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth":
+         expected_logits = torch.tensor([-0.1210, -0.6605, 0.1918])
+     elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_small_1k_224_ema.pth":
+         expected_logits = torch.tensor([-0.4473, -0.1847, -0.6365])
+     elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_base_1k_224_ema.pth":
+         expected_logits = torch.tensor([0.4525, 0.7539, 0.0308])
+     elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_base_1k_384.pth":
+         expected_logits = torch.tensor([0.3561, 0.6350, -0.0384])
+     elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_large_1k_224_ema.pth":
+         expected_logits = torch.tensor([0.4174, -0.0989, 0.1489])
+     elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_large_1k_384.pth":
+         expected_logits = torch.tensor([0.2513, -0.1349, -0.1613])
+     elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_224.pth":
+         expected_logits = torch.tensor([1.2980, 0.3631, -0.1198])
+     elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_224.pth":
+         expected_logits = torch.tensor([1.2963, 0.1227, 0.1723])
+     elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_224.pth":
+         expected_logits = torch.tensor([1.7956, 0.8390, 0.2820])
+     elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_1k_224.pth":
+         expected_logits = torch.tensor([-0.2822, -0.0502, -0.0878])
+     elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_1k_384.pth":
+         expected_logits = torch.tensor([-0.5672, -0.0730, -0.4348])
+     elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_1k_224.pth":
+         expected_logits = torch.tensor([0.2681, 0.2365, 0.6246])
+     elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_1k_384.pth":
+         expected_logits = torch.tensor([-0.2642, 0.3931, 0.5116])
+     elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_1k_224_ema.pth":
+         expected_logits = torch.tensor([-0.6677, -0.1873, -0.8379])
+     elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_1k_384_ema.pth":
+         expected_logits = torch.tensor([-0.7749, -0.2967, -0.6444])
+     else:
+         raise ValueError(f"Unknown URL: {checkpoint_url}")
+
+     assert torch.allclose(logits[0, :3], expected_logits, atol=1e-3)
+     assert logits.shape == expected_shape
+
+     Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
+     print(f"Saving model to {pytorch_dump_folder_path}")
+     model.save_pretrained(pytorch_dump_folder_path)
+     print(f"Saving image processor to {pytorch_dump_folder_path}")
+     image_processor.save_pretrained(pytorch_dump_folder_path)
+
+     print("Pushing model to the hub...")
+     model_name = "convnext"
+     if "tiny" in checkpoint_url:
+         model_name += "-tiny"
+     elif "small" in checkpoint_url:
+         model_name += "-small"
+     elif "base" in checkpoint_url:
+         model_name += "-base"
+     elif "xlarge" in checkpoint_url:
+         model_name += "-xlarge"
+     elif "large" in checkpoint_url:
+         model_name += "-large"
+     if "224" in checkpoint_url:
+         model_name += "-224"
+     elif "384" in checkpoint_url:
+         model_name += "-384"
+     if "22k" in checkpoint_url and "1k" not in checkpoint_url:
+         model_name += "-22k"
+     if "22k" in checkpoint_url and "1k" in checkpoint_url:
+         model_name += "-22k-1k"
+
+     model.push_to_hub(
+         repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
+         organization="nielsr",
+         commit_message="Add model",
+     )
+
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser()
+     # Required parameters
+     parser.add_argument(
+         "--checkpoint_url",
+         default="https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth",
+         type=str,
+         help="URL of the original ConvNeXT checkpoint you'd like to convert.",
+     )
+     parser.add_argument(
+         "--pytorch_dump_folder_path",
+         default=None,
+         type=str,
+         required=True,
+         help="Path to the output PyTorch model directory.",
+     )
+
+     args = parser.parse_args()
+     convert_convnext_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
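
A hedged sketch of driving the conversion script above from Python rather than the CLI; the output directory name is made up, the URL is the script's own default, and note the function ends by pushing to the Hub, so it needs write access to finish successfully:

```python
from transformers.models.convnext.convert_convnext_to_pytorch import convert_convnext_checkpoint

# Downloads the original Facebook checkpoint, remaps its keys with rename_key,
# checks the logits against the hard-coded expectations, then saves and pushes.
convert_convnext_checkpoint(
    checkpoint_url="https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth",
    pytorch_dump_folder_path="convnext-tiny-224-converted",  # hypothetical output directory
)
```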
venv/lib/python3.10/site-packages/transformers/models/convnext/feature_extraction_convnext.py ADDED
@@ -0,0 +1,33 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Feature extractor class for ConvNeXT."""
+
+ import warnings
+
+ from ...utils import logging
+ from .image_processing_convnext import ConvNextImageProcessor
+
+
+ logger = logging.get_logger(__name__)
+
+
+ class ConvNextFeatureExtractor(ConvNextImageProcessor):
+     def __init__(self, *args, **kwargs) -> None:
+         warnings.warn(
+             "The class ConvNextFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
+             " Please use ConvNextImageProcessor instead.",
+             FutureWarning,
+         )
+         super().__init__(*args, **kwargs)
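
`ConvNextFeatureExtractor` above is only a deprecation shim over `ConvNextImageProcessor`; a quick sketch of what the shim does when instantiated:

```python
import warnings

from transformers import ConvNextFeatureExtractor, ConvNextImageProcessor

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    extractor = ConvNextFeatureExtractor()  # emits a FutureWarning, otherwise identical behavior

assert isinstance(extractor, ConvNextImageProcessor)
assert any(issubclass(w.category, FutureWarning) for w in caught)
```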
venv/lib/python3.10/site-packages/transformers/models/convnext/image_processing_convnext.py ADDED
@@ -0,0 +1,338 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Image processor class for ConvNeXT."""
16
+
17
+ from typing import Dict, List, Optional, Union
18
+
19
+ import numpy as np
20
+
21
+ from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
22
+ from ...image_transforms import (
23
+ center_crop,
24
+ get_resize_output_image_size,
25
+ resize,
26
+ to_channel_dimension_format,
27
+ )
28
+ from ...image_utils import (
29
+ IMAGENET_STANDARD_MEAN,
30
+ IMAGENET_STANDARD_STD,
31
+ ChannelDimension,
32
+ ImageInput,
33
+ PILImageResampling,
34
+ infer_channel_dimension_format,
35
+ is_scaled_image,
36
+ make_list_of_images,
37
+ to_numpy_array,
38
+ valid_images,
39
+ validate_kwargs,
40
+ validate_preprocess_arguments,
41
+ )
42
+ from ...utils import TensorType, is_vision_available, logging
43
+
44
+
45
+ if is_vision_available():
46
+ import PIL
47
+
48
+
49
+ logger = logging.get_logger(__name__)
50
+
51
+
52
+ class ConvNextImageProcessor(BaseImageProcessor):
53
+ r"""
54
+ Constructs a ConvNeXT image processor.
55
+
56
+ Args:
57
+ do_resize (`bool`, *optional*, defaults to `True`):
58
+ Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be overriden
59
+ by `do_resize` in the `preprocess` method.
60
+ size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 384}`):
61
+ Resolution of the output image after `resize` is applied. If `size["shortest_edge"]` >= 384, the image is
62
+ resized to `(size["shortest_edge"], size["shortest_edge"])`. Otherwise, the smaller edge of the image will
63
+ be matched to `int(size["shortest_edge"]/crop_pct)`, after which the image is cropped to
64
+ `(size["shortest_edge"], size["shortest_edge"])`. Only has an effect if `do_resize` is set to `True`. Can
65
+ be overriden by `size` in the `preprocess` method.
66
+ crop_pct (`float` *optional*, defaults to 224 / 256):
67
+ Percentage of the image to crop. Only has an effect if `do_resize` is `True` and size < 384. Can be
68
+ overriden by `crop_pct` in the `preprocess` method.
69
+ resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
70
+ Resampling filter to use if resizing the image. Can be overriden by `resample` in the `preprocess` method.
71
+ do_rescale (`bool`, *optional*, defaults to `True`):
72
+ Whether to rescale the image by the specified scale `rescale_factor`. Can be overriden by `do_rescale` in
73
+ the `preprocess` method.
74
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
75
+ Scale factor to use if rescaling the image. Can be overriden by `rescale_factor` in the `preprocess`
76
+ method.
77
+ do_normalize (`bool`, *optional*, defaults to `True`):
78
+ Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
79
+ method.
80
+ image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
81
+ Mean to use if normalizing the image. This is a float or list of floats the length of the number of
82
+ channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
83
+ image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
84
+ Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
85
+ number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
86
+ """
87
+
88
+ model_input_names = ["pixel_values"]
89
+
90
+ def __init__(
91
+ self,
92
+ do_resize: bool = True,
93
+ size: Dict[str, int] = None,
94
+ crop_pct: float = None,
95
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
96
+ do_rescale: bool = True,
97
+ rescale_factor: Union[int, float] = 1 / 255,
98
+ do_normalize: bool = True,
99
+ image_mean: Optional[Union[float, List[float]]] = None,
100
+ image_std: Optional[Union[float, List[float]]] = None,
101
+ **kwargs,
102
+ ) -> None:
103
+ super().__init__(**kwargs)
104
+ size = size if size is not None else {"shortest_edge": 384}
105
+ size = get_size_dict(size, default_to_square=False)
106
+
107
+ self.do_resize = do_resize
108
+ self.size = size
109
+ # Default value set here for backwards compatibility where the value in config is None
110
+ self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
111
+ self.resample = resample
112
+ self.do_rescale = do_rescale
113
+ self.rescale_factor = rescale_factor
114
+ self.do_normalize = do_normalize
115
+ self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
116
+ self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
117
+ self._valid_processor_keys = [
118
+ "images",
119
+ "do_resize",
120
+ "size",
121
+ "crop_pct",
122
+ "resample",
123
+ "do_rescale",
124
+ "rescale_factor",
125
+ "do_normalize",
126
+ "image_mean",
127
+ "image_std",
128
+ "return_tensors",
129
+ "data_format",
130
+ "input_data_format",
131
+ ]
132
+
133
+ def resize(
134
+ self,
135
+ image: np.ndarray,
136
+ size: Dict[str, int],
137
+ crop_pct: float,
138
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
139
+ data_format: Optional[Union[str, ChannelDimension]] = None,
140
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
141
+ **kwargs,
142
+ ) -> np.ndarray:
143
+ """
144
+ Resize an image.
145
+
146
+ Args:
147
+ image (`np.ndarray`):
148
+ Image to resize.
149
+ size (`Dict[str, int]`):
150
+ Dictionary of the form `{"shortest_edge": int}`, specifying the size of the output image. If
151
+ `size["shortest_edge"]` >= 384 image is resized to `(size["shortest_edge"], size["shortest_edge"])`.
152
+ Otherwise, the smaller edge of the image will be matched to `int(size["shortest_edge"] / crop_pct)`,
153
+ after which the image is cropped to `(size["shortest_edge"], size["shortest_edge"])`.
154
+ crop_pct (`float`):
155
+ Percentage of the image to crop. Only has an effect if size < 384.
156
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
157
+ Resampling filter to use when resizing the image.
158
+ data_format (`str` or `ChannelDimension`, *optional*):
159
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
160
+ input_data_format (`ChannelDimension` or `str`, *optional*):
161
+ The channel dimension format of the input image. If not provided, it will be inferred from the input
162
+ image.
163
+ """
164
+ size = get_size_dict(size, default_to_square=False)
165
+ if "shortest_edge" not in size:
166
+ raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
167
+ shortest_edge = size["shortest_edge"]
168
+
169
+ if shortest_edge < 384:
170
+ # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
171
+ resize_shortest_edge = int(shortest_edge / crop_pct)
172
+ resize_size = get_resize_output_image_size(
173
+ image, size=resize_shortest_edge, default_to_square=False, input_data_format=input_data_format
174
+ )
175
+ image = resize(
176
+ image=image,
177
+ size=resize_size,
178
+ resample=resample,
179
+ data_format=data_format,
180
+ input_data_format=input_data_format,
181
+ **kwargs,
182
+ )
183
+ # then crop to (shortest_edge, shortest_edge)
184
+ return center_crop(
185
+ image=image,
186
+ size=(shortest_edge, shortest_edge),
187
+ data_format=data_format,
188
+ input_data_format=input_data_format,
189
+ **kwargs,
190
+ )
191
+ else:
192
+ # warping (no cropping) when evaluated at 384 or larger
193
+ return resize(
194
+ image,
195
+ size=(shortest_edge, shortest_edge),
196
+ resample=resample,
197
+ data_format=data_format,
198
+ input_data_format=input_data_format,
199
+ **kwargs,
200
+ )
201
+
202
+ def preprocess(
203
+ self,
204
+ images: ImageInput,
205
+ do_resize: bool = None,
206
+ size: Dict[str, int] = None,
207
+ crop_pct: float = None,
208
+ resample: PILImageResampling = None,
209
+ do_rescale: bool = None,
210
+ rescale_factor: float = None,
211
+ do_normalize: bool = None,
212
+ image_mean: Optional[Union[float, List[float]]] = None,
213
+ image_std: Optional[Union[float, List[float]]] = None,
214
+ return_tensors: Optional[Union[str, TensorType]] = None,
215
+ data_format: ChannelDimension = ChannelDimension.FIRST,
216
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
217
+ **kwargs,
218
+ ) -> PIL.Image.Image:
219
+ """
220
+ Preprocess an image or batch of images.
221
+
222
+ Args:
223
+ images (`ImageInput`):
224
+ Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
225
+ passing in images with pixel values between 0 and 1, set `do_rescale=False`.
226
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
227
+ Whether to resize the image.
228
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
229
+ Size of the output image after `resize` has been applied. If `size["shortest_edge"]` >= 384, the image
230
+ is resized to `(size["shortest_edge"], size["shortest_edge"])`. Otherwise, the smaller edge of the
231
+ image will be matched to `int(size["shortest_edge"]/ crop_pct)`, after which the image is cropped to
232
+ `(size["shortest_edge"], size["shortest_edge"])`. Only has an effect if `do_resize` is set to `True`.
233
+ crop_pct (`float`, *optional*, defaults to `self.crop_pct`):
234
+ Percentage of the image to crop if size < 384.
235
+ resample (`int`, *optional*, defaults to `self.resample`):
236
+ Resampling filter to use if resizing the image. This can be one of `PILImageResampling`, filters. Only
237
+ has an effect if `do_resize` is set to `True`.
238
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
239
+ Whether to rescale the image values between [0 - 1].
240
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
241
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
242
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
243
+ Whether to normalize the image.
244
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
245
+ Image mean.
246
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
247
+ Image standard deviation.
248
+ return_tensors (`str` or `TensorType`, *optional*):
249
+ The type of tensors to return. Can be one of:
250
+ - Unset: Return a list of `np.ndarray`.
251
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
252
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
253
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
254
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
255
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
256
+ The channel dimension format for the output image. Can be one of:
257
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
258
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
259
+ - Unset: Use the channel dimension format of the input image.
260
+ input_data_format (`ChannelDimension` or `str`, *optional*):
261
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
262
+ from the input image. Can be one of:
263
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
264
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
265
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
266
+ """
267
+ do_resize = do_resize if do_resize is not None else self.do_resize
268
+ crop_pct = crop_pct if crop_pct is not None else self.crop_pct
269
+ resample = resample if resample is not None else self.resample
270
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
271
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
272
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
273
+ image_mean = image_mean if image_mean is not None else self.image_mean
274
+ image_std = image_std if image_std is not None else self.image_std
275
+
276
+ size = size if size is not None else self.size
277
+ size = get_size_dict(size, default_to_square=False)
278
+
279
+ validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
280
+
281
+ images = make_list_of_images(images)
282
+
283
+ if not valid_images(images):
284
+ raise ValueError(
285
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
286
+ "torch.Tensor, tf.Tensor or jax.ndarray."
287
+ )
288
+
289
+ validate_preprocess_arguments(
290
+ do_rescale=do_rescale,
291
+ rescale_factor=rescale_factor,
292
+ do_normalize=do_normalize,
293
+ image_mean=image_mean,
294
+ image_std=image_std,
295
+ do_resize=do_resize,
296
+ size=size,
297
+ resample=resample,
298
+ )
299
+
300
+ # All transformations expect numpy arrays.
301
+ images = [to_numpy_array(image) for image in images]
302
+
303
+ if is_scaled_image(images[0]) and do_rescale:
304
+ logger.warning_once(
305
+ "It looks like you are trying to rescale already rescaled images. If the input"
306
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
307
+ )
308
+
309
+ if input_data_format is None:
310
+ # We assume that all images have the same channel dimension format.
311
+ input_data_format = infer_channel_dimension_format(images[0])
312
+
313
+ if do_resize:
314
+ images = [
315
+ self.resize(
316
+ image=image, size=size, crop_pct=crop_pct, resample=resample, input_data_format=input_data_format
317
+ )
318
+ for image in images
319
+ ]
320
+
321
+ if do_rescale:
322
+ images = [
323
+ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
324
+ for image in images
325
+ ]
326
+
327
+ if do_normalize:
328
+ images = [
329
+ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
330
+ for image in images
331
+ ]
332
+
333
+ images = [
334
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
335
+ ]
336
+
337
+ data = {"pixel_values": images}
338
+ return BatchFeature(data=data, tensor_type=return_tensors)
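
A usage sketch for the image processor defined above; the dummy array and the expected output shape are illustrative. With `shortest_edge` below 384 the image is first resized to `shortest_edge / crop_pct` and then center-cropped to a square, as implemented in `resize`:

```python
import numpy as np

from transformers import ConvNextImageProcessor

processor = ConvNextImageProcessor(size={"shortest_edge": 224})  # crop_pct defaults to 224 / 256

# Channels-first dummy image with 0-255 pixel values, so do_rescale applies.
image = np.random.randint(0, 256, (3, 500, 400), dtype=np.uint8)
batch = processor(images=image, return_tensors="pt")

print(batch["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])
```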
venv/lib/python3.10/site-packages/transformers/models/convnext/modeling_convnext.py ADDED
@@ -0,0 +1,551 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch ConvNext model."""
16
+
17
+
18
+ from typing import Optional, Tuple, Union
19
+
20
+ import torch
21
+ import torch.utils.checkpoint
22
+ from torch import nn
23
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
24
+
25
+ from ...activations import ACT2FN
26
+ from ...modeling_outputs import (
27
+ BackboneOutput,
28
+ BaseModelOutputWithNoAttention,
29
+ BaseModelOutputWithPoolingAndNoAttention,
30
+ ImageClassifierOutputWithNoAttention,
31
+ )
32
+ from ...modeling_utils import PreTrainedModel
33
+ from ...utils import (
34
+ add_code_sample_docstrings,
35
+ add_start_docstrings,
36
+ add_start_docstrings_to_model_forward,
37
+ logging,
38
+ replace_return_docstrings,
39
+ )
40
+ from ...utils.backbone_utils import BackboneMixin
41
+ from .configuration_convnext import ConvNextConfig
42
+
43
+
44
+ logger = logging.get_logger(__name__)
45
+
46
+ # General docstring
47
+ _CONFIG_FOR_DOC = "ConvNextConfig"
48
+
49
+ # Base docstring
50
+ _CHECKPOINT_FOR_DOC = "facebook/convnext-tiny-224"
51
+ _EXPECTED_OUTPUT_SHAPE = [1, 768, 7, 7]
52
+
53
+ # Image classification docstring
54
+ _IMAGE_CLASS_CHECKPOINT = "facebook/convnext-tiny-224"
55
+ _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
56
+
57
+
58
+ from ..deprecated._archive_maps import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
59
+
60
+
61
+ # Copied from transformers.models.beit.modeling_beit.drop_path
62
+ def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
63
+ """
64
+ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
65
+
66
+ Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
67
+ however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
68
+ See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
69
+ layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
70
+ argument.
71
+ """
72
+ if drop_prob == 0.0 or not training:
73
+ return input
74
+ keep_prob = 1 - drop_prob
75
+ shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
76
+ random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
77
+ random_tensor.floor_() # binarize
78
+ output = input.div(keep_prob) * random_tensor
79
+ return output
80
+
81
+
82
+ # Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->ConvNext
83
+ class ConvNextDropPath(nn.Module):
84
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
85
+
86
+ def __init__(self, drop_prob: Optional[float] = None) -> None:
87
+ super().__init__()
88
+ self.drop_prob = drop_prob
89
+
90
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
91
+ return drop_path(hidden_states, self.drop_prob, self.training)
92
+
93
+ def extra_repr(self) -> str:
94
+ return "p={}".format(self.drop_prob)
95
+
96
+
97
+ class ConvNextLayerNorm(nn.Module):
98
+ r"""LayerNorm that supports two data formats: channels_last (default) or channels_first.
99
+ The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shape (batch_size, height,
100
+ width, channels) while channels_first corresponds to inputs with shape (batch_size, channels, height, width).
101
+ """
102
+
103
+ def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"):
104
+ super().__init__()
105
+ self.weight = nn.Parameter(torch.ones(normalized_shape))
106
+ self.bias = nn.Parameter(torch.zeros(normalized_shape))
107
+ self.eps = eps
108
+ self.data_format = data_format
109
+ if self.data_format not in ["channels_last", "channels_first"]:
110
+ raise NotImplementedError(f"Unsupported data format: {self.data_format}")
111
+ self.normalized_shape = (normalized_shape,)
112
+
113
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
114
+ if self.data_format == "channels_last":
115
+ x = torch.nn.functional.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
116
+ elif self.data_format == "channels_first":
117
+ input_dtype = x.dtype
118
+ x = x.float()
119
+ u = x.mean(1, keepdim=True)
120
+ s = (x - u).pow(2).mean(1, keepdim=True)
121
+ x = (x - u) / torch.sqrt(s + self.eps)
122
+ x = x.to(dtype=input_dtype)
123
+ x = self.weight[:, None, None] * x + self.bias[:, None, None]
124
+ return x
125
+
126
+
127
+ class ConvNextEmbeddings(nn.Module):
128
+ """This class is comparable to (and inspired by) the SwinEmbeddings class
129
+ found in src/transformers/models/swin/modeling_swin.py.
130
+ """
131
+
132
+ def __init__(self, config):
133
+ super().__init__()
134
+ self.patch_embeddings = nn.Conv2d(
135
+ config.num_channels, config.hidden_sizes[0], kernel_size=config.patch_size, stride=config.patch_size
136
+ )
137
+ self.layernorm = ConvNextLayerNorm(config.hidden_sizes[0], eps=1e-6, data_format="channels_first")
138
+ self.num_channels = config.num_channels
139
+
140
+ def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
141
+ num_channels = pixel_values.shape[1]
142
+ if num_channels != self.num_channels:
143
+ raise ValueError(
144
+ "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
145
+ )
146
+ embeddings = self.patch_embeddings(pixel_values)
147
+ embeddings = self.layernorm(embeddings)
148
+ return embeddings
149
+
150
+
151
+ class ConvNextLayer(nn.Module):
152
+ """This corresponds to the `Block` class in the original implementation.
153
+
154
+ There are two equivalent implementations: [DwConv, LayerNorm (channels_first), Conv, GELU,1x1 Conv]; all in (N, C,
155
+ H, W) (2) [DwConv, Permute to (N, H, W, C), LayerNorm (channels_last), Linear, GELU, Linear]; Permute back
156
+
157
+ The authors used (2) as they find it slightly faster in PyTorch.
158
+
159
+ Args:
160
+ config ([`ConvNextConfig`]): Model configuration class.
161
+ dim (`int`): Number of input channels.
162
+ drop_path (`float`): Stochastic depth rate. Default: 0.0.
163
+ """
164
+
165
+ def __init__(self, config, dim, drop_path=0):
166
+ super().__init__()
167
+ self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim) # depthwise conv
168
+ self.layernorm = ConvNextLayerNorm(dim, eps=1e-6)
169
+ self.pwconv1 = nn.Linear(dim, 4 * dim) # pointwise/1x1 convs, implemented with linear layers
170
+ self.act = ACT2FN[config.hidden_act]
171
+ self.pwconv2 = nn.Linear(4 * dim, dim)
172
+ self.layer_scale_parameter = (
173
+ nn.Parameter(config.layer_scale_init_value * torch.ones((dim)), requires_grad=True)
174
+ if config.layer_scale_init_value > 0
175
+ else None
176
+ )
177
+ self.drop_path = ConvNextDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
178
+
179
+ def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
180
+ input = hidden_states
181
+ x = self.dwconv(hidden_states)
182
+ x = x.permute(0, 2, 3, 1) # (N, C, H, W) -> (N, H, W, C)
183
+ x = self.layernorm(x)
184
+ x = self.pwconv1(x)
185
+ x = self.act(x)
186
+ x = self.pwconv2(x)
187
+ if self.layer_scale_parameter is not None:
188
+ x = self.layer_scale_parameter * x
189
+ x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W)
190
+
191
+ x = input + self.drop_path(x)
192
+ return x
193
+
194
+
195
+ class ConvNextStage(nn.Module):
196
+ """ConvNeXT stage, consisting of an optional downsampling layer + multiple residual blocks.
197
+
198
+ Args:
199
+ config ([`ConvNextConfig`]): Model configuration class.
200
+ in_channels (`int`): Number of input channels.
201
+ out_channels (`int`): Number of output channels.
202
+ depth (`int`): Number of residual blocks.
203
+ drop_path_rates(`List[float]`): Stochastic depth rates for each layer.
204
+ """
205
+
206
+ def __init__(self, config, in_channels, out_channels, kernel_size=2, stride=2, depth=2, drop_path_rates=None):
207
+ super().__init__()
208
+
209
+ if in_channels != out_channels or stride > 1:
210
+ self.downsampling_layer = nn.Sequential(
211
+ ConvNextLayerNorm(in_channels, eps=1e-6, data_format="channels_first"),
212
+ nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride),
213
+ )
214
+ else:
215
+ self.downsampling_layer = nn.Identity()
216
+ drop_path_rates = drop_path_rates or [0.0] * depth
217
+ self.layers = nn.Sequential(
218
+ *[ConvNextLayer(config, dim=out_channels, drop_path=drop_path_rates[j]) for j in range(depth)]
219
+ )
220
+
221
+ def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
222
+ hidden_states = self.downsampling_layer(hidden_states)
223
+ hidden_states = self.layers(hidden_states)
224
+ return hidden_states
225
+
226
+
227
+ class ConvNextEncoder(nn.Module):
228
+ def __init__(self, config):
229
+ super().__init__()
230
+ self.stages = nn.ModuleList()
231
+ drop_path_rates = [
232
+ x.tolist() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths)).split(config.depths)
233
+ ]
234
+ prev_chs = config.hidden_sizes[0]
235
+ for i in range(config.num_stages):
236
+ out_chs = config.hidden_sizes[i]
237
+ stage = ConvNextStage(
238
+ config,
239
+ in_channels=prev_chs,
240
+ out_channels=out_chs,
241
+ stride=2 if i > 0 else 1,
242
+ depth=config.depths[i],
243
+ drop_path_rates=drop_path_rates[i],
244
+ )
245
+ self.stages.append(stage)
246
+ prev_chs = out_chs
247
+
248
+ def forward(
249
+ self,
250
+ hidden_states: torch.FloatTensor,
251
+ output_hidden_states: Optional[bool] = False,
252
+ return_dict: Optional[bool] = True,
253
+ ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
254
+ all_hidden_states = () if output_hidden_states else None
255
+
256
+ for i, layer_module in enumerate(self.stages):
257
+ if output_hidden_states:
258
+ all_hidden_states = all_hidden_states + (hidden_states,)
259
+
260
+ hidden_states = layer_module(hidden_states)
261
+
262
+ if output_hidden_states:
263
+ all_hidden_states = all_hidden_states + (hidden_states,)
264
+
265
+ if not return_dict:
266
+ return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
267
+
268
+ return BaseModelOutputWithNoAttention(
269
+ last_hidden_state=hidden_states,
270
+ hidden_states=all_hidden_states,
271
+ )
272
+
273
+
274
+ class ConvNextPreTrainedModel(PreTrainedModel):
275
+ """
276
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
277
+ models.
278
+ """
279
+
280
+ config_class = ConvNextConfig
281
+ base_model_prefix = "convnext"
282
+ main_input_name = "pixel_values"
283
+
284
+ def _init_weights(self, module):
285
+ """Initialize the weights"""
286
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
287
+ # Slightly different from the TF version which uses truncated_normal for initialization
288
+ # cf https://github.com/pytorch/pytorch/pull/5617
289
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
290
+ if module.bias is not None:
291
+ module.bias.data.zero_()
292
+ elif isinstance(module, nn.LayerNorm):
293
+ module.bias.data.zero_()
294
+ module.weight.data.fill_(1.0)
295
+
296
+
297
+ CONVNEXT_START_DOCSTRING = r"""
298
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
299
+ as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
300
+ behavior.
301
+
302
+ Parameters:
303
+ config ([`ConvNextConfig`]): Model configuration class with all the parameters of the model.
304
+ Initializing with a config file does not load the weights associated with the model, only the
305
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
306
+ """
307
+
308
+ CONVNEXT_INPUTS_DOCSTRING = r"""
309
+ Args:
310
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
311
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
312
+ [`ConvNextImageProcessor.__call__`] for details.
313
+
314
+ output_hidden_states (`bool`, *optional*):
315
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
316
+ more detail.
317
+ return_dict (`bool`, *optional*):
318
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
319
+ """
320
+
321
+
322
+ @add_start_docstrings(
323
+ "The bare ConvNext model outputting raw features without any specific head on top.",
324
+ CONVNEXT_START_DOCSTRING,
325
+ )
326
+ class ConvNextModel(ConvNextPreTrainedModel):
327
+ def __init__(self, config):
328
+ super().__init__(config)
329
+ self.config = config
330
+
331
+ self.embeddings = ConvNextEmbeddings(config)
332
+ self.encoder = ConvNextEncoder(config)
333
+
334
+ # final layernorm layer
335
+ self.layernorm = nn.LayerNorm(config.hidden_sizes[-1], eps=config.layer_norm_eps)
336
+
337
+ # Initialize weights and apply final processing
338
+ self.post_init()
339
+
340
+ @add_start_docstrings_to_model_forward(CONVNEXT_INPUTS_DOCSTRING)
341
+ @add_code_sample_docstrings(
342
+ checkpoint=_CHECKPOINT_FOR_DOC,
343
+ output_type=BaseModelOutputWithPoolingAndNoAttention,
344
+ config_class=_CONFIG_FOR_DOC,
345
+ modality="vision",
346
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
347
+ )
348
+ def forward(
349
+ self,
350
+ pixel_values: torch.FloatTensor = None,
351
+ output_hidden_states: Optional[bool] = None,
352
+ return_dict: Optional[bool] = None,
353
+ ) -> Union[Tuple, BaseModelOutputWithPoolingAndNoAttention]:
354
+ output_hidden_states = (
355
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
356
+ )
357
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
358
+
359
+ if pixel_values is None:
360
+ raise ValueError("You have to specify pixel_values")
361
+
362
+ embedding_output = self.embeddings(pixel_values)
363
+
364
+ encoder_outputs = self.encoder(
365
+ embedding_output,
366
+ output_hidden_states=output_hidden_states,
367
+ return_dict=return_dict,
368
+ )
369
+
370
+ last_hidden_state = encoder_outputs[0]
371
+
372
+ # global average pooling, (N, C, H, W) -> (N, C)
373
+ pooled_output = self.layernorm(last_hidden_state.mean([-2, -1]))
374
+
375
+ if not return_dict:
376
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
377
+
378
+ return BaseModelOutputWithPoolingAndNoAttention(
379
+ last_hidden_state=last_hidden_state,
380
+ pooler_output=pooled_output,
381
+ hidden_states=encoder_outputs.hidden_states,
382
+ )
383
+
384
+
385
+ @add_start_docstrings(
386
+ """
387
+ ConvNext Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
388
+ ImageNet.
389
+ """,
390
+ CONVNEXT_START_DOCSTRING,
391
+ )
392
+ class ConvNextForImageClassification(ConvNextPreTrainedModel):
393
+ def __init__(self, config):
394
+ super().__init__(config)
395
+
396
+ self.num_labels = config.num_labels
397
+ self.convnext = ConvNextModel(config)
398
+
399
+ # Classifier head
400
+ self.classifier = (
401
+ nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
402
+ )
403
+
404
+ # Initialize weights and apply final processing
405
+ self.post_init()
406
+
407
+ @add_start_docstrings_to_model_forward(CONVNEXT_INPUTS_DOCSTRING)
408
+ @add_code_sample_docstrings(
409
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
410
+ output_type=ImageClassifierOutputWithNoAttention,
411
+ config_class=_CONFIG_FOR_DOC,
412
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
413
+ )
414
+ def forward(
415
+ self,
416
+ pixel_values: torch.FloatTensor = None,
417
+ labels: Optional[torch.LongTensor] = None,
418
+ output_hidden_states: Optional[bool] = None,
419
+ return_dict: Optional[bool] = None,
420
+ ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
421
+ r"""
422
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
423
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
424
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
425
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
426
+ """
427
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
428
+
429
+ outputs = self.convnext(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
430
+
431
+ pooled_output = outputs.pooler_output if return_dict else outputs[1]
432
+
433
+ logits = self.classifier(pooled_output)
434
+
435
+ loss = None
436
+ if labels is not None:
437
+ if self.config.problem_type is None:
438
+ if self.num_labels == 1:
439
+ self.config.problem_type = "regression"
440
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
441
+ self.config.problem_type = "single_label_classification"
442
+ else:
443
+ self.config.problem_type = "multi_label_classification"
444
+
445
+ if self.config.problem_type == "regression":
446
+ loss_fct = MSELoss()
447
+ if self.num_labels == 1:
448
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
449
+ else:
450
+ loss = loss_fct(logits, labels)
451
+ elif self.config.problem_type == "single_label_classification":
452
+ loss_fct = CrossEntropyLoss()
453
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
454
+ elif self.config.problem_type == "multi_label_classification":
455
+ loss_fct = BCEWithLogitsLoss()
456
+ loss = loss_fct(logits, labels)
457
+ if not return_dict:
458
+ output = (logits,) + outputs[2:]
459
+ return ((loss,) + output) if loss is not None else output
460
+
461
+ return ImageClassifierOutputWithNoAttention(
462
+ loss=loss,
463
+ logits=logits,
464
+ hidden_states=outputs.hidden_states,
465
+ )
466
+
467
+
468
+ @add_start_docstrings(
469
+ """
470
+ ConvNeXt backbone, to be used with frameworks like DETR and MaskFormer.
471
+ """,
472
+ CONVNEXT_START_DOCSTRING,
473
+ )
474
+ class ConvNextBackbone(ConvNextPreTrainedModel, BackboneMixin):
475
+ def __init__(self, config):
476
+ super().__init__(config)
477
+ super()._init_backbone(config)
478
+
479
+ self.embeddings = ConvNextEmbeddings(config)
480
+ self.encoder = ConvNextEncoder(config)
481
+ self.num_features = [config.hidden_sizes[0]] + config.hidden_sizes
482
+
483
+ # Add layer norms to hidden states of out_features
484
+ hidden_states_norms = {}
485
+ for stage, num_channels in zip(self._out_features, self.channels):
486
+ hidden_states_norms[stage] = ConvNextLayerNorm(num_channels, data_format="channels_first")
487
+ self.hidden_states_norms = nn.ModuleDict(hidden_states_norms)
488
+
489
+ # initialize weights and apply final processing
490
+ self.post_init()
491
+
492
+ @add_start_docstrings_to_model_forward(CONVNEXT_INPUTS_DOCSTRING)
493
+ @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
494
+ def forward(
495
+ self,
496
+ pixel_values: torch.Tensor,
497
+ output_hidden_states: Optional[bool] = None,
498
+ return_dict: Optional[bool] = None,
499
+ ) -> BackboneOutput:
500
+ """
501
+ Returns:
502
+
503
+ Examples:
504
+
505
+ ```python
506
+ >>> from transformers import AutoImageProcessor, AutoBackbone
507
+ >>> import torch
508
+ >>> from PIL import Image
509
+ >>> import requests
510
+
511
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
512
+ >>> image = Image.open(requests.get(url, stream=True).raw)
513
+
514
+ >>> processor = AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224")
515
+ >>> model = AutoBackbone.from_pretrained("facebook/convnext-tiny-224")
516
+
517
+ >>> inputs = processor(image, return_tensors="pt")
518
+ >>> outputs = model(**inputs)
519
+ ```"""
520
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
521
+ output_hidden_states = (
522
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
523
+ )
524
+
525
+ embedding_output = self.embeddings(pixel_values)
526
+
527
+ outputs = self.encoder(
528
+ embedding_output,
529
+ output_hidden_states=True,
530
+ return_dict=return_dict,
531
+ )
532
+
533
+ hidden_states = outputs.hidden_states if return_dict else outputs[1]
534
+
535
+ feature_maps = ()
536
+ for stage, hidden_state in zip(self.stage_names, hidden_states):
537
+ if stage in self.out_features:
538
+ hidden_state = self.hidden_states_norms[stage](hidden_state)
539
+ feature_maps += (hidden_state,)
540
+
541
+ if not return_dict:
542
+ output = (feature_maps,)
543
+ if output_hidden_states:
544
+ output += (hidden_states,)
545
+ return output
546
+
547
+ return BackboneOutput(
548
+ feature_maps=feature_maps,
549
+ hidden_states=hidden_states if output_hidden_states else None,
550
+ attentions=None,
551
+ )
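The PyTorch file ends here. For orientation, a minimal usage sketch of the classes it defines; the `facebook/convnext-tiny-224` checkpoint and the COCO image URL are the same ones referenced in the docstrings above, and downloading them is an assumption of this sketch rather than something the diff itself guarantees:

```python
# Minimal sketch: run ConvNextForImageClassification end to end.
# Assumes network access to the "facebook/convnext-tiny-224" checkpoint.
import torch
import requests
from PIL import Image

from transformers import AutoImageProcessor, ConvNextForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224")
model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224")

# pixel_values: (batch_size, num_channels, height, width), as required by
# CONVNEXT_INPUTS_DOCSTRING above
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits  # (1, 1000) for the ImageNet-1k head

predicted_class_idx = logits.argmax(-1).item()
print("Predicted class:", model.config.id2label[predicted_class_idx])
```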
venv/lib/python3.10/site-packages/transformers/models/convnext/modeling_tf_convnext.py ADDED
@@ -0,0 +1,667 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Meta Platforms Inc. and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ TF 2.0 ConvNext model."""
16
+
17
+
18
+ from __future__ import annotations
19
+
20
+ from typing import List, Optional, Tuple, Union
21
+
22
+ import numpy as np
23
+ import tensorflow as tf
24
+
25
+ from ...activations_tf import get_tf_activation
26
+ from ...modeling_tf_outputs import TFBaseModelOutput, TFBaseModelOutputWithPooling, TFSequenceClassifierOutput
27
+ from ...modeling_tf_utils import (
28
+ TFModelInputType,
29
+ TFPreTrainedModel,
30
+ TFSequenceClassificationLoss,
31
+ get_initializer,
32
+ keras,
33
+ keras_serializable,
34
+ unpack_inputs,
35
+ )
36
+ from ...tf_utils import shape_list
37
+ from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
38
+ from .configuration_convnext import ConvNextConfig
39
+
40
+
41
+ logger = logging.get_logger(__name__)
42
+
43
+
44
+ _CONFIG_FOR_DOC = "ConvNextConfig"
45
+ _CHECKPOINT_FOR_DOC = "facebook/convnext-tiny-224"
46
+
47
+
48
+ class TFConvNextDropPath(keras.layers.Layer):
49
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
50
+ References:
51
+ (1) github.com:rwightman/pytorch-image-models
52
+ """
53
+
54
+ def __init__(self, drop_path: float, **kwargs):
55
+ super().__init__(**kwargs)
56
+ self.drop_path = drop_path
57
+
58
+ def call(self, x: tf.Tensor, training=None):
59
+ if training:
60
+ keep_prob = 1 - self.drop_path
61
+ shape = (tf.shape(x)[0],) + (1,) * (len(tf.shape(x)) - 1)
62
+ random_tensor = keep_prob + tf.random.uniform(shape, 0, 1)
63
+ random_tensor = tf.floor(random_tensor)
64
+ return (x / keep_prob) * random_tensor
65
+ return x
66
+
67
+
68
+ class TFConvNextEmbeddings(keras.layers.Layer):
69
+ """This class is comparable to (and inspired by) the SwinEmbeddings class
70
+ found in src/transformers/models/swin/modeling_swin.py.
71
+ """
72
+
73
+ def __init__(self, config: ConvNextConfig, **kwargs):
74
+ super().__init__(**kwargs)
75
+ self.patch_embeddings = keras.layers.Conv2D(
76
+ filters=config.hidden_sizes[0],
77
+ kernel_size=config.patch_size,
78
+ strides=config.patch_size,
79
+ name="patch_embeddings",
80
+ kernel_initializer=get_initializer(config.initializer_range),
81
+ bias_initializer=keras.initializers.Zeros(),
82
+ )
83
+ self.layernorm = keras.layers.LayerNormalization(epsilon=1e-6, name="layernorm")
84
+ self.num_channels = config.num_channels
85
+ self.config = config
86
+
87
+ def call(self, pixel_values):
88
+ if isinstance(pixel_values, dict):
89
+ pixel_values = pixel_values["pixel_values"]
90
+
91
+ tf.debugging.assert_equal(
92
+ shape_list(pixel_values)[1],
93
+ self.num_channels,
94
+ message="Make sure that the channel dimension of the pixel values match with the one set in the configuration.",
95
+ )
96
+
97
+ # When running on CPU, `keras.layers.Conv2D` doesn't support `NCHW` format.
98
+ # So change the input format from `NCHW` to `NHWC`.
99
+ # shape = (batch_size, in_height, in_width, in_channels)
100
+ pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
101
+
102
+ embeddings = self.patch_embeddings(pixel_values)
103
+ embeddings = self.layernorm(embeddings)
104
+ return embeddings
105
+
106
+ def build(self, input_shape=None):
107
+ if self.built:
108
+ return
109
+ self.built = True
110
+ if getattr(self, "patch_embeddings", None) is not None:
111
+ with tf.name_scope(self.patch_embeddings.name):
112
+ self.patch_embeddings.build([None, None, None, self.config.num_channels])
113
+ if getattr(self, "layernorm", None) is not None:
114
+ with tf.name_scope(self.layernorm.name):
115
+ self.layernorm.build([None, None, None, self.config.hidden_sizes[0]])
116
+
117
+
118
+ class TFConvNextLayer(keras.layers.Layer):
119
+ """This corresponds to the `Block` class in the original implementation.
120
+
121
+ There are two equivalent implementations: (1) [DwConv, LayerNorm (channels_first), Conv, GELU, 1x1 Conv]; all in (N, C,
122
+ H, W) (2) [DwConv, Permute to (N, H, W, C), LayerNorm (channels_last), Linear, GELU, Linear]; Permute back
123
+
124
+ The authors used (2) as they find it slightly faster in PyTorch. Since we already permuted the inputs to follow
125
+ NHWC ordering, we can just apply the operations straight-away without the permutation.
126
+
127
+ Args:
128
+ config ([`ConvNextConfig`]): Model configuration class.
129
+ dim (`int`): Number of input channels.
130
+ drop_path (`float`): Stochastic depth rate. Default: 0.0.
131
+ """
132
+
133
+ def __init__(self, config, dim, drop_path=0.0, **kwargs):
134
+ super().__init__(**kwargs)
135
+ self.dim = dim
136
+ self.config = config
137
+ self.dwconv = keras.layers.Conv2D(
138
+ filters=dim,
139
+ kernel_size=7,
140
+ padding="same",
141
+ groups=dim,
142
+ kernel_initializer=get_initializer(config.initializer_range),
143
+ bias_initializer="zeros",
144
+ name="dwconv",
145
+ ) # depthwise conv
146
+ self.layernorm = keras.layers.LayerNormalization(
147
+ epsilon=1e-6,
148
+ name="layernorm",
149
+ )
150
+ self.pwconv1 = keras.layers.Dense(
151
+ units=4 * dim,
152
+ kernel_initializer=get_initializer(config.initializer_range),
153
+ bias_initializer="zeros",
154
+ name="pwconv1",
155
+ ) # pointwise/1x1 convs, implemented with linear layers
156
+ self.act = get_tf_activation(config.hidden_act)
157
+ self.pwconv2 = keras.layers.Dense(
158
+ units=dim,
159
+ kernel_initializer=get_initializer(config.initializer_range),
160
+ bias_initializer="zeros",
161
+ name="pwconv2",
162
+ )
163
+ # Using `layers.Activation` instead of `tf.identity` to better control `training`
164
+ # behaviour.
165
+ self.drop_path = (
166
+ TFConvNextDropPath(drop_path, name="drop_path")
167
+ if drop_path > 0.0
168
+ else keras.layers.Activation("linear", name="drop_path")
169
+ )
170
+
171
+ def build(self, input_shape: tf.TensorShape = None):
172
+ # PT's `nn.Parameters` must be mapped to a TF layer weight to inherit the same name hierarchy (and vice-versa)
173
+ self.layer_scale_parameter = (
174
+ self.add_weight(
175
+ shape=(self.dim,),
176
+ initializer=keras.initializers.Constant(value=self.config.layer_scale_init_value),
177
+ trainable=True,
178
+ name="layer_scale_parameter",
179
+ )
180
+ if self.config.layer_scale_init_value > 0
181
+ else None
182
+ )
183
+
184
+ if self.built:
185
+ return
186
+ self.built = True
187
+ if getattr(self, "dwconv", None) is not None:
188
+ with tf.name_scope(self.dwconv.name):
189
+ self.dwconv.build([None, None, None, self.dim])
190
+ if getattr(self, "layernorm", None) is not None:
191
+ with tf.name_scope(self.layernorm.name):
192
+ self.layernorm.build([None, None, None, self.dim])
193
+ if getattr(self, "pwconv1", None) is not None:
194
+ with tf.name_scope(self.pwconv1.name):
195
+ self.pwconv1.build([None, None, self.dim])
196
+ if getattr(self, "pwconv2", None) is not None:
197
+ with tf.name_scope(self.pwconv2.name):
198
+ self.pwconv2.build([None, None, 4 * self.dim])
199
+ if getattr(self, "drop_path", None) is not None:
200
+ with tf.name_scope(self.drop_path.name):
201
+ self.drop_path.build(None)
202
+
203
+ def call(self, hidden_states, training=False):
204
+ input = hidden_states
205
+ x = self.dwconv(hidden_states)
206
+ x = self.layernorm(x)
207
+ x = self.pwconv1(x)
208
+ x = self.act(x)
209
+ x = self.pwconv2(x)
210
+
211
+ if self.layer_scale_parameter is not None:
212
+ x = self.layer_scale_parameter * x
213
+
214
+ x = input + self.drop_path(x, training=training)
215
+ return x
216
+
217
+
218
+ class TFConvNextStage(keras.layers.Layer):
219
+ """ConvNext stage, consisting of an optional downsampling layer + multiple residual blocks.
220
+
221
+ Args:
222
+ config (`ConvNextConfig`):
223
+ Model configuration class.
224
+ in_channels (`int`):
225
+ Number of input channels.
226
+ out_channels (`int`):
227
+ Number of output channels.
228
+ depth (`int`):
229
+ Number of residual blocks.
230
+ drop_path_rates(`List[float]`):
231
+ Stochastic depth rates for each layer.
232
+ """
233
+
234
+ def __init__(
235
+ self,
236
+ config: ConvNextConfig,
237
+ in_channels: int,
238
+ out_channels: int,
239
+ kernel_size: int = 2,
240
+ stride: int = 2,
241
+ depth: int = 2,
242
+ drop_path_rates: Optional[List[float]] = None,
243
+ **kwargs,
244
+ ):
245
+ super().__init__(**kwargs)
246
+ if in_channels != out_channels or stride > 1:
247
+ self.downsampling_layer = [
248
+ keras.layers.LayerNormalization(
249
+ epsilon=1e-6,
250
+ name="downsampling_layer.0",
251
+ ),
252
+ # Inputs to this layer will follow NHWC format since we
253
+ # transposed the inputs from NCHW to NHWC in the `TFConvNextEmbeddings`
254
+ # layer. All the outputs throughout the model will be in NHWC
255
+ # from this point on until the output where we again change to
256
+ # NCHW.
257
+ keras.layers.Conv2D(
258
+ filters=out_channels,
259
+ kernel_size=kernel_size,
260
+ strides=stride,
261
+ kernel_initializer=get_initializer(config.initializer_range),
262
+ bias_initializer=keras.initializers.Zeros(),
263
+ name="downsampling_layer.1",
264
+ ),
265
+ ]
266
+ else:
267
+ self.downsampling_layer = [tf.identity]
268
+
269
+ drop_path_rates = drop_path_rates or [0.0] * depth
270
+ self.layers = [
271
+ TFConvNextLayer(
272
+ config,
273
+ dim=out_channels,
274
+ drop_path=drop_path_rates[j],
275
+ name=f"layers.{j}",
276
+ )
277
+ for j in range(depth)
278
+ ]
279
+ self.in_channels = in_channels
280
+ self.out_channels = out_channels
281
+ self.stride = stride
282
+
283
+ def call(self, hidden_states):
284
+ for layer in self.downsampling_layer:
285
+ hidden_states = layer(hidden_states)
286
+ for layer in self.layers:
287
+ hidden_states = layer(hidden_states)
288
+ return hidden_states
289
+
290
+ def build(self, input_shape=None):
291
+ if self.built:
292
+ return
293
+ self.built = True
294
+ if getattr(self, "layers", None) is not None:
295
+ for layer in self.layers:
296
+ with tf.name_scope(layer.name):
297
+ layer.build(None)
298
+ if self.in_channels != self.out_channels or self.stride > 1:
299
+ with tf.name_scope(self.downsampling_layer[0].name):
300
+ self.downsampling_layer[0].build([None, None, None, self.in_channels])
301
+ with tf.name_scope(self.downsampling_layer[1].name):
302
+ self.downsampling_layer[1].build([None, None, None, self.in_channels])
303
+
304
+
305
+ class TFConvNextEncoder(keras.layers.Layer):
306
+ def __init__(self, config, **kwargs):
307
+ super().__init__(**kwargs)
308
+ self.stages = []
309
+ drop_path_rates = tf.linspace(0.0, config.drop_path_rate, sum(config.depths))
310
+ drop_path_rates = tf.split(drop_path_rates, config.depths)
311
+ drop_path_rates = [x.numpy().tolist() for x in drop_path_rates]
312
+ prev_chs = config.hidden_sizes[0]
313
+ for i in range(config.num_stages):
314
+ out_chs = config.hidden_sizes[i]
315
+ stage = TFConvNextStage(
316
+ config,
317
+ in_channels=prev_chs,
318
+ out_channels=out_chs,
319
+ stride=2 if i > 0 else 1,
320
+ depth=config.depths[i],
321
+ drop_path_rates=drop_path_rates[i],
322
+ name=f"stages.{i}",
323
+ )
324
+ self.stages.append(stage)
325
+ prev_chs = out_chs
326
+
327
+ def call(self, hidden_states, output_hidden_states=False, return_dict=True):
328
+ all_hidden_states = () if output_hidden_states else None
329
+
330
+ for i, layer_module in enumerate(self.stages):
331
+ if output_hidden_states:
332
+ all_hidden_states = all_hidden_states + (hidden_states,)
333
+
334
+ hidden_states = layer_module(hidden_states)
335
+
336
+ if output_hidden_states:
337
+ all_hidden_states = all_hidden_states + (hidden_states,)
338
+
339
+ if not return_dict:
340
+ return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
341
+
342
+ return TFBaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
343
+
344
+ def build(self, input_shape=None):
345
+ for stage in self.stages:
346
+ with tf.name_scope(stage.name):
347
+ stage.build(None)
348
+
349
+
350
+ @keras_serializable
351
+ class TFConvNextMainLayer(keras.layers.Layer):
352
+ config_class = ConvNextConfig
353
+
354
+ def __init__(self, config: ConvNextConfig, add_pooling_layer: bool = True, **kwargs):
355
+ super().__init__(**kwargs)
356
+
357
+ self.config = config
358
+ self.embeddings = TFConvNextEmbeddings(config, name="embeddings")
359
+ self.encoder = TFConvNextEncoder(config, name="encoder")
360
+ self.layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm")
361
+ # We are setting the `data_format` like so because from here on we will revert to the
362
+ # NCHW output format
363
+ self.pooler = keras.layers.GlobalAvgPool2D(data_format="channels_first") if add_pooling_layer else None
364
+
365
+ @unpack_inputs
366
+ def call(
367
+ self,
368
+ pixel_values: TFModelInputType | None = None,
369
+ output_hidden_states: Optional[bool] = None,
370
+ return_dict: Optional[bool] = None,
371
+ training: bool = False,
372
+ ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
373
+ output_hidden_states = (
374
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
375
+ )
376
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
377
+
378
+ if pixel_values is None:
379
+ raise ValueError("You have to specify pixel_values")
380
+
381
+ embedding_output = self.embeddings(pixel_values, training=training)
382
+
383
+ encoder_outputs = self.encoder(
384
+ embedding_output,
385
+ output_hidden_states=output_hidden_states,
386
+ return_dict=return_dict,
387
+ training=training,
388
+ )
389
+
390
+ last_hidden_state = encoder_outputs[0]
391
+ # Change to NCHW output format to have uniformity in the modules
392
+ last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))
393
+ pooled_output = self.layernorm(self.pooler(last_hidden_state))
394
+
395
+ # Change the other hidden state outputs to NCHW as well
396
+ if output_hidden_states:
397
+ hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])
398
+
399
+ if not return_dict:
400
+ hidden_states = hidden_states if output_hidden_states else ()
401
+ return (last_hidden_state, pooled_output) + hidden_states
402
+
403
+ return TFBaseModelOutputWithPooling(
404
+ last_hidden_state=last_hidden_state,
405
+ pooler_output=pooled_output,
406
+ hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
407
+ )
408
+
409
+ def build(self, input_shape=None):
410
+ if self.built:
411
+ return
412
+ self.built = True
413
+ if getattr(self, "embeddings", None) is not None:
414
+ with tf.name_scope(self.embeddings.name):
415
+ self.embeddings.build(None)
416
+ if getattr(self, "encoder", None) is not None:
417
+ with tf.name_scope(self.encoder.name):
418
+ self.encoder.build(None)
419
+ if getattr(self, "layernorm", None) is not None:
420
+ with tf.name_scope(self.layernorm.name):
421
+ self.layernorm.build([None, self.config.hidden_sizes[-1]])
422
+
423
+
424
+ class TFConvNextPreTrainedModel(TFPreTrainedModel):
425
+ """
426
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
427
+ models.
428
+ """
429
+
430
+ config_class = ConvNextConfig
431
+ base_model_prefix = "convnext"
432
+ main_input_name = "pixel_values"
433
+
434
+
435
+ CONVNEXT_START_DOCSTRING = r"""
436
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
437
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
438
+ etc.)
439
+
440
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
441
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and
442
+ behavior.
443
+
444
+ <Tip>
445
+
446
+ TensorFlow models and layers in `transformers` accept two formats as input:
447
+
448
+ - having all inputs as keyword arguments (like PyTorch models), or
449
+ - having all inputs as a list, tuple or dict in the first positional argument.
450
+
451
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
452
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
453
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
454
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
455
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
456
+ positional argument:
457
+
458
+ - a single Tensor with `pixel_values` only and nothing else: `model(pixel_values)`
459
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
460
+ `model([pixel_values, attention_mask])` or `model([pixel_values, attention_mask, token_type_ids])`
461
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
462
+ `model({"pixel_values": pixel_values, "token_type_ids": token_type_ids})`
463
+
464
+ Note that when creating models and layers with
465
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
466
+ about any of this, as you can just pass inputs like you would to any other Python function!
467
+
468
+ </Tip>
469
+
470
+ Parameters:
471
+ config ([`ConvNextConfig`]): Model configuration class with all the parameters of the model.
472
+ Initializing with a config file does not load the weights associated with the model, only the
473
+ configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
474
+ """
475
+
476
+ CONVNEXT_INPUTS_DOCSTRING = r"""
477
+ Args:
478
+ pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`):
479
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
480
+ [`ConvNextImageProcessor.__call__`] for details.
481
+
482
+ output_hidden_states (`bool`, *optional*):
483
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
484
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
485
+ used instead.
486
+ return_dict (`bool`, *optional*):
487
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
488
+ eager mode, in graph mode the value will always be set to True.
489
+ """
490
+
491
+
492
+ @add_start_docstrings(
493
+ "The bare ConvNext model outputting raw features without any specific head on top.",
494
+ CONVNEXT_START_DOCSTRING,
495
+ )
496
+ class TFConvNextModel(TFConvNextPreTrainedModel):
497
+ def __init__(self, config, *inputs, add_pooling_layer=True, **kwargs):
498
+ super().__init__(config, *inputs, **kwargs)
499
+ self.convnext = TFConvNextMainLayer(config, add_pooling_layer=add_pooling_layer, name="convnext")
500
+
501
+ @unpack_inputs
502
+ @add_start_docstrings_to_model_forward(CONVNEXT_INPUTS_DOCSTRING)
503
+ @replace_return_docstrings(output_type=TFBaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC)
504
+ def call(
505
+ self,
506
+ pixel_values: TFModelInputType | None = None,
507
+ output_hidden_states: Optional[bool] = None,
508
+ return_dict: Optional[bool] = None,
509
+ training: bool = False,
510
+ ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
511
+ r"""
512
+ Returns:
513
+
514
+ Examples:
515
+
516
+ ```python
517
+ >>> from transformers import AutoImageProcessor, TFConvNextModel
518
+ >>> from PIL import Image
519
+ >>> import requests
520
+
521
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
522
+ >>> image = Image.open(requests.get(url, stream=True).raw)
523
+
524
+ >>> image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224")
525
+ >>> model = TFConvNextModel.from_pretrained("facebook/convnext-tiny-224")
526
+
527
+ >>> inputs = image_processor(images=image, return_tensors="tf")
528
+ >>> outputs = model(**inputs)
529
+ >>> last_hidden_states = outputs.last_hidden_state
530
+ ```"""
531
+ output_hidden_states = (
532
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
533
+ )
534
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
535
+
536
+ if pixel_values is None:
537
+ raise ValueError("You have to specify pixel_values")
538
+
539
+ outputs = self.convnext(
540
+ pixel_values=pixel_values,
541
+ output_hidden_states=output_hidden_states,
542
+ return_dict=return_dict,
543
+ training=training,
544
+ )
545
+
546
+ if not return_dict:
547
+ return (outputs[0],) + outputs[1:]
548
+
549
+ return TFBaseModelOutputWithPooling(
550
+ last_hidden_state=outputs.last_hidden_state,
551
+ pooler_output=outputs.pooler_output,
552
+ hidden_states=outputs.hidden_states,
553
+ )
554
+
555
+ def build(self, input_shape=None):
556
+ if self.built:
557
+ return
558
+ self.built = True
559
+ if getattr(self, "convnext", None) is not None:
560
+ with tf.name_scope(self.convnext.name):
561
+ self.convnext.build(None)
562
+
563
+
564
+ @add_start_docstrings(
565
+ """
566
+ ConvNext Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
567
+ ImageNet.
568
+ """,
569
+ CONVNEXT_START_DOCSTRING,
570
+ )
571
+ class TFConvNextForImageClassification(TFConvNextPreTrainedModel, TFSequenceClassificationLoss):
572
+ def __init__(self, config: ConvNextConfig, *inputs, **kwargs):
573
+ super().__init__(config, *inputs, **kwargs)
574
+
575
+ self.num_labels = config.num_labels
576
+ self.convnext = TFConvNextMainLayer(config, name="convnext")
577
+
578
+ # Classifier head
579
+ self.classifier = keras.layers.Dense(
580
+ units=config.num_labels,
581
+ kernel_initializer=get_initializer(config.initializer_range),
582
+ bias_initializer="zeros",
583
+ name="classifier",
584
+ )
585
+ self.config = config
586
+
587
+ @unpack_inputs
588
+ @add_start_docstrings_to_model_forward(CONVNEXT_INPUTS_DOCSTRING)
589
+ @replace_return_docstrings(output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
590
+ def call(
591
+ self,
592
+ pixel_values: TFModelInputType | None = None,
593
+ output_hidden_states: Optional[bool] = None,
594
+ return_dict: Optional[bool] = None,
595
+ labels: np.ndarray | tf.Tensor | None = None,
596
+ training: Optional[bool] = False,
597
+ ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
598
+ r"""
599
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
600
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
601
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
602
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
603
+
604
+ Returns:
605
+
606
+ Examples:
607
+
608
+ ```python
609
+ >>> from transformers import AutoImageProcessor, TFConvNextForImageClassification
610
+ >>> import tensorflow as tf
611
+ >>> from PIL import Image
612
+ >>> import requests
613
+
614
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
615
+ >>> image = Image.open(requests.get(url, stream=True).raw)
616
+
617
+ >>> image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224")
618
+ >>> model = TFConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224")
619
+
620
+ >>> inputs = image_processor(images=image, return_tensors="tf")
621
+ >>> outputs = model(**inputs)
622
+ >>> logits = outputs.logits
623
+ >>> # model predicts one of the 1000 ImageNet classes
624
+ >>> predicted_class_idx = tf.math.argmax(logits, axis=-1)[0]
625
+ >>> print("Predicted class:", model.config.id2label[int(predicted_class_idx)])
626
+ ```"""
627
+ output_hidden_states = (
628
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
629
+ )
630
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
631
+
632
+ if pixel_values is None:
633
+ raise ValueError("You have to specify pixel_values")
634
+
635
+ outputs = self.convnext(
636
+ pixel_values,
637
+ output_hidden_states=output_hidden_states,
638
+ return_dict=return_dict,
639
+ training=training,
640
+ )
641
+
642
+ pooled_output = outputs.pooler_output if return_dict else outputs[1]
643
+
644
+ logits = self.classifier(pooled_output)
645
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
646
+
647
+ if not return_dict:
648
+ output = (logits,) + outputs[2:]
649
+ return ((loss,) + output) if loss is not None else output
650
+
651
+ return TFSequenceClassifierOutput(
652
+ loss=loss,
653
+ logits=logits,
654
+ hidden_states=outputs.hidden_states,
655
+ )
656
+
657
+ def build(self, input_shape=None):
658
+ if self.built:
659
+ return
660
+ self.built = True
661
+ if getattr(self, "convnext", None) is not None:
662
+ with tf.name_scope(self.convnext.name):
663
+ self.convnext.build(None)
664
+ if getattr(self, "classifier", None) is not None:
665
+ if hasattr(self.classifier, "name"):
666
+ with tf.name_scope(self.classifier.name):
667
+ self.classifier.build([None, None, self.config.hidden_sizes[-1]])
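The TF implementation above accepts the two input formats spelled out in `CONVNEXT_START_DOCSTRING`; a short sketch under the same `facebook/convnext-tiny-224` checkpoint assumption, showing that keyword-argument and dict inputs give the same result:

```python
# Sketch: keyword-argument input vs. dict input for the TF model.
# Assumes the "facebook/convnext-tiny-224" checkpoint is reachable.
import tensorflow as tf
import requests
from PIL import Image

from transformers import AutoImageProcessor, TFConvNextModel

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224")
model = TFConvNextModel.from_pretrained("facebook/convnext-tiny-224")

inputs = image_processor(images=image, return_tensors="tf")

# 1) all inputs as keyword arguments
out_kwargs = model(pixel_values=inputs["pixel_values"])

# 2) all inputs gathered in a dict passed as the first positional argument
out_dict = model({"pixel_values": inputs["pixel_values"]})

# TFConvNextMainLayer transposes back to NCHW, so last_hidden_state is (N, C, H, W)
print(out_kwargs.last_hidden_state.shape, out_kwargs.pooler_output.shape)
tf.debugging.assert_near(out_kwargs.pooler_output, out_dict.pooler_output)
```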
venv/lib/python3.10/site-packages/transformers/models/gpt_sw3/__pycache__/convert_megatron_to_pytorch.cpython-310.pyc ADDED
Binary file (5.3 kB).
 
venv/lib/python3.10/site-packages/transformers/models/mbart50/__init__.py ADDED
@@ -0,0 +1,58 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available
17
+
18
+
19
+ _import_structure = {}
20
+
21
+ try:
22
+ if not is_sentencepiece_available():
23
+ raise OptionalDependencyNotAvailable()
24
+ except OptionalDependencyNotAvailable:
25
+ pass
26
+ else:
27
+ _import_structure["tokenization_mbart50"] = ["MBart50Tokenizer"]
28
+
29
+ try:
30
+ if not is_tokenizers_available():
31
+ raise OptionalDependencyNotAvailable()
32
+ except OptionalDependencyNotAvailable:
33
+ pass
34
+ else:
35
+ _import_structure["tokenization_mbart50_fast"] = ["MBart50TokenizerFast"]
36
+
37
+
38
+ if TYPE_CHECKING:
39
+ try:
40
+ if not is_sentencepiece_available():
41
+ raise OptionalDependencyNotAvailable()
42
+ except OptionalDependencyNotAvailable:
43
+ pass
44
+ else:
45
+ from .tokenization_mbart50 import MBart50Tokenizer
46
+
47
+ try:
48
+ if not is_tokenizers_available():
49
+ raise OptionalDependencyNotAvailable()
50
+ except OptionalDependencyNotAvailable:
51
+ pass
52
+ else:
53
+ from .tokenization_mbart50_fast import MBart50TokenizerFast
54
+
55
+ else:
56
+ import sys
57
+
58
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
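The `__init__.py` above wires the two mbart50 tokenizers into a `_LazyModule`, so they are only imported when first accessed and only if their optional dependencies (`sentencepiece` for the slow tokenizer, `tokenizers` for the fast one) are installed. A minimal sketch of the resulting public import path, assuming those dependencies are available:

```python
# Sketch: the lazy module makes both tokenizers importable from the top level.
# MBart50Tokenizer needs sentencepiece; MBart50TokenizerFast needs tokenizers.
from transformers import MBart50Tokenizer, MBart50TokenizerFast

tokenizer = MBart50TokenizerFast.from_pretrained(
    "facebook/mbart-large-50", src_lang="en_XX", tgt_lang="ro_RO"
)
print(type(tokenizer).__name__)  # MBart50TokenizerFast
```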
venv/lib/python3.10/site-packages/transformers/models/mbart50/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (922 Bytes).
 
venv/lib/python3.10/site-packages/transformers/models/mbart50/__pycache__/tokenization_mbart50.cpython-310.pyc ADDED
Binary file (14.2 kB).
 
venv/lib/python3.10/site-packages/transformers/models/mbart50/__pycache__/tokenization_mbart50_fast.cpython-310.pyc ADDED
Binary file (9.98 kB).
 
venv/lib/python3.10/site-packages/transformers/models/mbart50/tokenization_mbart50.py ADDED
@@ -0,0 +1,354 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Facebook AI Research Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import os
17
+ from shutil import copyfile
18
+ from typing import Any, Dict, List, Optional, Tuple
19
+
20
+ import sentencepiece as spm
21
+
22
+ from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
23
+ from ...utils import logging
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+ SPIECE_UNDERLINE = "▁"
29
+
30
+ VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
31
+
32
+
33
+ FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"] # fmt: skip
34
+
35
+
36
+ class MBart50Tokenizer(PreTrainedTokenizer):
37
+ """
38
+ Construct a MBart50 tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).
39
+
40
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
41
+ this superclass for more information regarding those methods.
42
+
43
+ Args:
44
+ vocab_file (`str`):
45
+ Path to the vocabulary file.
46
+ src_lang (`str`, *optional*):
47
+ A string representing the source language.
48
+ tgt_lang (`str`, *optional*):
49
+ A string representing the target language.
50
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
51
+ The end of sequence token.
52
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
53
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
54
+ sequence classification or for a text and a question for question answering. It is also used as the last
55
+ token of a sequence built with special tokens.
56
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
57
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
58
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
59
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
60
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
61
+ token instead.
62
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
63
+ The token used for padding, for example when batching sequences of different lengths.
64
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
65
+ The token used for masking values. This is the token used when training this model with masked language
66
+ modeling. This is the token which the model will try to predict.
67
+ sp_model_kwargs (`dict`, *optional*):
68
+ Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
69
+ SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
70
+ to set:
71
+
72
+ - `enable_sampling`: Enable subword regularization.
73
+ - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
74
+
75
+ - `nbest_size = {0,1}`: No sampling is performed.
76
+ - `nbest_size > 1`: samples from the nbest_size results.
77
+ - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
78
+ using forward-filtering-and-backward-sampling algorithm.
79
+
80
+ - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
81
+ BPE-dropout.
82
+
83
+ Examples:
84
+
85
+ ```python
86
+ >>> from transformers import MBart50Tokenizer
87
+
88
+ >>> tokenizer = MBart50Tokenizer.from_pretrained("facebook/mbart-large-50", src_lang="en_XX", tgt_lang="ro_RO")
89
+ >>> src_text = " UN Chief Says There Is No Military Solution in Syria"
90
+ >>> tgt_text = "Şeful ONU declară că nu există o soluţie militară în Siria"
91
+ >>> model_inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")
92
+ >>> # model(**model_inputs) should work
93
+ ```"""
94
+
95
+ vocab_files_names = VOCAB_FILES_NAMES
96
+ model_input_names = ["input_ids", "attention_mask"]
97
+
98
+ prefix_tokens: List[int] = []
99
+ suffix_tokens: List[int] = []
100
+
101
+ def __init__(
102
+ self,
103
+ vocab_file,
104
+ src_lang=None,
105
+ tgt_lang=None,
106
+ eos_token="</s>",
107
+ sep_token="</s>",
108
+ cls_token="<s>",
109
+ unk_token="<unk>",
110
+ pad_token="<pad>",
111
+ mask_token="<mask>",
112
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
113
+ **kwargs,
114
+ ) -> None:
115
+ # Mask token behaves like a normal word, i.e. it includes the space before it
116
+ mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
117
+
118
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
119
+
120
+ kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", []) or []
121
+ kwargs["additional_special_tokens"] += [
122
+ code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
123
+ ]
124
+
125
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
126
+ self.sp_model.Load(str(vocab_file))
127
+ self.vocab_file = vocab_file
128
+
129
+ # Original fairseq vocab and spm vocab must be "aligned":
130
+ # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
131
+ # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
132
+ # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
133
+ # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
134
+
135
+ # Mimic fairseq token-to-id alignment for the first 4 tokens
136
+ self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
137
+
138
+ # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
139
+ self.fairseq_offset = 1
140
+
141
+ self.sp_model_size = len(self.sp_model)
142
+ self.lang_code_to_id = {
143
+ code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
144
+ }
145
+ self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
146
+ self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
147
+
148
+ self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
149
+ self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
150
+
151
+ super().__init__(
152
+ src_lang=src_lang,
153
+ tgt_lang=tgt_lang,
154
+ eos_token=eos_token,
155
+ unk_token=unk_token,
156
+ sep_token=sep_token,
157
+ cls_token=cls_token,
158
+ pad_token=pad_token,
159
+ mask_token=mask_token,
160
+ sp_model_kwargs=self.sp_model_kwargs,
161
+ **kwargs,
162
+ )
163
+
164
+ self._src_lang = src_lang if src_lang is not None else "en_XX"
165
+ self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
166
+ self.tgt_lang = tgt_lang
167
+ self.set_src_lang_special_tokens(self._src_lang)
168
+
169
+ @property
170
+ def vocab_size(self) -> int:
171
+ return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1 # Plus 1 for the mask token
172
+
173
+ @property
174
+ def src_lang(self) -> str:
175
+ return self._src_lang
176
+
177
+ @src_lang.setter
178
+ def src_lang(self, new_src_lang: str) -> None:
179
+ self._src_lang = new_src_lang
180
+ self.set_src_lang_special_tokens(self._src_lang)
181
+
182
+ def __getstate__(self) -> Dict:
183
+ state = self.__dict__.copy()
184
+ state["sp_model"] = None
185
+ return state
186
+
187
+ def __setstate__(self, d: Dict) -> None:
188
+ self.__dict__ = d
189
+
190
+ # for backward compatibility
191
+ if not hasattr(self, "sp_model_kwargs"):
192
+ self.sp_model_kwargs = {}
193
+
194
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
195
+ self.sp_model.Load(self.vocab_file)
196
+
197
+ def get_vocab(self) -> Dict:
198
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
199
+ vocab.update(self.added_tokens_encoder)
200
+ return vocab
201
+
202
+ def _tokenize(self, text: str) -> List[str]:
203
+ return self.sp_model.encode(text, out_type=str)
204
+
205
+ def _convert_token_to_id(self, token: str) -> int:
206
+ """Converts a token (str) in an id using the vocab."""
207
+ if token in self.fairseq_tokens_to_ids:
208
+ return self.fairseq_tokens_to_ids[token]
209
+ spm_id = self.sp_model.PieceToId(token)
210
+
211
+ # Need to return unknown token if the SP model returned 0
212
+ return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
213
+
214
+ def _convert_id_to_token(self, index: int) -> str:
215
+ """Converts an index (integer) in a token (str) using the vocab."""
216
+ if index in self.fairseq_ids_to_tokens:
217
+ return self.fairseq_ids_to_tokens[index]
218
+ return self.sp_model.IdToPiece(index - self.fairseq_offset)
219
+
220
+ # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.convert_tokens_to_string
221
+ def convert_tokens_to_string(self, tokens):
222
+ """Converts a sequence of tokens (string) in a single string."""
223
+ current_sub_tokens = []
224
+ out_string = ""
225
+ prev_is_special = False
226
+ for token in tokens:
227
+ # make sure that special tokens are not decoded using sentencepiece model
228
+ if token in self.all_special_tokens:
229
+ if not prev_is_special:
230
+ out_string += " "
231
+ out_string += self.sp_model.decode(current_sub_tokens) + token
232
+ prev_is_special = True
233
+ current_sub_tokens = []
234
+ else:
235
+ current_sub_tokens.append(token)
236
+ prev_is_special = False
237
+ out_string += self.sp_model.decode(current_sub_tokens)
238
+ return out_string.strip()
239
+
240
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
241
+ if not os.path.isdir(save_directory):
242
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
243
+ return
244
+ out_vocab_file = os.path.join(
245
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
246
+ )
247
+
248
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
249
+ copyfile(self.vocab_file, out_vocab_file)
250
+ elif not os.path.isfile(self.vocab_file):
251
+ with open(out_vocab_file, "wb") as fi:
252
+ content_spiece_model = self.sp_model.serialized_model_proto()
253
+ fi.write(content_spiece_model)
254
+
255
+ return (out_vocab_file,)
256
+
257
+ def get_special_tokens_mask(
258
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
259
+ ) -> List[int]:
260
+ """
261
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
262
+ special tokens using the tokenizer `prepare_for_model` method.
263
+
264
+ Args:
265
+ token_ids_0 (`List[int]`):
266
+ List of IDs.
267
+ token_ids_1 (`List[int]`, *optional*):
268
+ Optional second list of IDs for sequence pairs.
269
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
270
+ Whether or not the token list is already formatted with special tokens for the model.
271
+
272
+ Returns:
273
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
274
+ """
275
+
276
+ if already_has_special_tokens:
277
+ return super().get_special_tokens_mask(
278
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
279
+ )
280
+
281
+ prefix_ones = [1] * len(self.prefix_tokens)
282
+ suffix_ones = [1] * len(self.suffix_tokens)
283
+ if token_ids_1 is None:
284
+ return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
285
+ return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
286
+
287
+ def build_inputs_with_special_tokens(
288
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
289
+ ) -> List[int]:
290
+ """
291
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
292
+ adding special tokens. An MBART-50 sequence has the following format, where `X` represents the sequence:
293
+
294
+ - `input_ids` (for encoder) `[src_lang_code] X [eos]`
295
+ - `labels`: (for decoder) `[tgt_lang_code] X [eos]`
296
+
297
+ BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a
298
+ separator.
299
+
300
+ Args:
301
+ token_ids_0 (`List[int]`):
302
+ List of IDs to which the special tokens will be added.
303
+ token_ids_1 (`List[int]`, *optional*):
304
+ Optional second list of IDs for sequence pairs.
305
+
306
+ Returns:
307
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
308
+ """
309
+ if token_ids_1 is None:
310
+ return self.prefix_tokens + token_ids_0 + self.suffix_tokens
311
+ # We don't expect to process pairs, but leave the pair logic for API consistency
312
+ return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
313
+
314
+ def _build_translation_inputs(
315
+ self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
316
+ ):
317
+ """Used by translation pipeline, to prepare inputs for the generate function"""
318
+ if src_lang is None or tgt_lang is None:
319
+ raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
320
+ self.src_lang = src_lang
321
+ inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
322
+ tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
323
+ inputs["forced_bos_token_id"] = tgt_lang_id
324
+ return inputs
325
+
326
+ def prepare_seq2seq_batch(
327
+ self,
328
+ src_texts: List[str],
329
+ src_lang: str = "en_XX",
330
+ tgt_texts: Optional[List[str]] = None,
331
+ tgt_lang: str = "ro_RO",
332
+ **kwargs,
333
+ ) -> BatchEncoding:
334
+ self.src_lang = src_lang
335
+ self.tgt_lang = tgt_lang
336
+ return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
337
+
338
+ def _switch_to_input_mode(self):
339
+ return self.set_src_lang_special_tokens(self.src_lang)
340
+
341
+ def _switch_to_target_mode(self):
342
+ return self.set_tgt_lang_special_tokens(self.tgt_lang)
343
+
344
+ def set_src_lang_special_tokens(self, src_lang: str) -> None:
345
+ """Reset the special tokens to the source lang setting. prefix=[src_lang_code] and suffix=[eos]."""
346
+ self.cur_lang_code_id = self.lang_code_to_id[src_lang]
347
+ self.prefix_tokens = [self.cur_lang_code_id]
348
+ self.suffix_tokens = [self.eos_token_id]
349
+
350
+ def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
351
+ """Reset the special tokens to the target language setting. prefix=[tgt_lang_code] and suffix=[eos]."""
352
+ self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
353
+ self.prefix_tokens = [self.cur_lang_code_id]
354
+ self.suffix_tokens = [self.eos_token_id]
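The two `set_*_lang_special_tokens` methods above are what give mBART-50 its `[lang_code] X [eos]` layout on both the source and the target side. A minimal usage sketch of the slow tokenizer (checkpoint name and example strings are illustrative, mirroring the docstring example in the fast tokenizer below):

```python
# Minimal sketch of the language-code prefix / eos suffix behaviour above.
# Checkpoint and strings are illustrative only.
from transformers import MBart50Tokenizer

tokenizer = MBart50Tokenizer.from_pretrained(
    "facebook/mbart-large-50", src_lang="en_XX", tgt_lang="ro_RO"
)

# Source side: prefix_tokens = [en_XX id], suffix_tokens = [eos id]
enc = tokenizer("UN Chief Says There Is No Military Solution in Syria")
assert enc["input_ids"][0] == tokenizer.lang_code_to_id["en_XX"]
assert enc["input_ids"][-1] == tokenizer.eos_token_id

# Target side: text_target triggers _switch_to_target_mode(), so the labels
# start with the tgt_lang code instead of the src_lang code.
labels = tokenizer(text_target="Şeful ONU declară că nu există o soluţie militară în Siria")
assert labels["input_ids"][0] == tokenizer.lang_code_to_id["ro_RO"]
```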
venv/lib/python3.10/site-packages/transformers/models/mbart50/tokenization_mbart50_fast.py ADDED
@@ -0,0 +1,259 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Facebook AI Research Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import os
17
+ from shutil import copyfile
18
+ from typing import List, Optional, Tuple
19
+
20
+ from tokenizers import processors
21
+
22
+ from ...tokenization_utils import AddedToken, BatchEncoding
23
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
24
+ from ...utils import is_sentencepiece_available, logging
25
+
26
+
27
+ if is_sentencepiece_available():
28
+ from .tokenization_mbart50 import MBart50Tokenizer
29
+ else:
30
+ MBart50Tokenizer = None
31
+
32
+
33
+ logger = logging.get_logger(__name__)
34
+
35
+ VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
36
+
37
+
38
+ FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"] # fmt: skip
39
+
40
+
41
+ class MBart50TokenizerFast(PreTrainedTokenizerFast):
42
+ """
43
+ Construct a "fast" MBART tokenizer for mBART-50 (backed by HuggingFace's *tokenizers* library). Based on
44
+ [BPE](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=BPE#models).
45
+
46
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
47
+ refer to this superclass for more information regarding those methods.
48
+
49
+ Args:
50
+ vocab_file (`str`):
51
+ Path to the vocabulary file.
52
+ src_lang (`str`, *optional*):
53
+ A string representing the source language.
54
+ tgt_lang (`str`, *optional*):
55
+ A string representing the target language.
56
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
57
+ The end of sequence token.
58
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
59
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
60
+ sequence classification or for a text and a question for question answering. It is also used as the last
61
+ token of a sequence built with special tokens.
62
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
63
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
64
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
65
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
66
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
67
+ token instead.
68
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
69
+ The token used for padding, for example when batching sequences of different lengths.
70
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
71
+ The token used for masking values. This is the token used when training this model with masked language
72
+ modeling. This is the token which the model will try to predict.
73
+
74
+ Examples:
75
+
76
+ ```python
77
+ >>> from transformers import MBart50TokenizerFast
78
+
79
+ >>> tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50", src_lang="en_XX", tgt_lang="ro_RO")
80
+ >>> src_text = " UN Chief Says There Is No Military Solution in Syria"
81
+ >>> tgt_text = "Şeful ONU declară că nu există o soluţie militară în Siria"
82
+ >>> model_inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")
83
+ >>> # model(**model_inputs) should work
84
+ ```"""
85
+
86
+ vocab_files_names = VOCAB_FILES_NAMES
87
+ model_input_names = ["input_ids", "attention_mask"]
88
+ slow_tokenizer_class = MBart50Tokenizer
89
+
90
+ prefix_tokens: List[int] = []
91
+ suffix_tokens: List[int] = []
92
+
93
+ def __init__(
94
+ self,
95
+ vocab_file=None,
96
+ src_lang=None,
97
+ tgt_lang=None,
98
+ tokenizer_file=None,
99
+ eos_token="</s>",
100
+ sep_token="</s>",
101
+ cls_token="<s>",
102
+ unk_token="<unk>",
103
+ pad_token="<pad>",
104
+ mask_token="<mask>",
105
+ **kwargs,
106
+ ):
107
+ # Mask token behaves like a normal word, i.e. it includes the space before it
108
+ mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
109
+
110
+ kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", []) or []
111
+ kwargs["additional_special_tokens"] += [
112
+ code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
113
+ ]
114
+
115
+ super().__init__(
116
+ vocab_file,
117
+ src_lang=src_lang,
118
+ tgt_lang=tgt_lang,
119
+ tokenizer_file=tokenizer_file,
120
+ eos_token=eos_token,
121
+ sep_token=sep_token,
122
+ cls_token=cls_token,
123
+ unk_token=unk_token,
124
+ pad_token=pad_token,
125
+ mask_token=mask_token,
126
+ **kwargs,
127
+ )
128
+
129
+ self.vocab_file = vocab_file
130
+
131
+ self.lang_code_to_id = {
132
+ lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
133
+ }
134
+
135
+ self._src_lang = src_lang if src_lang is not None else "en_XX"
136
+ self.tgt_lang = tgt_lang
137
+ self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
138
+ self.set_src_lang_special_tokens(self._src_lang)
139
+
140
+ @property
141
+ def can_save_slow_tokenizer(self) -> bool:
142
+ return os.path.isfile(self.vocab_file) if self.vocab_file else False
143
+
144
+ @property
145
+ def src_lang(self) -> str:
146
+ return self._src_lang
147
+
148
+ @src_lang.setter
149
+ def src_lang(self, new_src_lang: str) -> None:
150
+ self._src_lang = new_src_lang
151
+ self.set_src_lang_special_tokens(self._src_lang)
152
+
153
+ def build_inputs_with_special_tokens(
154
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
155
+ ) -> List[int]:
156
+ """
157
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
158
+ adding special tokens. The special tokens depend on calling set_lang.
159
+
160
+ An MBART-50 sequence has the following format, where `X` represents the sequence:
161
+
162
+ - `input_ids` (for encoder) `[src_lang_code] X [eos]`
163
+ - `labels`: (for decoder) `[tgt_lang_code] X [eos]`
164
+
165
+ BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a
166
+ separator.
167
+
168
+ Args:
169
+ token_ids_0 (`List[int]`):
170
+ List of IDs to which the special tokens will be added.
171
+ token_ids_1 (`List[int]`, *optional*):
172
+ Optional second list of IDs for sequence pairs.
173
+
174
+ Returns:
175
+ `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.
176
+ """
177
+ if token_ids_1 is None:
178
+ return self.prefix_tokens + token_ids_0 + self.suffix_tokens
179
+ # We don't expect to process pairs, but leave the pair logic for API consistency
180
+ return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
181
+
182
+ def prepare_seq2seq_batch(
183
+ self,
184
+ src_texts: List[str],
185
+ src_lang: str = "en_XX",
186
+ tgt_texts: Optional[List[str]] = None,
187
+ tgt_lang: str = "ro_RO",
188
+ **kwargs,
189
+ ) -> BatchEncoding:
190
+ self.src_lang = src_lang
191
+ self.tgt_lang = tgt_lang
192
+ return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
193
+
194
+ def _switch_to_input_mode(self):
195
+ return self.set_src_lang_special_tokens(self.src_lang)
196
+
197
+ def _switch_to_target_mode(self):
198
+ return self.set_tgt_lang_special_tokens(self.tgt_lang)
199
+
200
+ def set_src_lang_special_tokens(self, src_lang: str) -> None:
201
+ """Reset the special tokens to the source lang setting. prefix=[src_lang_code] and suffix=[eos]."""
202
+ self.cur_lang_code_id = self.convert_tokens_to_ids(src_lang)
203
+ self.prefix_tokens = [self.cur_lang_code_id]
204
+ self.suffix_tokens = [self.eos_token_id]
205
+
206
+ prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
207
+ suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
208
+
209
+ self._tokenizer.post_processor = processors.TemplateProcessing(
210
+ single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
211
+ pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
212
+ special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
213
+ )
214
+
215
+ def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
216
+ """Reset the special tokens to the target language setting. prefix=[src_lang_code] and suffix=[eos]."""
217
+ self.cur_lang_code_id = self.convert_tokens_to_ids(tgt_lang)
218
+ self.prefix_tokens = [self.cur_lang_code_id]
219
+ self.suffix_tokens = [self.eos_token_id]
220
+
221
+ prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
222
+ suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
223
+
224
+ self._tokenizer.post_processor = processors.TemplateProcessing(
225
+ single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
226
+ pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
227
+ special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
228
+ )
229
+
230
+ def _build_translation_inputs(
231
+ self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
232
+ ):
233
+ """Used by translation pipeline, to prepare inputs for the generate function"""
234
+ if src_lang is None or tgt_lang is None:
235
+ raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
236
+ self.src_lang = src_lang
237
+ inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
238
+ tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
239
+ inputs["forced_bos_token_id"] = tgt_lang_id
240
+ return inputs
241
+
242
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
243
+ if not self.can_save_slow_tokenizer:
244
+ raise ValueError(
245
+ "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
246
+ "tokenizer."
247
+ )
248
+
249
+ if not os.path.isdir(save_directory):
250
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
251
+ return
252
+ out_vocab_file = os.path.join(
253
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
254
+ )
255
+
256
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
257
+ copyfile(self.vocab_file, out_vocab_file)
258
+
259
+ return (out_vocab_file,)
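Switching `src_lang` on the fast tokenizer re-runs `set_src_lang_special_tokens`, which rebuilds the `TemplateProcessing` post-processor, so every subsequent encoding picks up the new language code. A short sketch (illustrative checkpoint and strings):

```python
# Sketch of the src_lang switching behaviour of MBart50TokenizerFast.
from transformers import MBart50TokenizerFast

tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50", src_lang="en_XX")

ids_en = tokenizer("Hello world").input_ids       # [en_XX code] ... [eos]
tokenizer.src_lang = "fr_XX"                      # setter re-templates the post-processor
ids_fr = tokenizer("Bonjour le monde").input_ids  # [fr_XX code] ... [eos]

assert ids_en[0] == tokenizer.convert_tokens_to_ids("en_XX")
assert ids_fr[0] == tokenizer.convert_tokens_to_ids("fr_XX")
assert ids_en[-1] == ids_fr[-1] == tokenizer.eos_token_id
```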
venv/lib/python3.10/site-packages/transformers/models/pegasus/__init__.py ADDED
@@ -0,0 +1,140 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import (
17
+ OptionalDependencyNotAvailable,
18
+ _LazyModule,
19
+ is_flax_available,
20
+ is_sentencepiece_available,
21
+ is_tf_available,
22
+ is_tokenizers_available,
23
+ is_torch_available,
24
+ )
25
+
26
+
27
+ _import_structure = {"configuration_pegasus": ["PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusConfig"]}
28
+
29
+ try:
30
+ if not is_sentencepiece_available():
31
+ raise OptionalDependencyNotAvailable()
32
+ except OptionalDependencyNotAvailable:
33
+ pass
34
+ else:
35
+ _import_structure["tokenization_pegasus"] = ["PegasusTokenizer"]
36
+
37
+ try:
38
+ if not is_tokenizers_available():
39
+ raise OptionalDependencyNotAvailable()
40
+ except OptionalDependencyNotAvailable:
41
+ pass
42
+ else:
43
+ _import_structure["tokenization_pegasus_fast"] = ["PegasusTokenizerFast"]
44
+
45
+ try:
46
+ if not is_torch_available():
47
+ raise OptionalDependencyNotAvailable()
48
+ except OptionalDependencyNotAvailable:
49
+ pass
50
+ else:
51
+ _import_structure["modeling_pegasus"] = [
52
+ "PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
53
+ "PegasusForCausalLM",
54
+ "PegasusForConditionalGeneration",
55
+ "PegasusModel",
56
+ "PegasusPreTrainedModel",
57
+ ]
58
+
59
+ try:
60
+ if not is_tf_available():
61
+ raise OptionalDependencyNotAvailable()
62
+ except OptionalDependencyNotAvailable:
63
+ pass
64
+ else:
65
+ _import_structure["modeling_tf_pegasus"] = [
66
+ "TFPegasusForConditionalGeneration",
67
+ "TFPegasusModel",
68
+ "TFPegasusPreTrainedModel",
69
+ ]
70
+
71
+ try:
72
+ if not is_flax_available():
73
+ raise OptionalDependencyNotAvailable()
74
+ except OptionalDependencyNotAvailable:
75
+ pass
76
+ else:
77
+ _import_structure["modeling_flax_pegasus"] = [
78
+ "FlaxPegasusForConditionalGeneration",
79
+ "FlaxPegasusModel",
80
+ "FlaxPegasusPreTrainedModel",
81
+ ]
82
+
83
+
84
+ if TYPE_CHECKING:
85
+ from .configuration_pegasus import PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusConfig
86
+
87
+ try:
88
+ if not is_sentencepiece_available():
89
+ raise OptionalDependencyNotAvailable()
90
+ except OptionalDependencyNotAvailable:
91
+ pass
92
+ else:
93
+ from .tokenization_pegasus import PegasusTokenizer
94
+
95
+ try:
96
+ if not is_tokenizers_available():
97
+ raise OptionalDependencyNotAvailable()
98
+ except OptionalDependencyNotAvailable:
99
+ pass
100
+ else:
101
+ from .tokenization_pegasus_fast import PegasusTokenizerFast
102
+
103
+ try:
104
+ if not is_torch_available():
105
+ raise OptionalDependencyNotAvailable()
106
+ except OptionalDependencyNotAvailable:
107
+ pass
108
+ else:
109
+ from .modeling_pegasus import (
110
+ PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
111
+ PegasusForCausalLM,
112
+ PegasusForConditionalGeneration,
113
+ PegasusModel,
114
+ PegasusPreTrainedModel,
115
+ )
116
+
117
+ try:
118
+ if not is_tf_available():
119
+ raise OptionalDependencyNotAvailable()
120
+ except OptionalDependencyNotAvailable:
121
+ pass
122
+ else:
123
+ from .modeling_tf_pegasus import TFPegasusForConditionalGeneration, TFPegasusModel, TFPegasusPreTrainedModel
124
+
125
+ try:
126
+ if not is_flax_available():
127
+ raise OptionalDependencyNotAvailable()
128
+ except OptionalDependencyNotAvailable:
129
+ pass
130
+ else:
131
+ from .modeling_flax_pegasus import (
132
+ FlaxPegasusForConditionalGeneration,
133
+ FlaxPegasusModel,
134
+ FlaxPegasusPreTrainedModel,
135
+ )
136
+
137
+ else:
138
+ import sys
139
+
140
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
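As in the other model packages, the `__init__` above only declares an import structure and then replaces the module with a `_LazyModule`, so framework-specific classes are imported on first attribute access and names backed by missing optional dependencies are simply not registered. A small sketch of the resulting behaviour (assumes a standard `transformers` install with PyTorch available):

```python
# Sketch of the lazy-import behaviour configured above.
import transformers.models.pegasus as pegasus_pkg

# The submodule object is the _LazyModule instance; nothing heavy is loaded yet.
print(type(pegasus_pkg).__name__)  # typically "_LazyModule" rather than a plain module

# Attribute access triggers the real import declared in _import_structure.
config = pegasus_pkg.PegasusConfig()
model_cls = pegasus_pkg.PegasusForConditionalGeneration  # requires torch to be installed
```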
venv/lib/python3.10/site-packages/transformers/models/pegasus/__pycache__/modeling_tf_pegasus.cpython-310.pyc ADDED
Binary file (51.4 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/pegasus/configuration_pegasus.py ADDED
@@ -0,0 +1,164 @@
1
+ # coding=utf-8
2
+ # Copyright 2021, Google and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PEGASUS model configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...utils import logging
19
+
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+
24
+ from ..deprecated._archive_maps import PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
25
+
26
+
27
+ class PegasusConfig(PretrainedConfig):
28
+ r"""
29
+ This is the configuration class to store the configuration of a [`PegasusModel`]. It is used to instantiate a
30
+ PEGASUS model according to the specified arguments, defining the model architecture. Instantiating a configuration
31
+ with the defaults will yield a similar configuration to that of the PEGASUS
32
+ [google/pegasus-large](https://huggingface.co/google/pegasus-large) architecture.
33
+
34
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
35
+ documentation from [`PretrainedConfig`] for more information.
36
+
37
+
38
+ Args:
39
+ vocab_size (`int`, *optional*, defaults to 50265):
40
+ Vocabulary size of the PEGASUS model. Defines the number of different tokens that can be represented by the
41
+ `inputs_ids` passed when calling [`PegasusModel`] or [`TFPegasusModel`].
42
+ d_model (`int`, *optional*, defaults to 1024):
43
+ Dimensionality of the layers and the pooler layer.
44
+ encoder_layers (`int`, *optional*, defaults to 12):
45
+ Number of encoder layers.
46
+ decoder_layers (`int`, *optional*, defaults to 12):
47
+ Number of decoder layers.
48
+ encoder_attention_heads (`int`, *optional*, defaults to 16):
49
+ Number of attention heads for each attention layer in the Transformer encoder.
50
+ decoder_attention_heads (`int`, *optional*, defaults to 16):
51
+ Number of attention heads for each attention layer in the Transformer decoder.
52
+ decoder_ffn_dim (`int`, *optional*, defaults to 4096):
53
+ Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
54
+ encoder_ffn_dim (`int`, *optional*, defaults to 4096):
55
+ Dimensionality of the "intermediate" (often named feed-forward) layer in encoder.
56
+ activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
57
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
58
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
59
+ dropout (`float`, *optional*, defaults to 0.1):
60
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
61
+ attention_dropout (`float`, *optional*, defaults to 0.0):
62
+ The dropout ratio for the attention probabilities.
63
+ activation_dropout (`float`, *optional*, defaults to 0.0):
64
+ The dropout ratio for activations inside the fully connected layer.
65
+ max_position_embeddings (`int`, *optional*, defaults to 1024):
66
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
67
+ just in case (e.g., 512 or 1024 or 2048).
68
+ init_std (`float`, *optional*, defaults to 0.02):
69
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
70
+ encoder_layerdrop (`float`, *optional*, defaults to 0.0):
71
+ The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
72
+ for more details.
73
+ decoder_layerdrop (`float`, *optional*, defaults to 0.0):
74
+ The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
75
+ for more details.
76
+ scale_embedding (`bool`, *optional*, defaults to `False`):
77
+ Scale embeddings by a factor of sqrt(d_model).
78
+ use_cache (`bool`, *optional*, defaults to `True`):
79
+ Whether or not the model should return the last key/values attentions (not used by all models)
80
+ forced_eos_token_id (`int`, *optional*, defaults to 1):
81
+ The id of the token to force as the last generated token when `max_length` is reached. Usually set to
82
+ `eos_token_id`.
83
+
84
+ Example:
85
+
86
+ ```python
87
+ >>> from transformers import PegasusConfig, PegasusModel
88
+
89
+ >>> # Initializing a PEGASUS google/pegasus-large style configuration
90
+ >>> configuration = PegasusConfig()
91
+
92
+ >>> # Initializing a model (with random weights) from the google/pegasus-large style configuration
93
+ >>> model = PegasusModel(configuration)
94
+
95
+ >>> # Accessing the model configuration
96
+ >>> configuration = model.config
97
+ ```"""
98
+
99
+ model_type = "pegasus"
100
+ keys_to_ignore_at_inference = ["past_key_values"]
101
+ attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
102
+
103
+ def __init__(
104
+ self,
105
+ vocab_size=50265,
106
+ max_position_embeddings=1024,
107
+ encoder_layers=12,
108
+ encoder_ffn_dim=4096,
109
+ encoder_attention_heads=16,
110
+ decoder_layers=12,
111
+ decoder_ffn_dim=4096,
112
+ decoder_attention_heads=16,
113
+ encoder_layerdrop=0.0,
114
+ decoder_layerdrop=0.0,
115
+ use_cache=True,
116
+ is_encoder_decoder=True,
117
+ activation_function="gelu",
118
+ d_model=1024,
119
+ dropout=0.1,
120
+ attention_dropout=0.0,
121
+ activation_dropout=0.0,
122
+ init_std=0.02,
123
+ decoder_start_token_id=0,
124
+ scale_embedding=False,
125
+ pad_token_id=0,
126
+ eos_token_id=1,
127
+ forced_eos_token_id=1,
128
+ **kwargs,
129
+ ):
130
+ self.vocab_size = vocab_size
131
+ self.max_position_embeddings = max_position_embeddings
132
+ self.d_model = d_model
133
+ self.encoder_ffn_dim = encoder_ffn_dim
134
+ self.encoder_layers = encoder_layers
135
+ self.encoder_attention_heads = encoder_attention_heads
136
+ self.decoder_ffn_dim = decoder_ffn_dim
137
+ self.decoder_layers = decoder_layers
138
+ self.decoder_attention_heads = decoder_attention_heads
139
+ self.dropout = dropout
140
+ self.attention_dropout = attention_dropout
141
+ self.activation_dropout = activation_dropout
142
+ self.activation_function = activation_function
143
+ self.init_std = init_std
144
+ self.encoder_layerdrop = encoder_layerdrop
145
+ self.decoder_layerdrop = decoder_layerdrop
146
+ self.use_cache = use_cache
147
+ self.num_hidden_layers = encoder_layers
148
+ self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
149
+ super().__init__(
150
+ pad_token_id=pad_token_id,
151
+ eos_token_id=eos_token_id,
152
+ is_encoder_decoder=is_encoder_decoder,
153
+ decoder_start_token_id=decoder_start_token_id,
154
+ forced_eos_token_id=forced_eos_token_id,
155
+ **kwargs,
156
+ )
157
+
158
+ @property
159
+ def num_attention_heads(self) -> int:
160
+ return self.encoder_attention_heads
161
+
162
+ @property
163
+ def hidden_size(self) -> int:
164
+ return self.d_model
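Besides the two explicit properties at the end of the class, `attribute_map` aliases the generic `hidden_size` and `num_attention_heads` names onto the Pegasus-specific fields, so code written against the generic config interface keeps working. A brief sketch:

```python
# Sketch of the attribute aliasing set up by attribute_map / the properties above.
from transformers import PegasusConfig

config = PegasusConfig(d_model=768, encoder_attention_heads=12, decoder_attention_heads=12)

assert config.hidden_size == config.d_model == 768
assert config.num_attention_heads == config.encoder_attention_heads == 12
assert config.decoder_start_token_id == 0 and config.forced_eos_token_id == 1  # defaults above
```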
venv/lib/python3.10/site-packages/transformers/models/pegasus/convert_pegasus_tf_to_pytorch.py ADDED
@@ -0,0 +1,131 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 Google and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import argparse
17
+ import os
18
+ from pathlib import Path
19
+ from typing import Dict
20
+
21
+ import tensorflow as tf
22
+ import torch
23
+ from tqdm import tqdm
24
+
25
+ from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
26
+ from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
27
+
28
+
29
+ PATTERNS = [
30
+ # replace left string with right string to get the relevant state_dict key (identical state dict to bart)
31
+ ["memory_attention", "encoder_attn"],
32
+ ["attention", "attn"],
33
+ ["/", "."],
34
+ [".LayerNorm.gamma", "_layer_norm.weight"],
35
+ [".LayerNorm.beta", "_layer_norm.bias"],
36
+ ["r.layer_", "r.layers."],
37
+ ["output_proj", "out_proj"],
38
+ ["ffn.dense_1.", "fc2."],
39
+ ["ffn.dense.", "fc1."],
40
+ ["ffn_layer_norm", "final_layer_norm"],
41
+ ["kernel", "weight"],
42
+ ["encoder_layer_norm.", "encoder.layer_norm."],
43
+ ["decoder_layer_norm.", "decoder.layer_norm."],
44
+ ["embeddings.weights", "shared.weight"],
45
+ ]
46
+
47
+
48
+ def rename_state_dict_key(k):
49
+ for pegasus_name, hf_name in PATTERNS:
50
+ k = k.replace(pegasus_name, hf_name)
51
+ return k
52
+
53
+
54
+ # See appendix C of paper for all hyperparams
55
+
56
+
57
+ def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
58
+ cfg_kwargs = DEFAULTS.copy()
59
+ cfg_kwargs.update(cfg_updates)
60
+ cfg = PegasusConfig(**cfg_kwargs)
61
+ torch_model = PegasusForConditionalGeneration(cfg)
62
+ sd = torch_model.model.state_dict()
63
+ mapping = {}
64
+ for k, v in tf_weights.items():
65
+ new_k = rename_state_dict_key(k)
66
+ if new_k not in sd:
67
+ raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
68
+
69
+ if "dense" in k or "proj" in new_k:
70
+ v = v.T
71
+ mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
72
+ assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
73
+ # make sure embedding.padding_idx is respected
74
+ mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
75
+ mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
76
+ mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
77
+ empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
78
+ mapping.update(**empty_biases)
79
+ missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
80
+ unexpected_missing = [
81
+ k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
82
+ ]
83
+ assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
84
+ assert extra == [], f"no matches found for the following tf keys {extra}"
85
+ return torch_model
86
+
87
+
88
+ def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
89
+ init_vars = tf.train.list_variables(path)
90
+ tf_weights = {}
91
+ ignore_name = ["Adafactor", "global_step"]
92
+ for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
93
+ skip_key = any(pat in name for pat in ignore_name)
94
+ if skip_key:
95
+ continue
96
+ array = tf.train.load_variable(path, name)
97
+ tf_weights[name] = array
98
+ return tf_weights
99
+
100
+
101
+ def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str):
102
+ # save tokenizer first
103
+ dataset = Path(ckpt_path).parent.name
104
+ desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
105
+ tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
106
+ assert tok.model_max_length == desired_max_model_length
107
+ tok.save_pretrained(save_dir)
108
+
109
+ # convert model
110
+ tf_weights = get_tf_weights_as_numpy(ckpt_path)
111
+ cfg_updates = task_specific_params[f"summarization_{dataset}"]
112
+ if dataset == "large":
113
+ cfg_updates["task_specific_params"] = task_specific_params
114
+ torch_model = convert_pegasus(tf_weights, cfg_updates)
115
+ torch_model.save_pretrained(save_dir)
116
+ sd = torch_model.state_dict()
117
+ sd.pop("model.decoder.embed_positions.weight")
118
+ sd.pop("model.encoder.embed_positions.weight")
119
+ torch.save(sd, Path(save_dir) / "pytorch_model.bin")
120
+
121
+
122
+ if __name__ == "__main__":
123
+ parser = argparse.ArgumentParser()
124
+ # Required parameters
125
+ parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
126
+ parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
127
+ args = parser.parse_args()
128
+ if args.save_dir is None:
129
+ dataset = Path(args.tf_ckpt_path).parent.name
130
+ args.save_dir = os.path.join("pegasus", dataset)
131
+ convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
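`rename_state_dict_key` applies the `PATTERNS` table as a chain of plain `str.replace` calls, rewriting a TF variable path piece by piece into a Bart-style state_dict key. A self-contained sketch with a trimmed copy of the table and a made-up variable name, purely for illustration:

```python
# Self-contained sketch of the PATTERNS-based renaming; the input key is invented.
PATTERNS = [
    ["memory_attention", "encoder_attn"],
    ["attention", "attn"],
    ["/", "."],
    ["r.layer_", "r.layers."],
    ["ffn.dense_1.", "fc2."],
    ["ffn.dense.", "fc1."],
    ["kernel", "weight"],
]

def rename_state_dict_key(k: str) -> str:
    for tf_name, hf_name in PATTERNS:
        k = k.replace(tf_name, hf_name)
    return k

print(rename_state_dict_key("encoder/layer_0/ffn/dense/kernel"))
# -> encoder.layers.0.fc1.weight
```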
venv/lib/python3.10/site-packages/transformers/models/pegasus/modeling_flax_pegasus.py ADDED
@@ -0,0 +1,1530 @@
1
+ # coding=utf-8
2
+ # Copyright 2021, Google and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Flax PEGASUS model."""
16
+
17
+
18
+ import math
19
+ import random
20
+ from functools import partial
21
+ from typing import Callable, Optional, Tuple
22
+
23
+ import flax.linen as nn
24
+ import jax
25
+ import jax.numpy as jnp
26
+ import numpy as np
27
+ from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
28
+ from flax.linen import combine_masks, make_causal_mask
29
+ from flax.linen.attention import dot_product_attention_weights
30
+ from flax.traverse_util import flatten_dict, unflatten_dict
31
+ from jax import lax
32
+ from jax.random import PRNGKey
33
+
34
+ from ...modeling_flax_outputs import (
35
+ FlaxBaseModelOutput,
36
+ FlaxBaseModelOutputWithPastAndCrossAttentions,
37
+ FlaxCausalLMOutputWithCrossAttentions,
38
+ FlaxSeq2SeqLMOutput,
39
+ FlaxSeq2SeqModelOutput,
40
+ )
41
+ from ...modeling_flax_utils import (
42
+ ACT2FN,
43
+ FlaxPreTrainedModel,
44
+ add_start_docstrings_to_model_forward,
45
+ append_call_sample_docstring,
46
+ append_replace_return_docstrings,
47
+ overwrite_call_docstring,
48
+ )
49
+ from ...utils import add_start_docstrings, logging, replace_return_docstrings
50
+ from .configuration_pegasus import PegasusConfig
51
+
52
+
53
+ logger = logging.get_logger(__name__)
54
+
55
+ _CHECKPOINT_FOR_DOC = "google/pegasus-large"
56
+ _CONFIG_FOR_DOC = "PegasusConfig"
57
+
58
+ PEGASUS_START_DOCSTRING = r"""
59
+ This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
60
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
61
+ etc.)
62
+
63
+ This model is also a Flax Linen
64
+ [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a
65
+ regular Flax Module and refer to the Flax documentation for all matters related to general usage and behavior.
66
+
67
+ Finally, this model supports inherent JAX features such as:
68
+
69
+ - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
70
+ - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
71
+ - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
72
+ - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
73
+
74
+ Parameters:
75
+ config ([`PegasusConfig`]): Model configuration class with all the parameters of the model.
76
+ Initializing with a config file does not load the weights associated with the model, only the
77
+ configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
78
+ dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
79
+ The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
80
+ `jax.numpy.bfloat16` (on TPUs).
81
+
82
+ This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
83
+ specified all the computation will be performed with the given `dtype`.
84
+
85
+ **Note that this only specifies the dtype of the computation and does not influence the dtype of model
86
+ parameters.**
87
+
88
+ If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
89
+ [`~FlaxPreTrainedModel.to_bf16`].
90
+ """
91
+
92
+ PEGASUS_INPUTS_DOCSTRING = r"""
93
+ Args:
94
+ input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
95
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
96
+ it.
97
+
98
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
99
+ [`PreTrainedTokenizer.__call__`] for details.
100
+
101
+ [What are input IDs?](../glossary#input-ids)
102
+ attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
103
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
104
+
105
+ - 1 for tokens that are **not masked**,
106
+ - 0 for tokens that are **masked**.
107
+
108
+ [What are attention masks?](../glossary#attention-mask)
109
+ decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
110
+ Indices of decoder input sequence tokens in the vocabulary.
111
+
112
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
113
+ [`PreTrainedTokenizer.__call__`] for details.
114
+
115
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
116
+ decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
117
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
118
+ be used by default.
119
+
120
+ If you want to change padding behavior, you should modify it to your needs. See diagram 1 in [the
121
+ paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy.
122
+ position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
123
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
124
+ config.max_position_embeddings - 1]`.
125
+ decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
126
+ Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
127
+ range `[0, config.max_position_embeddings - 1]`.
128
+ output_attentions (`bool`, *optional*):
129
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
130
+ tensors for more detail.
131
+ output_hidden_states (`bool`, *optional*):
132
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
133
+ more detail.
134
+ return_dict (`bool`, *optional*):
135
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
136
+ """
137
+
138
+
139
+ PEGASUS_ENCODE_INPUTS_DOCSTRING = r"""
140
+ Args:
141
+ input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
142
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
143
+ it.
144
+
145
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
146
+ [`PreTrainedTokenizer.__call__`] for details.
147
+
148
+ [What are input IDs?](../glossary#input-ids)
149
+ attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
150
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
151
+
152
+ - 1 for tokens that are **not masked**,
153
+ - 0 for tokens that are **masked**.
154
+
155
+ [What are attention masks?](../glossary#attention-mask)
156
+ position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
157
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
158
+ config.max_position_embeddings - 1]`.
159
+ output_attentions (`bool`, *optional*):
160
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
161
+ tensors for more detail.
162
+ output_hidden_states (`bool`, *optional*):
163
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
164
+ more detail.
165
+ return_dict (`bool`, *optional*):
166
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
167
+ """
168
+
169
+ PEGASUS_DECODE_INPUTS_DOCSTRING = r"""
170
+ Args:
171
+ decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`):
172
+ Indices of decoder input sequence tokens in the vocabulary.
173
+
174
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
175
+ [`PreTrainedTokenizer.__call__`] for details.
176
+
177
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
178
+ encoder_outputs (`tuple(tuple(jnp.ndarray))`):
179
+ Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
180
+ `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of
181
+ hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
182
+ encoder_attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
183
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
184
+
185
+ - 1 for tokens that are **not masked**,
186
+ - 0 for tokens that are **masked**.
187
+
188
+ [What are attention masks?](../glossary#attention-mask)
189
+ decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
190
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
191
+ be used by default.
192
+
193
+ If you want to change padding behavior, you should modify it to your needs. See diagram 1 in [the
194
+ paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy.
195
+ decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
196
+ Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
197
+ range `[0, config.max_position_embeddings - 1]`.
198
+ past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`):
199
+ Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast
200
+ auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*.
201
+ output_attentions (`bool`, *optional*):
202
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
203
+ tensors for more detail.
204
+ output_hidden_states (`bool`, *optional*):
205
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
206
+ more detail.
207
+ return_dict (`bool`, *optional*):
208
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
209
+ """
210
+
211
+
212
+ # Copied from transformers.models.bart.modeling_flax_bart.shift_tokens_right
213
+ def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
214
+ """
215
+ Shift input ids one token to the right.
216
+ """
217
+ shifted_input_ids = jnp.zeros_like(input_ids)
218
+ shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
219
+ shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)
220
+
221
+ shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
222
+ return shifted_input_ids
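`shift_tokens_right` turns labels into decoder inputs: everything moves one slot to the right, `decoder_start_token_id` fills position 0, and the `-100` loss-masking value is replaced by `pad_token_id`. A quick illustrative check (token ids are made up) using the function defined just above:

```python
# Illustrative check of shift_tokens_right; token ids are invented.
import jax.numpy as jnp

labels = jnp.array([[42, 17, 1, -100, -100]])  # 1 = eos, -100 = ignored label padding
decoder_input_ids = shift_tokens_right(labels, pad_token_id=0, decoder_start_token_id=0)
# -> [[0, 42, 17, 1, 0]]: shifted right, start token prepended, -100 replaced by pad id
```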
223
+
224
+
225
+ # Copied from transformers.models.marian.modeling_flax_marian.create_sinusoidal_positions
226
+ def create_sinusoidal_positions(n_pos, dim):
227
+ position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)])
228
+ sentinel = dim // 2 + dim % 2
229
+ out = np.zeros_like(position_enc)
230
+ out[:, 0:sentinel] = np.sin(position_enc[:, 0::2])
231
+ out[:, sentinel:] = np.cos(position_enc[:, 1::2])
232
+
233
+ return jnp.array(out)
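`create_sinusoidal_positions` builds an `(n_pos, dim)` table whose first `ceil(dim / 2)` columns hold the sine terms and whose remaining columns hold the cosine terms. A quick illustrative check using the function defined just above:

```python
# Illustrative check of create_sinusoidal_positions; sizes are arbitrary.
table = create_sinusoidal_positions(n_pos=4, dim=6)
print(table.shape)  # (4, 6): one row per position
# Row 0 (position 0) is [0, 0, 0, 1, 1, 1]: sin(0) in the first half, cos(0) in the second.
```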
234
+
235
+
236
+ # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartAttention with Bart->Pegasus
237
+ class FlaxPegasusAttention(nn.Module):
238
+ config: PegasusConfig
239
+ embed_dim: int
240
+ num_heads: int
241
+ dropout: float = 0.0
242
+ causal: bool = False
243
+ bias: bool = True
244
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
245
+
246
+ def setup(self) -> None:
247
+ self.head_dim = self.embed_dim // self.num_heads
248
+ if self.head_dim * self.num_heads != self.embed_dim:
249
+ raise ValueError(
250
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
251
+ f" and `num_heads`: {self.num_heads})."
252
+ )
253
+
254
+ dense = partial(
255
+ nn.Dense,
256
+ self.embed_dim,
257
+ use_bias=self.bias,
258
+ dtype=self.dtype,
259
+ kernel_init=jax.nn.initializers.normal(self.config.init_std),
260
+ )
261
+
262
+ self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense()
263
+ self.out_proj = dense()
264
+
265
+ self.dropout_layer = nn.Dropout(rate=self.dropout)
266
+
267
+ if self.causal:
268
+ self.causal_mask = make_causal_mask(
269
+ jnp.ones((1, self.config.max_position_embeddings), dtype="bool"), dtype="bool"
270
+ )
271
+
272
+ def _split_heads(self, hidden_states):
273
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim))
274
+
275
+ def _merge_heads(self, hidden_states):
276
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,))
277
+
278
+ @nn.compact
279
+ def _concatenate_to_cache(self, key, value, query, attention_mask):
280
+ """
281
+ This function takes projected key, value states from a single input token and concatenates the states to cached
282
+ states from previous steps. This function is slightly adapted from the official Flax repository:
283
+ https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
284
+ """
285
+ # detect if we're initializing by absence of existing cache data.
286
+ is_initialized = self.has_variable("cache", "cached_key")
287
+ cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
288
+ cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
289
+ cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))
290
+
291
+ if is_initialized:
292
+ *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
293
+ # update key, value caches with our new 1d spatial slices
294
+ cur_index = cache_index.value
295
+ indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
296
+ key = lax.dynamic_update_slice(cached_key.value, key, indices)
297
+ value = lax.dynamic_update_slice(cached_value.value, value, indices)
298
+ cached_key.value = key
299
+ cached_value.value = value
300
+ num_updated_cache_vectors = query.shape[1]
301
+ cache_index.value = cache_index.value + num_updated_cache_vectors
302
+ # causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements.
303
+ pad_mask = jnp.broadcast_to(
304
+ jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
305
+ tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
306
+ )
307
+ attention_mask = combine_masks(pad_mask, attention_mask)
308
+ return key, value, attention_mask
309
+
310
+ def __call__(
311
+ self,
312
+ hidden_states: jnp.ndarray,
313
+ key_value_states: Optional[jnp.ndarray] = None,
314
+ attention_mask: Optional[jnp.ndarray] = None,
315
+ init_cache: bool = False,
316
+ deterministic: bool = True,
317
+ ) -> Tuple[jnp.ndarray]:
318
+ """Input shape: Batch x Time x Channel"""
319
+
320
+ # if key_value_states are provided this layer is used as a cross-attention layer
321
+ # for the decoder
322
+ is_cross_attention = key_value_states is not None
323
+ batch_size = hidden_states.shape[0]
324
+
325
+ # get query proj
326
+ query_states = self.q_proj(hidden_states)
327
+ # get key, value proj
328
+ if is_cross_attention:
329
+ # cross_attentions
330
+ key_states = self.k_proj(key_value_states)
331
+ value_states = self.v_proj(key_value_states)
332
+ else:
333
+ # self_attention
334
+ key_states = self.k_proj(hidden_states)
335
+ value_states = self.v_proj(hidden_states)
336
+
337
+ query_states = self._split_heads(query_states)
338
+ key_states = self._split_heads(key_states)
339
+ value_states = self._split_heads(value_states)
340
+
341
+ # handle cache prepare causal attention mask
342
+ if self.causal:
343
+ query_length, key_length = query_states.shape[1], key_states.shape[1]
344
+ if self.has_variable("cache", "cached_key"):
345
+ mask_shift = self.variables["cache"]["cache_index"]
346
+ max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
347
+ causal_mask = lax.dynamic_slice(
348
+ self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length)
349
+ )
350
+ else:
351
+ causal_mask = self.causal_mask[:, :, :query_length, :key_length]
352
+ causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:])
353
+
354
+ # combine masks if needed
355
+ if attention_mask is not None and self.causal:
356
+ attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape)
357
+ attention_mask = combine_masks(attention_mask, causal_mask)
358
+ elif self.causal:
359
+ attention_mask = causal_mask
360
+ elif attention_mask is not None:
361
+ attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
362
+
363
+ # During fast autoregressive decoding, we feed one position at a time,
364
+ # and cache the keys and values step by step.
365
+ if self.causal and (self.has_variable("cache", "cached_key") or init_cache):
366
+ key_states, value_states, attention_mask = self._concatenate_to_cache(
367
+ key_states, value_states, query_states, attention_mask
368
+ )
369
+
370
+ # Convert the boolean attention mask to an attention bias.
371
+ if attention_mask is not None:
372
+ # attention mask in the form of attention bias
373
+ attention_bias = lax.select(
374
+ attention_mask > 0,
375
+ jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
376
+ jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype),
377
+ )
378
+ else:
379
+ attention_bias = None
380
+
381
+ dropout_rng = None
382
+ if not deterministic and self.dropout > 0.0:
383
+ dropout_rng = self.make_rng("dropout")
384
+
385
+ attn_weights = dot_product_attention_weights(
386
+ query_states,
387
+ key_states,
388
+ bias=attention_bias,
389
+ dropout_rng=dropout_rng,
390
+ dropout_rate=self.dropout,
391
+ broadcast_dropout=True,
392
+ deterministic=deterministic,
393
+ dtype=self.dtype,
394
+ precision=None,
395
+ )
396
+
397
+ attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
398
+ attn_output = self._merge_heads(attn_output)
399
+ attn_output = self.out_proj(attn_output)
400
+
401
+ return attn_output, attn_weights
402
+
403
+
404
+ # Copied from transformers.models.mbart.modeling_flax_mbart.FlaxMBartEncoderLayer with MBart->Pegasus
405
+ class FlaxPegasusEncoderLayer(nn.Module):
406
+ config: PegasusConfig
407
+ dtype: jnp.dtype = jnp.float32
408
+
409
+ def setup(self) -> None:
410
+ self.embed_dim = self.config.d_model
411
+ self.self_attn = FlaxPegasusAttention(
412
+ config=self.config,
413
+ embed_dim=self.embed_dim,
414
+ num_heads=self.config.encoder_attention_heads,
415
+ dropout=self.config.attention_dropout,
416
+ dtype=self.dtype,
417
+ )
418
+ self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
419
+ self.dropout_layer = nn.Dropout(rate=self.config.dropout)
420
+ self.activation_fn = ACT2FN[self.config.activation_function]
421
+ self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout)
422
+ self.fc1 = nn.Dense(
423
+ self.config.encoder_ffn_dim,
424
+ dtype=self.dtype,
425
+ kernel_init=jax.nn.initializers.normal(self.config.init_std),
426
+ )
427
+ self.fc2 = nn.Dense(
428
+ self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)
429
+ )
430
+ self.final_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
431
+
432
+ def __call__(
433
+ self,
434
+ hidden_states: jnp.ndarray,
435
+ attention_mask: jnp.ndarray,
436
+ output_attentions: bool = True,
437
+ deterministic: bool = True,
438
+ ) -> Tuple[jnp.ndarray]:
439
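+ # pre-LayerNorm self-attention block: normalize, attend, apply dropout, then add the residual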
+ residual = hidden_states
440
+ hidden_states = self.self_attn_layer_norm(hidden_states)
441
+ hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask)
442
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
443
+ hidden_states = residual + hidden_states
444
+
445
+ residual = hidden_states
446
+ hidden_states = self.final_layer_norm(hidden_states)
447
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
448
+ hidden_states = self.activation_dropout_layer(hidden_states, deterministic=deterministic)
449
+ hidden_states = self.fc2(hidden_states)
450
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
451
+ hidden_states = residual + hidden_states
452
+
453
+ outputs = (hidden_states,)
454
+
455
+ if output_attentions:
456
+ outputs += (attn_weights,)
457
+
458
+ return outputs
459
+
460
+
461
+ # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartEncoderLayerCollection with Bart->Pegasus
462
+ class FlaxPegasusEncoderLayerCollection(nn.Module):
463
+ config: PegasusConfig
464
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
465
+
466
+ def setup(self):
467
+ self.layers = [
468
+ FlaxPegasusEncoderLayer(self.config, name=str(i), dtype=self.dtype)
469
+ for i in range(self.config.encoder_layers)
470
+ ]
471
+ self.layerdrop = self.config.encoder_layerdrop
472
+
473
+ def __call__(
474
+ self,
475
+ hidden_states,
476
+ attention_mask,
477
+ deterministic: bool = True,
478
+ output_attentions: bool = False,
479
+ output_hidden_states: bool = False,
480
+ return_dict: bool = True,
481
+ ):
482
+ all_attentions = () if output_attentions else None
483
+ all_hidden_states = () if output_hidden_states else None
484
+
485
+ for encoder_layer in self.layers:
486
+ if output_hidden_states:
487
+ all_hidden_states = all_hidden_states + (hidden_states,)
488
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
489
+ dropout_probability = random.uniform(0, 1)
490
+ if not deterministic and (dropout_probability < self.layerdrop): # skip the layer
491
+ layer_outputs = (None, None)
492
+ else:
493
+ layer_outputs = encoder_layer(
494
+ hidden_states,
495
+ attention_mask,
496
+ output_attentions,
497
+ deterministic,
498
+ )
499
+ hidden_states = layer_outputs[0]
500
+ if output_attentions:
501
+ all_attentions = all_attentions + (layer_outputs[1],)
502
+
503
+ if output_hidden_states:
504
+ all_hidden_states += (hidden_states,)
505
+
506
+ outputs = (hidden_states, all_hidden_states, all_attentions)
507
+
508
+ if not return_dict:
509
+ return tuple(v for v in outputs if v is not None)
510
+
511
+ return FlaxBaseModelOutput(
512
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
513
+ )
514
+
515
+
516
+ # Copied from transformers.models.mbart.modeling_flax_mbart.FlaxMBartDecoderLayer with MBart->Pegasus
517
+ class FlaxPegasusDecoderLayer(nn.Module):
518
+ config: PegasusConfig
519
+ dtype: jnp.dtype = jnp.float32
520
+
521
+ def setup(self) -> None:
522
+ self.embed_dim = self.config.d_model
523
+ self.self_attn = FlaxPegasusAttention(
524
+ config=self.config,
525
+ embed_dim=self.embed_dim,
526
+ num_heads=self.config.decoder_attention_heads,
527
+ dropout=self.config.attention_dropout,
528
+ causal=True,
529
+ dtype=self.dtype,
530
+ )
531
+ self.dropout_layer = nn.Dropout(rate=self.config.dropout)
532
+ self.activation_fn = ACT2FN[self.config.activation_function]
533
+ self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout)
534
+
535
+ self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
536
+ self.encoder_attn = FlaxPegasusAttention(
537
+ config=self.config,
538
+ embed_dim=self.embed_dim,
539
+ num_heads=self.config.decoder_attention_heads,
540
+ dropout=self.config.attention_dropout,
541
+ dtype=self.dtype,
542
+ )
543
+ self.encoder_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
544
+ self.fc1 = nn.Dense(
545
+ self.config.decoder_ffn_dim,
546
+ dtype=self.dtype,
547
+ kernel_init=jax.nn.initializers.normal(self.config.init_std),
548
+ )
549
+ self.fc2 = nn.Dense(
550
+ self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)
551
+ )
552
+ self.final_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
553
+
554
+ def __call__(
555
+ self,
556
+ hidden_states: jnp.ndarray,
557
+ attention_mask: jnp.ndarray,
558
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
559
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
560
+ init_cache: bool = False,
561
+ output_attentions: bool = True,
562
+ deterministic: bool = True,
563
+ ) -> Tuple[jnp.ndarray]:
564
+ residual = hidden_states
565
+ hidden_states = self.self_attn_layer_norm(hidden_states)
566
+
567
+ # Self Attention
568
+ hidden_states, self_attn_weights = self.self_attn(
569
+ hidden_states=hidden_states, attention_mask=attention_mask, init_cache=init_cache
570
+ )
571
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
572
+ hidden_states = residual + hidden_states
573
+
574
+ # Cross-Attention Block
575
+ cross_attn_weights = None
576
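+ # cross-attention only runs when encoder outputs are provided (it is skipped when the decoder is used standalone)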
+ if encoder_hidden_states is not None:
577
+ residual = hidden_states
578
+
579
+ hidden_states = self.encoder_attn_layer_norm(hidden_states)
580
+ hidden_states, cross_attn_weights = self.encoder_attn(
581
+ hidden_states=hidden_states,
582
+ key_value_states=encoder_hidden_states,
583
+ attention_mask=encoder_attention_mask,
584
+ )
585
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
586
+ hidden_states = residual + hidden_states
587
+
588
+ # Fully Connected
589
+ residual = hidden_states
590
+ hidden_states = self.final_layer_norm(hidden_states)
591
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
592
+ hidden_states = self.activation_dropout_layer(hidden_states, deterministic=deterministic)
593
+ hidden_states = self.fc2(hidden_states)
594
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
595
+ hidden_states = residual + hidden_states
596
+
597
+ outputs = (hidden_states,)
598
+
599
+ if output_attentions:
600
+ outputs += (self_attn_weights, cross_attn_weights)
601
+
602
+ return outputs
603
+
604
+
605
+ # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartDecoderLayerCollection with Bart->Pegasus
606
+ class FlaxPegasusDecoderLayerCollection(nn.Module):
607
+ config: PegasusConfig
608
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
609
+
610
+ def setup(self):
611
+ self.layers = [
612
+ FlaxPegasusDecoderLayer(self.config, name=str(i), dtype=self.dtype)
613
+ for i in range(self.config.decoder_layers)
614
+ ]
615
+ self.layerdrop = self.config.decoder_layerdrop
616
+
617
+ def __call__(
618
+ self,
619
+ hidden_states,
620
+ attention_mask,
621
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
622
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
623
+ deterministic: bool = True,
624
+ init_cache: bool = False,
625
+ output_attentions: bool = False,
626
+ output_hidden_states: bool = False,
627
+ return_dict: bool = True,
628
+ ):
629
+ # decoder layers
630
+ all_hidden_states = () if output_hidden_states else None
631
+ all_self_attns = () if output_attentions else None
632
+ all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
633
+
634
+ for decoder_layer in self.layers:
635
+ if output_hidden_states:
636
+ all_hidden_states += (hidden_states,)
637
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
638
+ dropout_probability = random.uniform(0, 1)
639
+ if not deterministic and (dropout_probability < self.layerdrop):
640
+ layer_outputs = (None, None, None)
641
+ else:
642
+ layer_outputs = decoder_layer(
643
+ hidden_states,
644
+ attention_mask=attention_mask,
645
+ encoder_hidden_states=encoder_hidden_states,
646
+ encoder_attention_mask=encoder_attention_mask,
647
+ init_cache=init_cache,
648
+ output_attentions=output_attentions,
649
+ deterministic=deterministic,
650
+ )
651
+
652
+ hidden_states = layer_outputs[0]
653
+ if output_attentions:
654
+ all_self_attns += (layer_outputs[1],)
655
+
656
+ if encoder_hidden_states is not None:
657
+ all_cross_attentions += (layer_outputs[2],)
658
+
659
+ # add hidden states from the last decoder layer
660
+ if output_hidden_states:
661
+ all_hidden_states += (hidden_states,)
662
+
663
+ outputs = [hidden_states, all_hidden_states, all_self_attns, all_cross_attentions]
664
+
665
+ if not return_dict:
666
+ return tuple(v for v in outputs if v is not None)
667
+
668
+ return FlaxBaseModelOutputWithPastAndCrossAttentions(
669
+ last_hidden_state=hidden_states,
670
+ hidden_states=all_hidden_states,
671
+ attentions=all_self_attns,
672
+ cross_attentions=all_cross_attentions,
673
+ )
674
+
675
+
676
+ class FlaxPegasusEncoder(nn.Module):
677
+ config: PegasusConfig
678
+ embed_tokens: nn.Embed
679
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
680
+
681
+ def setup(self):
682
+ self.dropout_layer = nn.Dropout(rate=self.config.dropout)
683
+
684
+ embed_dim = self.config.d_model
685
+ self.padding_idx = self.config.pad_token_id
686
+ self.max_source_positions = self.config.max_position_embeddings
687
+ self.embed_scale = math.sqrt(embed_dim) if self.config.scale_embedding else 1.0
688
+
689
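+ # static sinusoidal position embeddings; these are plain arrays, not registered as learned parameters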
+ self.embed_positions = create_sinusoidal_positions(self.config.max_position_embeddings, embed_dim)
690
+ self.layers = FlaxPegasusEncoderLayerCollection(self.config, self.dtype)
691
+ self.layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
692
+
693
+ def __call__(
694
+ self,
695
+ input_ids,
696
+ attention_mask,
697
+ position_ids,
698
+ output_attentions: bool = False,
699
+ output_hidden_states: bool = False,
700
+ return_dict: bool = True,
701
+ deterministic: bool = True,
702
+ ):
703
+ input_shape = input_ids.shape
704
+ input_ids = input_ids.reshape(-1, input_shape[-1])
705
+
706
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
707
+
708
+ # embed positions
709
+ embed_pos = jnp.take(self.embed_positions, position_ids, axis=0)
710
+ # explicitly cast the positions here, since self.embed_positions are not registered as parameters
711
+ embed_pos = embed_pos.astype(inputs_embeds.dtype)
712
+
713
+ hidden_states = inputs_embeds + embed_pos
714
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
715
+ outputs = self.layers(
716
+ hidden_states,
717
+ attention_mask,
718
+ deterministic=deterministic,
719
+ output_attentions=output_attentions,
720
+ output_hidden_states=output_hidden_states,
721
+ return_dict=return_dict,
722
+ )
723
+ last_hidden_state = outputs[0]
724
+ last_hidden_state = self.layer_norm(last_hidden_state)
725
+
726
+ # update the last element in `hidden_states` after applying `layernorm` above
727
+ hidden_states = None
728
+ if output_hidden_states:
729
+ hidden_states = outputs[1]
730
+ hidden_states = hidden_states[:-1] + (last_hidden_state,)
731
+
732
+ if not return_dict:
733
+ outputs = (last_hidden_state, hidden_states) + (outputs[2:] if output_hidden_states else outputs[1:])
734
+ return tuple(v for v in outputs if v is not None)
735
+
736
+ return FlaxBaseModelOutput(
737
+ last_hidden_state=last_hidden_state,
738
+ hidden_states=hidden_states,
739
+ attentions=outputs.attentions,
740
+ )
741
+
742
+
743
+ class FlaxPegasusDecoder(nn.Module):
744
+ config: PegasusConfig
745
+ embed_tokens: nn.Embed
746
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
747
+
748
+ def setup(self):
749
+ self.dropout_layer = nn.Dropout(rate=self.config.dropout)
750
+
751
+ embed_dim = self.config.d_model
752
+ self.padding_idx = self.config.pad_token_id
753
+ self.max_target_positions = self.config.max_position_embeddings
754
+ self.embed_scale = math.sqrt(self.config.d_model) if self.config.scale_embedding else 1.0
755
+
756
+ self.embed_positions = create_sinusoidal_positions(self.config.max_position_embeddings, embed_dim)
757
+
758
+ self.layers = FlaxPegasusDecoderLayerCollection(self.config, self.dtype)
759
+ self.layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
760
+
761
+ def __call__(
762
+ self,
763
+ input_ids,
764
+ attention_mask,
765
+ position_ids,
766
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
767
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
768
+ init_cache: bool = False,
769
+ output_attentions: bool = False,
770
+ output_hidden_states: bool = False,
771
+ return_dict: bool = True,
772
+ deterministic: bool = True,
773
+ ):
774
+ input_shape = input_ids.shape
775
+ input_ids = input_ids.reshape(-1, input_shape[-1])
776
+
777
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
778
+
779
+ # embed positions
780
+ positions = jnp.take(self.embed_positions, position_ids, axis=0)
781
+ # explicitly cast the positions here, since self.embed_positions are not registered as parameters
782
+ positions = positions.astype(inputs_embeds.dtype)
783
+
784
+ hidden_states = inputs_embeds + positions
785
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
786
+ outputs = self.layers(
787
+ hidden_states,
788
+ attention_mask,
789
+ encoder_hidden_states,
790
+ encoder_attention_mask,
791
+ deterministic=deterministic,
792
+ init_cache=init_cache,
793
+ output_attentions=output_attentions,
794
+ output_hidden_states=output_hidden_states,
795
+ return_dict=return_dict,
796
+ )
797
+ last_hidden_state = outputs[0]
798
+ last_hidden_state = self.layer_norm(last_hidden_state)
799
+
800
+ # update the last element in `hidden_states` after applying `layernorm` above
801
+ hidden_states = None
802
+ if output_hidden_states:
803
+ hidden_states = outputs[1]
804
+ hidden_states = hidden_states[:-1] + (last_hidden_state,)
805
+
806
+ if not return_dict:
807
+ outputs = (last_hidden_state, hidden_states) + (outputs[2:] if output_hidden_states else outputs[1:])
808
+ return tuple(v for v in outputs if v is not None)
809
+
810
+ return FlaxBaseModelOutputWithPastAndCrossAttentions(
811
+ last_hidden_state=last_hidden_state,
812
+ hidden_states=hidden_states,
813
+ attentions=outputs.attentions,
814
+ cross_attentions=outputs.cross_attentions,
815
+ )
816
+
817
+
818
+ # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartModule with Bart->Pegasus
819
+ class FlaxPegasusModule(nn.Module):
820
+ config: PegasusConfig
821
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
822
+
823
+ def setup(self):
824
+ self.shared = nn.Embed(
825
+ self.config.vocab_size,
826
+ self.config.d_model,
827
+ embedding_init=jax.nn.initializers.normal(self.config.init_std),
828
+ dtype=self.dtype,
829
+ )
830
+
831
+ self.encoder = FlaxPegasusEncoder(self.config, dtype=self.dtype, embed_tokens=self.shared)
832
+ self.decoder = FlaxPegasusDecoder(self.config, dtype=self.dtype, embed_tokens=self.shared)
833
+
834
+ def _get_encoder_module(self):
835
+ return self.encoder
836
+
837
+ def _get_decoder_module(self):
838
+ return self.decoder
839
+
840
+ def __call__(
841
+ self,
842
+ input_ids,
843
+ attention_mask,
844
+ decoder_input_ids,
845
+ decoder_attention_mask,
846
+ position_ids,
847
+ decoder_position_ids,
848
+ output_attentions: bool = False,
849
+ output_hidden_states: bool = False,
850
+ return_dict: bool = True,
851
+ deterministic: bool = True,
852
+ ):
853
+ encoder_outputs = self.encoder(
854
+ input_ids=input_ids,
855
+ attention_mask=attention_mask,
856
+ position_ids=position_ids,
857
+ output_attentions=output_attentions,
858
+ output_hidden_states=output_hidden_states,
859
+ return_dict=return_dict,
860
+ deterministic=deterministic,
861
+ )
862
+
863
+ decoder_outputs = self.decoder(
864
+ input_ids=decoder_input_ids,
865
+ attention_mask=decoder_attention_mask,
866
+ position_ids=decoder_position_ids,
867
+ encoder_hidden_states=encoder_outputs[0],
868
+ encoder_attention_mask=attention_mask,
869
+ output_attentions=output_attentions,
870
+ output_hidden_states=output_hidden_states,
871
+ return_dict=return_dict,
872
+ deterministic=deterministic,
873
+ )
874
+
875
+ if not return_dict:
876
+ return decoder_outputs + encoder_outputs
877
+
878
+ return FlaxSeq2SeqModelOutput(
879
+ last_hidden_state=decoder_outputs.last_hidden_state,
880
+ decoder_hidden_states=decoder_outputs.hidden_states,
881
+ decoder_attentions=decoder_outputs.attentions,
882
+ cross_attentions=decoder_outputs.cross_attentions,
883
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
884
+ encoder_hidden_states=encoder_outputs.hidden_states,
885
+ encoder_attentions=encoder_outputs.attentions,
886
+ )
887
+
888
+
889
+ class FlaxPegasusPreTrainedModel(FlaxPreTrainedModel):
890
+ config_class = PegasusConfig
891
+ base_model_prefix: str = "model"
892
+ module_class: nn.Module = None
893
+
894
+ def __init__(
895
+ self,
896
+ config: PegasusConfig,
897
+ input_shape: Tuple[int] = (1, 1),
898
+ seed: int = 0,
899
+ dtype: jnp.dtype = jnp.float32,
900
+ _do_init: bool = True,
901
+ **kwargs,
902
+ ):
903
+ module = self.module_class(config=config, dtype=dtype, **kwargs)
904
+ super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
905
+
906
+ def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
907
+ # init input tensors
908
+ input_ids = jnp.zeros(input_shape, dtype="i4")
909
+ attention_mask = jnp.ones_like(input_ids)
910
+ decoder_input_ids = input_ids
911
+ decoder_attention_mask = jnp.ones_like(input_ids)
912
+
913
+ batch_size, sequence_length = input_ids.shape
914
+ position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
915
+ decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
916
+
917
+ params_rng, dropout_rng = jax.random.split(rng)
918
+ rngs = {"params": params_rng, "dropout": dropout_rng}
919
+
920
+ random_params = self.module.init(
921
+ rngs,
922
+ input_ids,
923
+ attention_mask,
924
+ decoder_input_ids,
925
+ decoder_attention_mask,
926
+ position_ids,
927
+ decoder_position_ids,
928
+ )["params"]
929
+
930
+ if params is not None:
931
+ random_params = flatten_dict(unfreeze(random_params))
932
+ params = flatten_dict(unfreeze(params))
933
+ for missing_key in self._missing_keys:
934
+ params[missing_key] = random_params[missing_key]
935
+ self._missing_keys = set()
936
+ return freeze(unflatten_dict(params))
937
+ else:
938
+ return random_params
939
+
940
+ def init_cache(self, batch_size, max_length, encoder_outputs):
941
+ r"""
942
+ Args:
943
+ batch_size (`int`):
944
+ batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
945
+ max_length (`int`):
946
+ maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
947
+ cache.
948
+ encoder_outputs (`Union[FlaxBaseModelOutput, tuple(tuple(jnp.ndarray)]`):
949
+ `encoder_outputs` consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*:
950
+ `attentions`). `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*)
951
+ is a sequence of hidden-states at the output of the last layer of the encoder. Used in the
952
+ cross-attention of the decoder.
953
+ """
954
+ # init input variables to retrieve cache
955
+ decoder_input_ids = jnp.ones((batch_size, max_length), dtype="i4")
956
+ decoder_attention_mask = jnp.ones_like(decoder_input_ids)
957
+ decoder_position_ids = jnp.broadcast_to(
958
+ jnp.arange(jnp.atleast_2d(decoder_input_ids).shape[-1]), decoder_input_ids.shape
959
+ )
960
+
961
+ def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):
962
+ decoder_module = module._get_decoder_module()
963
+ return decoder_module(
964
+ decoder_input_ids,
965
+ decoder_attention_mask,
966
+ decoder_position_ids,
967
+ **kwargs,
968
+ )
969
+
970
+ init_variables = self.module.init(
971
+ jax.random.PRNGKey(0),
972
+ decoder_input_ids=decoder_input_ids,
973
+ decoder_attention_mask=decoder_attention_mask,
974
+ decoder_position_ids=decoder_position_ids,
975
+ encoder_hidden_states=encoder_outputs[0],
976
+ init_cache=True,
977
+ method=_decoder_forward, # we only need to call the decoder to init the cache
978
+ )
979
+ return unfreeze(init_variables["cache"])
980
+
981
+ @add_start_docstrings(PEGASUS_ENCODE_INPUTS_DOCSTRING)
982
+ @replace_return_docstrings(output_type=FlaxBaseModelOutput, config_class=PegasusConfig)
983
+ def encode(
984
+ self,
985
+ input_ids: jnp.ndarray,
986
+ attention_mask: Optional[jnp.ndarray] = None,
987
+ position_ids: Optional[jnp.ndarray] = None,
988
+ output_attentions: Optional[bool] = None,
989
+ output_hidden_states: Optional[bool] = None,
990
+ return_dict: Optional[bool] = None,
991
+ train: bool = False,
992
+ params: dict = None,
993
+ dropout_rng: PRNGKey = None,
994
+ ):
995
+ r"""
996
+ Returns:
997
+
998
+ Example:
999
+
1000
+ ```python
1001
+ >>> from transformers import AutoTokenizer, FlaxPegasusForConditionalGeneration
1002
+
1003
+ >>> model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-large")
1004
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/pegasus-large")
1005
+
1006
+ >>> text = "My friends are cool but they eat too many carbs."
1007
+ >>> inputs = tokenizer(text, max_length=1024, return_tensors="np")
1008
+ >>> encoder_outputs = model.encode(**inputs)
1009
+ ```"""
1010
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1011
+ output_hidden_states = (
1012
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1013
+ )
1014
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
1015
+
1016
+ if attention_mask is None:
1017
+ attention_mask = jnp.ones_like(input_ids)
1018
+ if position_ids is None:
1019
+ batch_size, sequence_length = input_ids.shape
1020
+ position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
1021
+
1022
+ # Handle any PRNG if needed
1023
+ rngs = {}
1024
+ if dropout_rng is not None:
1025
+ rngs["dropout"] = dropout_rng
1026
+
1027
+ def _encoder_forward(module, input_ids, attention_mask, position_ids, **kwargs):
1028
+ encode_module = module._get_encoder_module()
1029
+ return encode_module(input_ids, attention_mask, position_ids, **kwargs)
1030
+
1031
+ return self.module.apply(
1032
+ {"params": params or self.params},
1033
+ input_ids=jnp.array(input_ids, dtype="i4"),
1034
+ attention_mask=jnp.array(attention_mask, dtype="i4"),
1035
+ position_ids=jnp.array(position_ids, dtype="i4"),
1036
+ output_attentions=output_attentions,
1037
+ output_hidden_states=output_hidden_states,
1038
+ return_dict=return_dict,
1039
+ deterministic=not train,
1040
+ rngs=rngs,
1041
+ method=_encoder_forward,
1042
+ )
1043
+
1044
+ @add_start_docstrings(PEGASUS_DECODE_INPUTS_DOCSTRING)
1045
+ @replace_return_docstrings(output_type=FlaxBaseModelOutputWithPastAndCrossAttentions, config_class=PegasusConfig)
1046
+ def decode(
1047
+ self,
1048
+ decoder_input_ids,
1049
+ encoder_outputs,
1050
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
1051
+ decoder_attention_mask: Optional[jnp.ndarray] = None,
1052
+ decoder_position_ids: Optional[jnp.ndarray] = None,
1053
+ past_key_values: dict = None,
1054
+ output_attentions: Optional[bool] = None,
1055
+ output_hidden_states: Optional[bool] = None,
1056
+ return_dict: Optional[bool] = None,
1057
+ train: bool = False,
1058
+ params: dict = None,
1059
+ dropout_rng: PRNGKey = None,
1060
+ ):
1061
+ r"""
1062
+ Returns:
1063
+
1064
+ Example:
1065
+
1066
+ ```python
1067
+ >>> import jax.numpy as jnp
1068
+ >>> from transformers import AutoTokenizer, FlaxPegasusForConditionalGeneration
1069
+
1070
+ >>> model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-large")
1071
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/pegasus-large")
1072
+
1073
+ >>> text = "My friends are cool but they eat too many carbs."
1074
+ >>> inputs = tokenizer(text, max_length=1024, return_tensors="np")
1075
+ >>> encoder_outputs = model.encode(**inputs)
1076
+
1077
+ >>> decoder_start_token_id = model.config.decoder_start_token_id
1078
+ >>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id
1079
+
1080
+ >>> outputs = model.decode(decoder_input_ids, encoder_outputs)
1081
+ >>> last_decoder_hidden_states = outputs.last_hidden_state
1082
+ ```"""
1083
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1084
+ output_hidden_states = (
1085
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1086
+ )
1087
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
1088
+
1089
+ encoder_hidden_states = encoder_outputs[0]
1090
+ if encoder_attention_mask is None:
1091
+ batch_size, sequence_length = encoder_hidden_states.shape[:2]
1092
+ encoder_attention_mask = jnp.ones((batch_size, sequence_length))
1093
+
1094
+ batch_size, sequence_length = decoder_input_ids.shape
1095
+ if decoder_attention_mask is None:
1096
+ decoder_attention_mask = jnp.ones((batch_size, sequence_length))
1097
+
1098
+ if decoder_position_ids is None:
1099
+ if past_key_values is not None:
1100
+ raise ValueError("Make sure to provide `decoder_position_ids` when passing `past_key_values`.")
1101
+
1102
+ decoder_position_ids = jnp.broadcast_to(
1103
+ jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
1104
+ )
1105
+
1106
+ # Handle any PRNG if needed
1107
+ rngs = {}
1108
+ if dropout_rng is not None:
1109
+ rngs["dropout"] = dropout_rng
1110
+
1111
+ inputs = {"params": params or self.params}
1112
+
1113
+ # if `past_key_values` are passed, the cache is already initialized; a private flag `init_cache` has to be
1114
+ # passed down to ensure the cache is used. The cache must also be marked as mutable so that
1115
+ # it can be updated by the FlaxPegasusAttention module
1116
+ if past_key_values:
1117
+ inputs["cache"] = past_key_values
1118
+ mutable = ["cache"]
1119
+ else:
1120
+ mutable = False
1121
+
1122
+ def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):
1123
+ decoder_module = module._get_decoder_module()
1124
+ return decoder_module(
1125
+ decoder_input_ids,
1126
+ decoder_attention_mask,
1127
+ decoder_position_ids,
1128
+ **kwargs,
1129
+ )
1130
+
1131
+ outputs = self.module.apply(
1132
+ inputs,
1133
+ decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
1134
+ decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
1135
+ decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
1136
+ encoder_hidden_states=encoder_hidden_states,
1137
+ encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"),
1138
+ output_attentions=output_attentions,
1139
+ output_hidden_states=output_hidden_states,
1140
+ return_dict=return_dict,
1141
+ deterministic=not train,
1142
+ rngs=rngs,
1143
+ mutable=mutable,
1144
+ method=_decoder_forward,
1145
+ )
1146
+
1147
+ # add updated cache to model output
1148
+ if past_key_values is not None and return_dict:
1149
+ outputs, past = outputs
1150
+ outputs["past_key_values"] = unfreeze(past["cache"])
1151
+ return outputs
1152
+ elif past_key_values is not None and not return_dict:
1153
+ outputs, past = outputs
1154
+ outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:]
1155
+
1156
+ return outputs
1157
+
1158
+ @add_start_docstrings_to_model_forward(PEGASUS_INPUTS_DOCSTRING)
1159
+ def __call__(
1160
+ self,
1161
+ input_ids: jnp.ndarray,
1162
+ attention_mask: Optional[jnp.ndarray] = None,
1163
+ decoder_input_ids: Optional[jnp.ndarray] = None,
1164
+ decoder_attention_mask: Optional[jnp.ndarray] = None,
1165
+ position_ids: Optional[jnp.ndarray] = None,
1166
+ decoder_position_ids: Optional[jnp.ndarray] = None,
1167
+ output_attentions: Optional[bool] = None,
1168
+ output_hidden_states: Optional[bool] = None,
1169
+ return_dict: Optional[bool] = None,
1170
+ train: bool = False,
1171
+ params: dict = None,
1172
+ dropout_rng: PRNGKey = None,
1173
+ ):
1174
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1175
+ output_hidden_states = (
1176
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1177
+ )
1178
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
1179
+
1180
+ # prepare encoder inputs
1181
+ if attention_mask is None:
1182
+ attention_mask = jnp.ones_like(input_ids)
1183
+ if position_ids is None:
1184
+ batch_size, sequence_length = input_ids.shape
1185
+ position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
1186
+
1187
+ # prepare decoder inputs
1188
+ if decoder_input_ids is None:
1189
+ decoder_input_ids = shift_tokens_right(
1190
+ input_ids, self.config.pad_token_id, decoder_start_token_id=self.config.decoder_start_token_id
1191
+ )
1192
+ if decoder_attention_mask is None:
1193
+ decoder_attention_mask = jnp.ones_like(decoder_input_ids)
1194
+ if decoder_position_ids is None:
1195
+ batch_size, sequence_length = decoder_input_ids.shape
1196
+ decoder_position_ids = jnp.broadcast_to(
1197
+ jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
1198
+ )
1199
+
1200
+ # Handle any PRNG if needed
1201
+ rngs = {"dropout": dropout_rng} if dropout_rng is not None else {}
1202
+
1203
+ return self.module.apply(
1204
+ {"params": params or self.params},
1205
+ input_ids=jnp.array(input_ids, dtype="i4"),
1206
+ attention_mask=jnp.array(attention_mask, dtype="i4"),
1207
+ position_ids=jnp.array(position_ids, dtype="i4"),
1208
+ decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
1209
+ decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
1210
+ decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
1211
+ output_attentions=output_attentions,
1212
+ output_hidden_states=output_hidden_states,
1213
+ return_dict=return_dict,
1214
+ deterministic=not train,
1215
+ rngs=rngs,
1216
+ )
1217
+
1218
+
1219
+ @add_start_docstrings(
1220
+ "The bare Pegasus Model transformer outputting raw hidden-states without any specific head on top.",
1221
+ PEGASUS_START_DOCSTRING,
1222
+ )
1223
+ class FlaxPegasusModel(FlaxPegasusPreTrainedModel):
1224
+ config: PegasusConfig
1225
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
1226
+ module_class = FlaxPegasusModule
1227
+
1228
+
1229
+ append_call_sample_docstring(FlaxPegasusModel, _CHECKPOINT_FOR_DOC, FlaxSeq2SeqModelOutput, _CONFIG_FOR_DOC)
1230
+
1231
+
1232
+ # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartForConditionalGenerationModule with Bart->Pegasus
1233
+ class FlaxPegasusForConditionalGenerationModule(nn.Module):
1234
+ config: PegasusConfig
1235
+ dtype: jnp.dtype = jnp.float32
1236
+ bias_init: Callable[..., jnp.ndarray] = jax.nn.initializers.zeros
1237
+
1238
+ def setup(self):
1239
+ self.model = FlaxPegasusModule(config=self.config, dtype=self.dtype)
1240
+ self.lm_head = nn.Dense(
1241
+ self.model.shared.num_embeddings,
1242
+ use_bias=False,
1243
+ dtype=self.dtype,
1244
+ kernel_init=jax.nn.initializers.normal(self.config.init_std),
1245
+ )
1246
+ self.final_logits_bias = self.param("final_logits_bias", self.bias_init, (1, self.model.shared.num_embeddings))
1247
+
1248
+ def _get_encoder_module(self):
1249
+ return self.model.encoder
1250
+
1251
+ def _get_decoder_module(self):
1252
+ return self.model.decoder
1253
+
1254
+ def __call__(
1255
+ self,
1256
+ input_ids,
1257
+ attention_mask,
1258
+ decoder_input_ids,
1259
+ decoder_attention_mask,
1260
+ position_ids,
1261
+ decoder_position_ids,
1262
+ output_attentions: bool = False,
1263
+ output_hidden_states: bool = False,
1264
+ return_dict: bool = True,
1265
+ deterministic: bool = True,
1266
+ ):
1267
+ outputs = self.model(
1268
+ input_ids=input_ids,
1269
+ attention_mask=attention_mask,
1270
+ decoder_input_ids=decoder_input_ids,
1271
+ decoder_attention_mask=decoder_attention_mask,
1272
+ position_ids=position_ids,
1273
+ decoder_position_ids=decoder_position_ids,
1274
+ output_attentions=output_attentions,
1275
+ output_hidden_states=output_hidden_states,
1276
+ return_dict=return_dict,
1277
+ deterministic=deterministic,
1278
+ )
1279
+
1280
+ hidden_states = outputs[0]
1281
+
1282
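+ # when word embeddings are tied, reuse the shared token embedding matrix (transposed) as the LM head kernel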
+ if self.config.tie_word_embeddings:
1283
+ shared_embedding = self.model.variables["params"]["shared"]["embedding"]
1284
+ lm_logits = self.lm_head.apply({"params": {"kernel": shared_embedding.T}}, hidden_states)
1285
+ else:
1286
+ lm_logits = self.lm_head(hidden_states)
1287
+
1288
+ lm_logits += jax.lax.stop_gradient(self.final_logits_bias.astype(self.dtype))
1289
+
1290
+ if not return_dict:
1291
+ output = (lm_logits,) + outputs[1:]
1292
+ return output
1293
+
1294
+ return FlaxSeq2SeqLMOutput(
1295
+ logits=lm_logits,
1296
+ decoder_hidden_states=outputs.decoder_hidden_states,
1297
+ decoder_attentions=outputs.decoder_attentions,
1298
+ cross_attentions=outputs.cross_attentions,
1299
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
1300
+ encoder_hidden_states=outputs.encoder_hidden_states,
1301
+ encoder_attentions=outputs.encoder_attentions,
1302
+ )
1303
+
1304
+
1305
+ @add_start_docstrings(
1306
+ "The PEGASUS Model with a language modeling head. Can be used for summarization.", PEGASUS_START_DOCSTRING
1307
+ )
1308
+ class FlaxPegasusForConditionalGeneration(FlaxPegasusPreTrainedModel):
1309
+ module_class = FlaxPegasusForConditionalGenerationModule
1310
+ dtype: jnp.dtype = jnp.float32
1311
+
1312
+ @add_start_docstrings(PEGASUS_DECODE_INPUTS_DOCSTRING)
1313
+ @replace_return_docstrings(output_type=FlaxCausalLMOutputWithCrossAttentions, config_class=PegasusConfig)
1314
+ def decode(
1315
+ self,
1316
+ decoder_input_ids,
1317
+ encoder_outputs,
1318
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
1319
+ decoder_attention_mask: Optional[jnp.ndarray] = None,
1320
+ decoder_position_ids: Optional[jnp.ndarray] = None,
1321
+ past_key_values: dict = None,
1322
+ output_attentions: Optional[bool] = None,
1323
+ output_hidden_states: Optional[bool] = None,
1324
+ return_dict: Optional[bool] = None,
1325
+ deterministic: bool = True,
1326
+ params: dict = None,
1327
+ dropout_rng: PRNGKey = None,
1328
+ ):
1329
+ r"""
1330
+ Returns:
1331
+
1332
+ Example:
1333
+
1334
+ ```python
1335
+ >>> import jax.numpy as jnp
1336
+ >>> from transformers import AutoTokenizer, FlaxPegasusForConditionalGeneration
1337
+
1338
+ >>> model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-large")
1339
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/pegasus-large")
1340
+
1341
+ >>> text = "My friends are cool but they eat too many carbs."
1342
+ >>> inputs = tokenizer(text, max_length=1024, return_tensors="np")
1343
+ >>> encoder_outputs = model.encode(**inputs)
1344
+
1345
+ >>> decoder_start_token_id = model.config.decoder_start_token_id
1346
+ >>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id
1347
+
1348
+ >>> outputs = model.decode(decoder_input_ids, encoder_outputs)
1349
+ >>> logits = outputs.logits
1350
+ ```"""
1351
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1352
+ output_hidden_states = (
1353
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1354
+ )
1355
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
1356
+
1357
+ encoder_hidden_states = encoder_outputs[0]
1358
+ if encoder_attention_mask is None:
1359
+ batch_size, sequence_length = encoder_hidden_states.shape[:2]
1360
+ encoder_attention_mask = jnp.ones((batch_size, sequence_length))
1361
+
1362
+ batch_size, sequence_length = decoder_input_ids.shape
1363
+ if decoder_attention_mask is None:
1364
+ decoder_attention_mask = jnp.ones((batch_size, sequence_length))
1365
+
1366
+ if decoder_position_ids is None:
1367
+ if past_key_values is not None:
1368
+ raise ValueError("Make sure to provide `decoder_position_ids` when passing `past_key_values`.")
1369
+
1370
+ decoder_position_ids = jnp.broadcast_to(
1371
+ jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
1372
+ )
1373
+
1374
+ # Handle any PRNG if needed
1375
+ rngs = {}
1376
+ if dropout_rng is not None:
1377
+ rngs["dropout"] = dropout_rng
1378
+
1379
+ inputs = {"params": params or self.params}
1380
+
1381
+ # if `past_key_values` are passed, the cache is already initialized; a private flag `init_cache` has to be
1382
+ # passed down to ensure the cache is used. The cache must also be marked as mutable so that
1383
+ # it can be updated by the FlaxPegasusAttention module
1384
+ if past_key_values:
1385
+ inputs["cache"] = past_key_values
1386
+ mutable = ["cache"]
1387
+ else:
1388
+ mutable = False
1389
+
1390
+ def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):
1391
+ decoder_module = module._get_decoder_module()
1392
+ outputs = decoder_module(
1393
+ decoder_input_ids,
1394
+ decoder_attention_mask,
1395
+ decoder_position_ids,
1396
+ **kwargs,
1397
+ )
1398
+ hidden_states = outputs[0]
1399
+
1400
+ if self.config.tie_word_embeddings:
1401
+ shared_embedding = module.model.variables["params"]["shared"]["embedding"]
1402
+ lm_logits = module.lm_head.apply({"params": {"kernel": shared_embedding.T}}, hidden_states)
1403
+ else:
1404
+ lm_logits = module.lm_head(hidden_states)
1405
+
1406
+ lm_logits += module.final_logits_bias.astype(self.dtype)
1407
+ return lm_logits, outputs
1408
+
1409
+ outputs = self.module.apply(
1410
+ inputs,
1411
+ decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
1412
+ decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
1413
+ decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
1414
+ encoder_hidden_states=encoder_hidden_states,
1415
+ encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"),
1416
+ output_attentions=output_attentions,
1417
+ output_hidden_states=output_hidden_states,
1418
+ return_dict=return_dict,
1419
+ deterministic=deterministic,
1420
+ rngs=rngs,
1421
+ mutable=mutable,
1422
+ method=_decoder_forward,
1423
+ )
1424
+
1425
+ if past_key_values is None:
1426
+ lm_logits, decoder_outputs = outputs
1427
+ else:
1428
+ (lm_logits, decoder_outputs), past = outputs
1429
+
1430
+ if return_dict:
1431
+ outputs = FlaxCausalLMOutputWithCrossAttentions(
1432
+ logits=lm_logits,
1433
+ hidden_states=decoder_outputs.hidden_states,
1434
+ attentions=decoder_outputs.attentions,
1435
+ cross_attentions=decoder_outputs.cross_attentions,
1436
+ )
1437
+ else:
1438
+ outputs = (lm_logits,) + decoder_outputs[1:]
1439
+
1440
+ # add updated cache to model output
1441
+ if past_key_values is not None and return_dict:
1442
+ outputs["past_key_values"] = unfreeze(past["cache"])
1443
+ return outputs
1444
+ elif past_key_values is not None and not return_dict:
1445
+ outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:]
1446
+
1447
+ return outputs
1448
+
1449
+ def prepare_inputs_for_generation(
1450
+ self,
1451
+ decoder_input_ids,
1452
+ max_length,
1453
+ attention_mask: Optional[jax.Array] = None,
1454
+ decoder_attention_mask: Optional[jax.Array] = None,
1455
+ encoder_outputs=None,
1456
+ **kwargs,
1457
+ ):
1458
+ # initializing the cache
1459
+ batch_size, seq_length = decoder_input_ids.shape
1460
+
1461
+ past_key_values = self.init_cache(batch_size, max_length, encoder_outputs)
1462
+ # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length.
1463
+ # But since the decoder uses a causal mask, those positions are masked anyway.
1464
+ # Thus we can create a single static attention_mask here, which is more efficient for compilation.
1465
+ extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
1466
+ if decoder_attention_mask is not None:
1467
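+ # derive position ids from the attention mask so that padded positions do not advance the position counter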
+ position_ids = decoder_attention_mask.cumsum(axis=-1) - 1
1468
+ extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, decoder_attention_mask, (0, 0))
1469
+ else:
1470
+ position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length))
1471
+
1472
+ return {
1473
+ "past_key_values": past_key_values,
1474
+ "encoder_outputs": encoder_outputs,
1475
+ "encoder_attention_mask": attention_mask,
1476
+ "decoder_attention_mask": extended_attention_mask,
1477
+ "decoder_position_ids": position_ids,
1478
+ }
1479
+
1480
+ def update_inputs_for_generation(self, model_outputs, model_kwargs):
1481
+ model_kwargs["past_key_values"] = model_outputs.past_key_values
1482
+ model_kwargs["decoder_position_ids"] = model_kwargs["decoder_position_ids"][:, -1:] + 1
1483
+ return model_kwargs
1484
+
1485
+
1486
+ FLAX_PEGASUS_CONDITIONAL_GENERATION_DOCSTRING = """
1487
+ Returns:
1488
+
1489
+ Summarization example:
1490
+
1491
+ ```python
1492
+ >>> from transformers import AutoTokenizer, FlaxPegasusForConditionalGeneration
1493
+
1494
+ >>> model = FlaxPegasusForConditionalGeneration.from_pretrained('google/pegasus-large')
1495
+ >>> tokenizer = AutoTokenizer.from_pretrained('google/pegasus-large')
1496
+
1497
+ >>> ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
1498
+ >>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors='np')
1499
+
1500
+ >>> # Generate Summary
1501
+ >>> summary_ids = model.generate(inputs['input_ids']).sequences
1502
+ >>> print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False))
1503
+ ```
1504
+
1505
+ Mask filling example:
1506
+
1507
+ ```python
1508
+ >>> import jax
+ >>> from transformers import AutoTokenizer, FlaxPegasusForConditionalGeneration
1509
+
1510
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/pegasus-large")
1511
+ >>> TXT = "My friends are <mask> but they eat too many carbs."
1512
+
1513
+ >>> model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-large")
1514
+ >>> input_ids = tokenizer([TXT], return_tensors="np")["input_ids"]
1515
+ >>> logits = model(input_ids).logits
1516
+
1517
+ >>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero()[0].item()
1518
+ >>> probs = jax.nn.softmax(logits[0, masked_index], axis=0)
1519
+ >>> values, predictions = jax.lax.top_k(probs, k=5)
1520
+
1521
+ >>> tokenizer.decode(predictions).split()
1522
+ ```
1523
+ """
1524
+
1525
+ overwrite_call_docstring(
1526
+ FlaxPegasusForConditionalGeneration, PEGASUS_INPUTS_DOCSTRING + FLAX_PEGASUS_CONDITIONAL_GENERATION_DOCSTRING
1527
+ )
1528
+ append_replace_return_docstrings(
1529
+ FlaxPegasusForConditionalGeneration, output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC
1530
+ )
venv/lib/python3.10/site-packages/transformers/models/pegasus/modeling_pegasus.py ADDED
@@ -0,0 +1,1693 @@
1
+ # coding=utf-8
2
+ # Copyright 2021, Google and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch PEGASUS model."""
16
+
17
+ import copy
18
+ import math
19
+ from typing import List, Optional, Tuple, Union
20
+
21
+ import numpy as np
22
+ import torch
23
+ import torch.utils.checkpoint
24
+ from torch import nn
25
+ from torch.nn import CrossEntropyLoss
26
+
27
+ from ...activations import ACT2FN
28
+ from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask
29
+ from ...modeling_outputs import (
30
+ BaseModelOutput,
31
+ BaseModelOutputWithPastAndCrossAttentions,
32
+ CausalLMOutputWithCrossAttentions,
33
+ Seq2SeqLMOutput,
34
+ Seq2SeqModelOutput,
35
+ )
36
+ from ...modeling_utils import PreTrainedModel
37
+ from ...utils import (
38
+ add_end_docstrings,
39
+ add_start_docstrings,
40
+ add_start_docstrings_to_model_forward,
41
+ logging,
42
+ replace_return_docstrings,
43
+ )
44
+ from .configuration_pegasus import PegasusConfig
45
+
46
+
47
+ logger = logging.get_logger(__name__)
48
+
49
+ _CHECKPOINT_FOR_DOC = "google/pegasus-large"
50
+ _CONFIG_FOR_DOC = "PegasusConfig"
51
+
52
+
53
+ # Copied from transformers.models.bart.modeling_bart.shift_tokens_right
54
+ def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
55
+ """
56
+ Shift input ids one token to the right.
57
+ """
58
+ shifted_input_ids = input_ids.new_zeros(input_ids.shape)
59
+ shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
60
+ shifted_input_ids[:, 0] = decoder_start_token_id
61
+
62
+ if pad_token_id is None:
63
+ raise ValueError("self.model.config.pad_token_id has to be defined.")
64
+ # replace possible -100 values in labels by `pad_token_id`
65
+ shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
66
+
67
+ return shifted_input_ids
68
+
69
+
70
+ # Copied from transformers.models.marian.modeling_marian.MarianSinusoidalPositionalEmbedding with Marian->Pegasus
71
+ class PegasusSinusoidalPositionalEmbedding(nn.Embedding):
72
+ """This module produces sinusoidal positional embeddings of any length."""
73
+
74
+ def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None) -> None:
75
+ super().__init__(num_positions, embedding_dim)
76
+ self.weight = self._init_weight(self.weight)
77
+
78
+ @staticmethod
79
+ def _init_weight(out: nn.Parameter) -> nn.Parameter:
80
+ """
81
+ Identical to the XLM create_sinusoidal_embeddings except features are not interleaved. The cos features are in
82
+ the 2nd half of the vector. [dim // 2:]
83
+ """
84
+ n_pos, dim = out.shape
85
+ position_enc = np.array(
86
+ [[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]
87
+ )
88
+ out.requires_grad = False # set early to avoid an error in pytorch-1.8+
89
+ sentinel = dim // 2 if dim % 2 == 0 else (dim // 2) + 1
90
+ out[:, 0:sentinel] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))
91
+ out[:, sentinel:] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))
92
+ out.detach_()
93
+ return out
94
+
95
+ @torch.no_grad()
96
+ def forward(self, input_ids_shape: torch.Size, past_key_values_length: int = 0) -> torch.Tensor:
97
+ """`input_ids_shape` is expected to be [bsz x seqlen]."""
98
+ bsz, seq_len = input_ids_shape[:2]
99
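+ # offset by past_key_values_length so that incremental decoding continues the position sequence where the cache left off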
+ positions = torch.arange(
100
+ past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device
101
+ )
102
+ return super().forward(positions)
103
+
104
+
105
+ # Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->Pegasus
106
+ class PegasusAttention(nn.Module):
107
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
108
+
109
+ def __init__(
110
+ self,
111
+ embed_dim: int,
112
+ num_heads: int,
113
+ dropout: float = 0.0,
114
+ is_decoder: bool = False,
115
+ bias: bool = True,
116
+ is_causal: bool = False,
117
+ config: Optional[PegasusConfig] = None,
118
+ ):
119
+ super().__init__()
120
+ self.embed_dim = embed_dim
121
+ self.num_heads = num_heads
122
+ self.dropout = dropout
123
+ self.head_dim = embed_dim // num_heads
124
+ self.config = config
125
+
126
+ if (self.head_dim * num_heads) != self.embed_dim:
127
+ raise ValueError(
128
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
129
+ f" and `num_heads`: {num_heads})."
130
+ )
131
+ self.scaling = self.head_dim**-0.5
132
+ self.is_decoder = is_decoder
133
+ self.is_causal = is_causal
134
+
135
+ self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
136
+ self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
137
+ self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
138
+ self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
139
+
140
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
141
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
142
+
143
+ def forward(
144
+ self,
145
+ hidden_states: torch.Tensor,
146
+ key_value_states: Optional[torch.Tensor] = None,
147
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
148
+ attention_mask: Optional[torch.Tensor] = None,
149
+ layer_head_mask: Optional[torch.Tensor] = None,
150
+ output_attentions: bool = False,
151
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
152
+ """Input shape: Batch x Time x Channel"""
153
+
154
+ # if key_value_states are provided this layer is used as a cross-attention layer
155
+ # for the decoder
156
+ is_cross_attention = key_value_states is not None
157
+
158
+ bsz, tgt_len, _ = hidden_states.size()
159
+
160
+ # get query proj
161
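+ # queries are pre-scaled by 1/sqrt(head_dim) so the subsequent bmm directly produces scaled dot-product scores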
+ query_states = self.q_proj(hidden_states) * self.scaling
162
+ # get key, value proj
163
+ # `past_key_value[0].shape[2] == key_value_states.shape[1]`
164
+ # is checking that the `sequence_length` of the `past_key_value` is the same as
165
+ # the provided `key_value_states` to support prefix tuning
166
+ if (
167
+ is_cross_attention
168
+ and past_key_value is not None
169
+ and past_key_value[0].shape[2] == key_value_states.shape[1]
170
+ ):
171
+ # reuse k,v, cross_attentions
172
+ key_states = past_key_value[0]
173
+ value_states = past_key_value[1]
174
+ elif is_cross_attention:
175
+ # cross_attentions
176
+ key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
177
+ value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
178
+ elif past_key_value is not None:
179
+ # reuse k, v, self_attention
180
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
181
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
182
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
183
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
184
+ else:
185
+ # self_attention
186
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
187
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
188
+
189
+ if self.is_decoder:
190
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
191
+ # Further calls to cross_attention layer can then reuse all cross-attention
192
+ # key/value_states (first "if" case)
193
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
194
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
195
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
196
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
197
+ past_key_value = (key_states, value_states)
198
+
199
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
200
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
201
+ key_states = key_states.reshape(*proj_shape)
202
+ value_states = value_states.reshape(*proj_shape)
203
+
204
+ src_len = key_states.size(1)
205
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
206
+
207
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
208
+ raise ValueError(
209
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
210
+ f" {attn_weights.size()}"
211
+ )
212
+
213
+ if attention_mask is not None:
214
+ if attention_mask.size() != (bsz, 1, tgt_len, src_len):
215
+ raise ValueError(
216
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
217
+ )
218
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
219
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
220
+
221
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
222
+
223
+ if layer_head_mask is not None:
224
+ if layer_head_mask.size() != (self.num_heads,):
225
+ raise ValueError(
226
+ f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
227
+ f" {layer_head_mask.size()}"
228
+ )
229
+ attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
230
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
231
+
232
+ if output_attentions:
233
+ # this operation is a bit awkward, but it's required to
234
+ # make sure that attn_weights keeps its gradient.
235
+ # In order to do so, attn_weights have to be reshaped
236
+ # twice and have to be reused in the following
237
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
238
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
239
+ else:
240
+ attn_weights_reshaped = None
241
+
242
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
243
+
244
+ attn_output = torch.bmm(attn_probs, value_states)
245
+
246
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
247
+ raise ValueError(
248
+ f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
249
+ f" {attn_output.size()}"
250
+ )
251
+
252
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
253
+ attn_output = attn_output.transpose(1, 2)
254
+
255
+ # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
256
+ # partitioned across GPUs when using tensor-parallelism.
257
+ attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
258
+
259
+ attn_output = self.out_proj(attn_output)
260
+
261
+ return attn_output, attn_weights_reshaped, past_key_value
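+ # A minimal usage sketch (illustrative only, assuming the constructor arguments used by the layers below):
+ #   attn = PegasusAttention(embed_dim=1024, num_heads=16, dropout=0.1)
+ #   attn_output, attn_weights, past_key_value = attn(hidden_states)  # hidden_states: (bsz, seq_len, 1024)
+ # `attn_output` keeps the input shape, `attn_weights` is None unless output_attentions=True, and
+ # `past_key_value` stays None unless the module was built with is_decoder=True.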
262
+
263
+
264
+ PEGASUS_ATTENTION_CLASSES = {"eager": PegasusAttention}
265
+
266
+
267
+ # Copied from transformers.models.mbart.modeling_mbart.MBartEncoderLayer with MBart->Pegasus, MBART->PEGASUS
268
+ class PegasusEncoderLayer(nn.Module):
269
+ def __init__(self, config: PegasusConfig):
270
+ super().__init__()
271
+ self.embed_dim = config.d_model
272
+
273
+ self.self_attn = PEGASUS_ATTENTION_CLASSES[config._attn_implementation](
274
+ embed_dim=self.embed_dim,
275
+ num_heads=config.encoder_attention_heads,
276
+ dropout=config.attention_dropout,
277
+ config=config,
278
+ )
279
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
280
+ self.dropout = config.dropout
281
+ self.activation_fn = ACT2FN[config.activation_function]
282
+ self.activation_dropout = config.activation_dropout
283
+ self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
284
+ self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
285
+ self.final_layer_norm = nn.LayerNorm(self.embed_dim)
286
+
287
+ def forward(
288
+ self,
289
+ hidden_states: torch.Tensor,
290
+ attention_mask: torch.Tensor,
291
+ layer_head_mask: torch.Tensor,
292
+ output_attentions: bool = False,
293
+ ) -> torch.Tensor:
294
+ """
295
+ Args:
296
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
297
+ attention_mask (`torch.FloatTensor`): attention mask of size
298
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
299
+ layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
300
+ `(encoder_attention_heads,)`.
301
+ output_attentions (`bool`, *optional*):
302
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
303
+ returned tensors for more detail.
304
+ """
305
+ residual = hidden_states
306
+ hidden_states = self.self_attn_layer_norm(hidden_states)
307
+ hidden_states, attn_weights, _ = self.self_attn(
308
+ hidden_states=hidden_states,
309
+ attention_mask=attention_mask,
310
+ layer_head_mask=layer_head_mask,
311
+ output_attentions=output_attentions,
312
+ )
313
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
314
+ hidden_states = residual + hidden_states
315
+
316
+ residual = hidden_states
317
+ hidden_states = self.final_layer_norm(hidden_states)
318
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
319
+ hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
320
+ hidden_states = self.fc2(hidden_states)
321
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
322
+ hidden_states = residual + hidden_states
323
+
324
+ if hidden_states.dtype == torch.float16 and (
325
+ torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
326
+ ):
327
+ clamp_value = torch.finfo(hidden_states.dtype).max - 1000
328
+ hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
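+ # Illustrative note: torch.finfo(torch.float16).max is 65504, so fp16 activations are clamped to
+ # roughly +/-64504 here, keeping the residual stream finite (no inf/nan) in half precision.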
329
+
330
+ outputs = (hidden_states,)
331
+
332
+ if output_attentions:
333
+ outputs += (attn_weights,)
334
+
335
+ return outputs
336
+
337
+
338
+ # Copied from transformers.models.mbart.modeling_mbart.MBartDecoderLayer with MBart->Pegasus, MBART->PEGASUS
339
+ class PegasusDecoderLayer(nn.Module):
340
+ def __init__(self, config: PegasusConfig):
341
+ super().__init__()
342
+ self.embed_dim = config.d_model
343
+
344
+ self.self_attn = PEGASUS_ATTENTION_CLASSES[config._attn_implementation](
345
+ embed_dim=self.embed_dim,
346
+ num_heads=config.decoder_attention_heads,
347
+ dropout=config.attention_dropout,
348
+ is_decoder=True,
349
+ is_causal=True,
350
+ config=config,
351
+ )
352
+ self.dropout = config.dropout
353
+ self.activation_fn = ACT2FN[config.activation_function]
354
+ self.activation_dropout = config.activation_dropout
355
+
356
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
357
+ self.encoder_attn = PEGASUS_ATTENTION_CLASSES[config._attn_implementation](
358
+ self.embed_dim,
359
+ config.decoder_attention_heads,
360
+ dropout=config.attention_dropout,
361
+ is_decoder=True,
362
+ config=config,
363
+ )
364
+ self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
365
+ self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
366
+ self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
367
+ self.final_layer_norm = nn.LayerNorm(self.embed_dim)
368
+
369
+ def forward(
370
+ self,
371
+ hidden_states: torch.Tensor,
372
+ attention_mask: Optional[torch.Tensor] = None,
373
+ encoder_hidden_states: Optional[torch.Tensor] = None,
374
+ encoder_attention_mask: Optional[torch.Tensor] = None,
375
+ layer_head_mask: Optional[torch.Tensor] = None,
376
+ cross_attn_layer_head_mask: Optional[torch.Tensor] = None,
377
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
378
+ output_attentions: Optional[bool] = False,
379
+ use_cache: Optional[bool] = True,
380
+ ) -> torch.Tensor:
381
+ """
382
+ Args:
383
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
384
+ attention_mask (`torch.FloatTensor`): attention mask of size
385
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
386
+ encoder_hidden_states (`torch.FloatTensor`):
387
+ cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
388
+ encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
389
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
390
+ layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
391
+ `(decoder_attention_heads,)`.
392
+ cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
393
+ size `(decoder_attention_heads,)`.
394
+ past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states
395
+ output_attentions (`bool`, *optional*):
396
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
397
+ returned tensors for more detail.
398
+ """
399
+ residual = hidden_states
400
+ hidden_states = self.self_attn_layer_norm(hidden_states)
401
+
402
+ # Self Attention
403
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
404
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
405
+ # add present self-attn cache to positions 1,2 of present_key_value tuple
406
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
407
+ hidden_states=hidden_states,
408
+ past_key_value=self_attn_past_key_value,
409
+ attention_mask=attention_mask,
410
+ layer_head_mask=layer_head_mask,
411
+ output_attentions=output_attentions,
412
+ )
413
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
414
+ hidden_states = residual + hidden_states
415
+
416
+ # Cross-Attention Block
417
+ cross_attn_present_key_value = None
418
+ cross_attn_weights = None
419
+ if encoder_hidden_states is not None:
420
+ residual = hidden_states
421
+ hidden_states = self.encoder_attn_layer_norm(hidden_states)
422
+
423
+ # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
424
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
425
+ hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
426
+ hidden_states=hidden_states,
427
+ key_value_states=encoder_hidden_states,
428
+ attention_mask=encoder_attention_mask,
429
+ layer_head_mask=cross_attn_layer_head_mask,
430
+ past_key_value=cross_attn_past_key_value,
431
+ output_attentions=output_attentions,
432
+ )
433
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
434
+ hidden_states = residual + hidden_states
435
+
436
+ # add cross-attn to positions 3,4 of present_key_value tuple
437
+ present_key_value = present_key_value + cross_attn_present_key_value
438
+
439
+ # Fully Connected
440
+ residual = hidden_states
441
+ hidden_states = self.final_layer_norm(hidden_states)
442
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
443
+ hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
444
+ hidden_states = self.fc2(hidden_states)
445
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
446
+ hidden_states = residual + hidden_states
447
+
448
+ outputs = (hidden_states,)
449
+
450
+ if output_attentions:
451
+ outputs += (self_attn_weights, cross_attn_weights)
452
+
453
+ if use_cache:
454
+ outputs += (present_key_value,)
455
+
456
+ return outputs
457
+
458
+
459
+ class PegasusPreTrainedModel(PreTrainedModel):
460
+ config_class = PegasusConfig
461
+ base_model_prefix = "model"
462
+ supports_gradient_checkpointing = True
463
+
464
+ def _init_weights(self, module):
465
+ std = self.config.init_std
466
+ if isinstance(module, nn.Linear):
467
+ module.weight.data.normal_(mean=0.0, std=std)
468
+ if module.bias is not None:
469
+ module.bias.data.zero_()
470
+ elif isinstance(module, PegasusSinusoidalPositionalEmbedding):
471
+ pass
472
+ elif isinstance(module, nn.Embedding):
473
+ module.weight.data.normal_(mean=0.0, std=std)
474
+ if module.padding_idx is not None:
475
+ module.weight.data[module.padding_idx].zero_()
476
+
477
+
478
+ PEGASUS_START_DOCSTRING = r"""
479
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
480
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
481
+ etc.)
482
+
483
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
484
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
485
+ and behavior.
486
+
487
+ Parameters:
488
+ config ([`PegasusConfig`]):
489
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
490
+ load the weights associated with the model, only the configuration. Check out the
491
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
492
+ """
493
+
494
+ PEGASUS_GENERATION_EXAMPLE = r"""
495
+ Summarization example:
496
+
497
+ ```python
498
+ >>> from transformers import AutoTokenizer, PegasusForConditionalGeneration
499
+
500
+ >>> model = PegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
501
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/pegasus-xsum")
502
+
503
+ >>> ARTICLE_TO_SUMMARIZE = (
504
+ ... "PG&E stated it scheduled the blackouts in response to forecasts for high winds "
505
+ ... "amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were "
506
+ ... "scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow."
507
+ ... )
508
+ >>> inputs = tokenizer(ARTICLE_TO_SUMMARIZE, max_length=1024, return_tensors="pt")
509
+
510
+ >>> # Generate Summary
511
+ >>> summary_ids = model.generate(inputs["input_ids"])
512
+ >>> tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
513
+ "California's largest electricity provider has turned off power to hundreds of thousands of customers."
514
+ ```
515
+ """
516
+
517
+ PEGASUS_INPUTS_DOCSTRING = r"""
518
+ Args:
519
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
520
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
521
+ it.
522
+
523
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
524
+ [`PreTrainedTokenizer.__call__`] for details.
525
+
526
+ [What are input IDs?](../glossary#input-ids)
527
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
528
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
529
+
530
+ - 1 for tokens that are **not masked**,
531
+ - 0 for tokens that are **masked**.
532
+
533
+ [What are attention masks?](../glossary#attention-mask)
534
+ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
535
+ Indices of decoder input sequence tokens in the vocabulary.
536
+
537
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
538
+ [`PreTrainedTokenizer.__call__`] for details.
539
+
540
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
541
+
542
+ Pegasus uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If
543
+ `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
544
+ `past_key_values`).
545
+ decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
546
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
547
+ be used by default.
548
+ head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
549
+ Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
550
+
551
+ - 1 indicates the head is **not masked**,
552
+ - 0 indicates the head is **masked**.
553
+
554
+ decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
555
+ Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
556
+
557
+ - 1 indicates the head is **not masked**,
558
+ - 0 indicates the head is **masked**.
559
+
560
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
561
+ Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0,
562
+ 1]`:
563
+
564
+ - 1 indicates the head is **not masked**,
565
+ - 0 indicates the head is **masked**.
566
+
567
+ encoder_outputs (`tuple(tuple(torch.FloatTensor))`, *optional*):
568
+ Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
569
+ `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*, is a sequence of
570
+ hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
571
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
572
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
573
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
574
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
575
+
576
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
577
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
578
+
579
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
580
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
581
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
582
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
583
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
584
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
585
+ than the model's internal embedding lookup matrix.
586
+ decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
587
+ Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
588
+ representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
589
+ input (see `past_key_values`). This is useful if you want more control over how to convert
590
+ `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
591
+
592
+ If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value
593
+ of `inputs_embeds`.
594
+ use_cache (`bool`, *optional*):
595
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
596
+ `past_key_values`).
597
+ output_attentions (`bool`, *optional*):
598
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
599
+ tensors for more detail.
600
+ output_hidden_states (`bool`, *optional*):
601
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
602
+ more detail.
603
+ return_dict (`bool`, *optional*):
604
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
605
+ """
606
+
607
+
608
+ class PegasusEncoder(PegasusPreTrainedModel):
609
+ """
610
+ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
611
+ [`PegasusEncoderLayer`].
612
+
613
+ Args:
614
+ config: PegasusConfig
615
+ embed_tokens (nn.Embedding): input token embedding
616
+ """
617
+
618
+ def __init__(self, config: PegasusConfig, embed_tokens: Optional[nn.Embedding] = None):
619
+ super().__init__(config)
620
+
621
+ self.dropout = config.dropout
622
+ self.layerdrop = config.encoder_layerdrop
623
+
624
+ embed_dim = config.d_model
625
+ self.padding_idx = config.pad_token_id
626
+ self.max_source_positions = config.max_position_embeddings
627
+ self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
628
+
629
+ if embed_tokens is not None:
630
+ self.embed_tokens = embed_tokens
631
+ else:
632
+ self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx)
633
+
634
+ self.embed_positions = PegasusSinusoidalPositionalEmbedding(
635
+ config.max_position_embeddings,
636
+ embed_dim,
637
+ self.padding_idx,
638
+ )
639
+ self.layers = nn.ModuleList([PegasusEncoderLayer(config) for _ in range(config.encoder_layers)])
640
+ self.layer_norm = nn.LayerNorm(config.d_model)
641
+
642
+ self.gradient_checkpointing = False
643
+ # Initialize weights and apply final processing
644
+ self.post_init()
645
+
646
+ def resize_position_embeddings(self, new_num_position_embeddings: int):
647
+ """
648
+ Resizes position embeddings matrix of the model if `new_num_position_embeddings !=
649
+ config.max_position_embeddings`.
650
+
651
+ Arguments:
652
+ new_num_position_embeddings (`int`):
653
+ The number of new position embeddings. If position embeddings are learned, increasing the size will add
654
+ newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
655
+ position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
656
+ add correct vectors at the end following the position encoding algorithm, whereas reducing the size
657
+ will remove vectors from the end.
658
+ """
659
+ logger.info(f"Setting `config.max_position_embeddings={new_num_position_embeddings}`...")
660
+ self.config.max_position_embeddings = new_num_position_embeddings
661
+
662
+ self.embed_positions = PegasusSinusoidalPositionalEmbedding(
663
+ self.config.max_position_embeddings,
664
+ self.config.d_model,
665
+ self.padding_idx,
666
+ )
667
+ self.embed_positions.to(self.device)
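+ # Illustrative usage: encoder.resize_position_embeddings(2048) rebuilds the sinusoidal table for
+ # 2048 positions; because these embeddings are deterministic (and never trained, see _init_weights),
+ # the newly added positions are immediately valid without further training.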
668
+
669
+ def get_position_embeddings(self) -> nn.Embedding:
670
+ """
671
+ Returns the position embeddings matrix
672
+ """
673
+ return self.embed_positions
674
+
675
+ def forward(
676
+ self,
677
+ input_ids=None,
678
+ attention_mask=None,
679
+ head_mask=None,
680
+ inputs_embeds=None,
681
+ output_attentions=None,
682
+ output_hidden_states=None,
683
+ return_dict=None,
684
+ ):
685
+ r"""
686
+ Args:
687
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
688
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
689
+ provide it.
690
+
691
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
692
+ [`PreTrainedTokenizer.__call__`] for details.
693
+
694
+ [What are input IDs?](../glossary#input-ids)
695
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
696
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
697
+
698
+ - 1 for tokens that are **not masked**,
699
+ - 0 for tokens that are **masked**.
700
+
701
+ [What are attention masks?](../glossary#attention-mask)
702
+ head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
703
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
704
+
705
+ - 1 indicates the head is **not masked**,
706
+ - 0 indicates the head is **masked**.
707
+
708
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
709
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
710
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
711
+ than the model's internal embedding lookup matrix.
712
+ output_attentions (`bool`, *optional*):
713
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
714
+ returned tensors for more detail.
715
+ output_hidden_states (`bool`, *optional*):
716
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
717
+ for more detail.
718
+ return_dict (`bool`, *optional*):
719
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
720
+ """
721
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
722
+ output_hidden_states = (
723
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
724
+ )
725
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
726
+
727
+ # retrieve input_ids and inputs_embeds
728
+ if input_ids is not None and inputs_embeds is not None:
729
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
730
+ elif input_ids is not None:
731
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
732
+ input_shape = input_ids.size()
733
+ input_ids = input_ids.view(-1, input_shape[-1])
734
+ elif inputs_embeds is not None:
735
+ input_shape = inputs_embeds.size()[:-1]
736
+ else:
737
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
738
+
739
+ if inputs_embeds is None:
740
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
741
+
742
+ embed_pos = self.embed_positions(input_shape)
743
+
744
+ hidden_states = inputs_embeds + embed_pos
745
+
746
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
747
+
748
+ # expand attention_mask
749
+ if attention_mask is not None:
750
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
751
+ attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
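+ # Illustrative example: a padding mask [[1, 1, 0]] is expanded to shape (1, 1, seq_len, 3) with 0.0
+ # at attended positions and the dtype's minimum value at masked positions, so it can simply be
+ # added to the attention scores.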
752
+
753
+ encoder_states = () if output_hidden_states else None
754
+ all_attentions = () if output_attentions else None
755
+
756
+ # check if head_mask has a correct number of layers specified if desired
757
+ if head_mask is not None:
758
+ if head_mask.size()[0] != len(self.layers):
759
+ raise ValueError(
760
+ f"The head_mask should be specified for {len(self.layers)} layers, but it is for"
761
+ f" {head_mask.size()[0]}."
762
+ )
763
+ for idx, encoder_layer in enumerate(self.layers):
764
+ if output_hidden_states:
765
+ encoder_states = encoder_states + (hidden_states,)
766
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
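+ # e.g. with config.encoder_layerdrop = 0.1, each encoder layer is independently skipped with
+ # probability 0.1 during training (never during evaluation)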
767
+ to_drop = False
768
+ if self.training:
769
+ dropout_probability = torch.rand([])
770
+ if dropout_probability < self.layerdrop: # skip the layer
771
+ to_drop = True
772
+
773
+ if to_drop:
774
+ layer_outputs = (None, None)
775
+ else:
776
+ if self.gradient_checkpointing and self.training:
777
+ layer_outputs = self._gradient_checkpointing_func(
778
+ encoder_layer.__call__,
779
+ hidden_states,
780
+ attention_mask,
781
+ (head_mask[idx] if head_mask is not None else None),
782
+ output_attentions,
783
+ )
784
+ else:
785
+ layer_outputs = encoder_layer(
786
+ hidden_states,
787
+ attention_mask,
788
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
789
+ output_attentions=output_attentions,
790
+ )
791
+
792
+ hidden_states = layer_outputs[0]
793
+
794
+ if output_attentions:
795
+ all_attentions = all_attentions + (layer_outputs[1],)
796
+
797
+ hidden_states = self.layer_norm(hidden_states)
798
+
799
+ if output_hidden_states:
800
+ encoder_states = encoder_states + (hidden_states,)
801
+
802
+ if not return_dict:
803
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
804
+ return BaseModelOutput(
805
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
806
+ )
807
+
808
+
809
+ class PegasusDecoder(PegasusPreTrainedModel):
810
+ """
811
+ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`PegasusDecoderLayer`]
812
+
813
+ Args:
814
+ config: PegasusConfig
815
+ embed_tokens (nn.Embedding): input token embedding
816
+ """
817
+
818
+ def __init__(self, config: PegasusConfig, embed_tokens: Optional[nn.Embedding] = None):
819
+ super().__init__(config)
820
+ self.dropout = config.dropout
821
+ self.layerdrop = config.decoder_layerdrop
822
+ self.padding_idx = config.pad_token_id
823
+ self.max_target_positions = config.max_position_embeddings
824
+ self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
825
+
826
+ if embed_tokens is not None:
827
+ self.embed_tokens = embed_tokens
828
+ else:
829
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)
830
+
831
+ self.embed_positions = PegasusSinusoidalPositionalEmbedding(
832
+ config.max_position_embeddings,
833
+ config.d_model,
834
+ self.padding_idx,
835
+ )
836
+ self.layers = nn.ModuleList([PegasusDecoderLayer(config) for _ in range(config.decoder_layers)])
837
+ self.layer_norm = nn.LayerNorm(config.d_model)
838
+
839
+ self.gradient_checkpointing = False
840
+ # Initialize weights and apply final processing
841
+ self.post_init()
842
+
843
+ def get_input_embeddings(self):
844
+ return self.embed_tokens
845
+
846
+ def set_input_embeddings(self, value):
847
+ self.embed_tokens = value
848
+
849
+ def resize_position_embeddings(self, new_num_position_embeddings: int):
850
+ """
851
+ Resizes position embeddings matrix of the model if `new_num_position_embeddings !=
852
+ config.max_position_embeddings`.
853
+
854
+ Arguments:
855
+ new_num_position_embeddings (`int`):
856
+ The number of new position embeddings. If position embeddings are learned, increasing the size will add
857
+ newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
858
+ position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
859
+ add correct vectors at the end following the position encoding algorithm, whereas reducing the size
860
+ will remove vectors from the end.
861
+ """
862
+ logger.info(f"Setting `config.max_position_embeddings={new_num_position_embeddings}`...")
863
+ self.config.max_position_embeddings = new_num_position_embeddings
864
+
865
+ self.embed_positions = PegasusSinusoidalPositionalEmbedding(
866
+ self.config.max_position_embeddings,
867
+ self.config.d_model,
868
+ self.padding_idx,
869
+ )
870
+ self.embed_positions.to(self.device)
871
+
872
+ def get_position_embeddings(self) -> nn.Embedding:
873
+ """
874
+ Returns the position embeddings matrix
875
+ """
876
+ return self.embed_positions
877
+
878
+ def forward(
879
+ self,
880
+ input_ids=None,
881
+ attention_mask=None,
882
+ encoder_hidden_states=None,
883
+ encoder_attention_mask=None,
884
+ head_mask=None,
885
+ cross_attn_head_mask=None,
886
+ past_key_values=None,
887
+ inputs_embeds=None,
888
+ use_cache=None,
889
+ output_attentions=None,
890
+ output_hidden_states=None,
891
+ return_dict=None,
892
+ ):
893
+ r"""
894
+ Args:
895
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
896
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
897
+ provide it.
898
+
899
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
900
+ [`PreTrainedTokenizer.__call__`] for details.
901
+
902
+ [What are input IDs?](../glossary#input-ids)
903
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
904
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
905
+
906
+ - 1 for tokens that are **not masked**,
907
+ - 0 for tokens that are **masked**.
908
+
909
+ [What are attention masks?](../glossary#attention-mask)
910
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
911
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
912
+ of the decoder.
913
+ encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
914
+ Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
915
+ selected in `[0, 1]`:
916
+
917
+ - 1 for tokens that are **not masked**,
918
+ - 0 for tokens that are **masked**.
919
+
920
+ [What are attention masks?](../glossary#attention-mask)
921
+ head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
922
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
923
+
924
+ - 1 indicates the head is **not masked**,
925
+ - 0 indicates the head is **masked**.
926
+
927
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
928
+ Mask to nullify selected heads of the cross-attention modules in decoder to avoid performing
929
+ cross-attention on hidden heads. Mask values selected in `[0, 1]`:
930
+
931
+ - 1 indicates the head is **not masked**,
932
+ - 0 indicates the head is **masked**.
933
+
934
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
935
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
936
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
937
+ shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
938
+
939
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
940
+ cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
941
+
942
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
943
+ that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
944
+ all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
945
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
946
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
947
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
948
+ than the model's internal embedding lookup matrix.
949
+ output_attentions (`bool`, *optional*):
950
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
951
+ returned tensors for more detail.
952
+ output_hidden_states (`bool`, *optional*):
953
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
954
+ for more detail.
955
+ return_dict (`bool`, *optional*):
956
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
957
+ """
958
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
959
+ output_hidden_states = (
960
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
961
+ )
962
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
963
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
964
+
965
+ # retrieve input_ids and inputs_embeds
966
+ if input_ids is not None and inputs_embeds is not None:
967
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
968
+ elif input_ids is not None:
969
+ input_shape = input_ids.size()
970
+ input_ids = input_ids.view(-1, input_shape[-1])
971
+ elif inputs_embeds is not None:
972
+ input_shape = inputs_embeds.size()[:-1]
973
+ else:
974
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
975
+
976
+ # past_key_values_length
977
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
978
+
979
+ if inputs_embeds is None:
980
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
981
+
982
+ attention_mask = _prepare_4d_causal_attention_mask(
983
+ attention_mask, input_shape, inputs_embeds, past_key_values_length
984
+ )
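+ # Illustrative example: for 3 target tokens and an empty cache, the resulting additive mask lets
+ # position i attend only to positions <= i (0.0 where allowed, the dtype minimum where disallowed);
+ # with a non-empty cache, all `past_key_values_length` cached positions are also attendable.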
985
+
986
+ # expand encoder attention mask
987
+ if encoder_hidden_states is not None and encoder_attention_mask is not None:
988
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
989
+ encoder_attention_mask = _prepare_4d_attention_mask(
990
+ encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
991
+ )
992
+
993
+ # embed positions
994
+ positions = self.embed_positions(input_shape, past_key_values_length)
995
+
996
+ hidden_states = inputs_embeds + positions
997
+
998
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
999
+
1000
+ if self.gradient_checkpointing and self.training:
1001
+ if use_cache:
1002
+ logger.warning_once(
1003
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
1004
+ )
1005
+ use_cache = False
1006
+
1007
+ # decoder layers
1008
+ all_hidden_states = () if output_hidden_states else None
1009
+ all_self_attns = () if output_attentions else None
1010
+ all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
1011
+ next_decoder_cache = () if use_cache else None
1012
+
1013
+ # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
1014
+ for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
1015
+ if attn_mask is not None:
1016
+ if attn_mask.size()[0] != len(self.layers):
1017
+ raise ValueError(
1018
+ f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
1019
+ f" {head_mask.size()[0]}."
1020
+ )
1021
+ for idx, decoder_layer in enumerate(self.layers):
1022
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
1023
+ if output_hidden_states:
1024
+ all_hidden_states += (hidden_states,)
1025
+ if self.training:
1026
+ dropout_probability = torch.rand([])
1027
+ if dropout_probability < self.layerdrop:
1028
+ continue
1029
+
1030
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
1031
+
1032
+ if self.gradient_checkpointing and self.training:
1033
+ layer_outputs = self._gradient_checkpointing_func(
1034
+ decoder_layer.__call__,
1035
+ hidden_states,
1036
+ attention_mask,
1037
+ encoder_hidden_states,
1038
+ encoder_attention_mask,
1039
+ head_mask[idx] if head_mask is not None else None,
1040
+ cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
1041
+ None,
1042
+ output_attentions,
1043
+ use_cache,
1044
+ )
1045
+ else:
1046
+ layer_outputs = decoder_layer(
1047
+ hidden_states,
1048
+ attention_mask=attention_mask,
1049
+ encoder_hidden_states=encoder_hidden_states,
1050
+ encoder_attention_mask=encoder_attention_mask,
1051
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
1052
+ cross_attn_layer_head_mask=(
1053
+ cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
1054
+ ),
1055
+ past_key_value=past_key_value,
1056
+ output_attentions=output_attentions,
1057
+ use_cache=use_cache,
1058
+ )
1059
+ hidden_states = layer_outputs[0]
1060
+
1061
+ if use_cache:
1062
+ next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)
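+ # layer_outputs is (hidden_states, self_attn_weights, cross_attn_weights, present_key_value) when
+ # output_attentions is True and (hidden_states, present_key_value) otherwise, hence index 3 vs 1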
1063
+
1064
+ if output_attentions:
1065
+ all_self_attns += (layer_outputs[1],)
1066
+
1067
+ if encoder_hidden_states is not None:
1068
+ all_cross_attentions += (layer_outputs[2],)
1069
+
1070
+ hidden_states = self.layer_norm(hidden_states)
1071
+
1072
+ # add hidden states from the last decoder layer
1073
+ if output_hidden_states:
1074
+ all_hidden_states += (hidden_states,)
1075
+
1076
+ next_cache = next_decoder_cache if use_cache else None
1077
+ if not return_dict:
1078
+ return tuple(
1079
+ v
1080
+ for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]
1081
+ if v is not None
1082
+ )
1083
+ return BaseModelOutputWithPastAndCrossAttentions(
1084
+ last_hidden_state=hidden_states,
1085
+ past_key_values=next_cache,
1086
+ hidden_states=all_hidden_states,
1087
+ attentions=all_self_attns,
1088
+ cross_attentions=all_cross_attentions,
1089
+ )
1090
+
1091
+
1092
+ @add_start_docstrings(
1093
+ "The bare PEGASUS Model outputting raw hidden-states without any specific head on top.",
1094
+ PEGASUS_START_DOCSTRING,
1095
+ )
1096
+ class PegasusModel(PegasusPreTrainedModel):
1097
+ _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"]
1098
+
1099
+ def __init__(self, config: PegasusConfig):
1100
+ super().__init__(config)
1101
+
1102
+ padding_idx, vocab_size = config.pad_token_id, config.vocab_size
1103
+ self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx)
1104
+
1105
+ self.encoder = PegasusEncoder(config, self.shared)
1106
+ self.decoder = PegasusDecoder(config, self.shared)
1107
+
1108
+ # Initialize weights and apply final processing
1109
+ self.post_init()
1110
+
1111
+ def get_input_embeddings(self):
1112
+ return self.shared
1113
+
1114
+ def set_input_embeddings(self, value):
1115
+ self.shared = value
1116
+ self.encoder.embed_tokens = self.shared
1117
+ self.decoder.embed_tokens = self.shared
1118
+
1119
+ def get_encoder(self):
1120
+ return self.encoder
1121
+
1122
+ def get_decoder(self):
1123
+ return self.decoder
1124
+
1125
+ def resize_position_embeddings(self, new_num_position_embeddings: int):
1126
+ """
1127
+ Resizes position embeddings matrix of the model if `new_num_position_embeddings !=
1128
+ config.max_position_embeddings`.
1129
+
1130
+ Arguments:
1131
+ new_num_position_embeddings (`int`):
1132
+ The number of new position embeddings. If position embeddings are learned, increasing the size will add
1133
+ newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
1134
+ position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
1135
+ add correct vectors at the end following the position encoding algorithm, whereas reducing the size
1136
+ will remove vectors from the end.
1137
+ """
1138
+ self.config.max_position_embeddings = new_num_position_embeddings
1139
+ self.encoder.resize_position_embeddings(new_num_position_embeddings)
1140
+ self.decoder.resize_position_embeddings(new_num_position_embeddings)
1141
+
1142
+ def get_position_embeddings(self) -> Tuple[nn.Embedding]:
1143
+ """
1144
+ Returns the position embeddings matrix
1145
+ """
1146
+ return (self.encoder.get_position_embeddings(), self.decoder.get_position_embeddings())
1147
+
1148
+ @add_start_docstrings_to_model_forward(PEGASUS_INPUTS_DOCSTRING)
1149
+ @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)
1150
+ def forward(
1151
+ self,
1152
+ input_ids: Optional[torch.Tensor] = None,
1153
+ attention_mask: Optional[torch.Tensor] = None,
1154
+ decoder_input_ids: Optional[torch.Tensor] = None,
1155
+ decoder_attention_mask: Optional[torch.Tensor] = None,
1156
+ head_mask: Optional[torch.Tensor] = None,
1157
+ decoder_head_mask: Optional[torch.Tensor] = None,
1158
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
1159
+ encoder_outputs: Optional[Tuple[torch.FloatTensor]] = None,
1160
+ past_key_values: Optional[Tuple[torch.FloatTensor]] = None,
1161
+ inputs_embeds: Optional[torch.Tensor] = None,
1162
+ decoder_inputs_embeds: Optional[torch.Tensor] = None,
1163
+ use_cache: Optional[bool] = None,
1164
+ output_attentions: Optional[bool] = None,
1165
+ output_hidden_states: Optional[bool] = None,
1166
+ return_dict: Optional[bool] = None,
1167
+ ) -> Union[Tuple, Seq2SeqModelOutput]:
1168
+ r"""
1169
+ Returns:
1170
+
1171
+ Example:
1172
+
1173
+ ```python
1174
+ >>> from transformers import AutoTokenizer, PegasusModel
1175
+
1176
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/pegasus-large")
1177
+ >>> model = PegasusModel.from_pretrained("google/pegasus-large")
1178
+
1179
+ >>> inputs = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt")
1180
+ >>> decoder_inputs = tokenizer("Studies show that", return_tensors="pt")
1181
+ >>> outputs = model(input_ids=inputs.input_ids, decoder_input_ids=decoder_inputs.input_ids)
1182
+
1183
+ >>> last_hidden_states = outputs.last_hidden_state
1184
+ >>> list(last_hidden_states.shape)
1185
+ [1, 4, 1024]
1186
+ ```"""
1187
+
1188
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1189
+ output_hidden_states = (
1190
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1191
+ )
1192
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1193
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1194
+
1195
+ if encoder_outputs is None:
1196
+ encoder_outputs = self.encoder(
1197
+ input_ids=input_ids,
1198
+ attention_mask=attention_mask,
1199
+ head_mask=head_mask,
1200
+ inputs_embeds=inputs_embeds,
1201
+ output_attentions=output_attentions,
1202
+ output_hidden_states=output_hidden_states,
1203
+ return_dict=return_dict,
1204
+ )
1205
+ # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
1206
+ elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
1207
+ encoder_outputs = BaseModelOutput(
1208
+ last_hidden_state=encoder_outputs[0],
1209
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
1210
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
1211
+ )
1212
+
1213
+ # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
1214
+ decoder_outputs = self.decoder(
1215
+ input_ids=decoder_input_ids,
1216
+ attention_mask=decoder_attention_mask,
1217
+ encoder_hidden_states=encoder_outputs[0],
1218
+ encoder_attention_mask=attention_mask,
1219
+ head_mask=decoder_head_mask,
1220
+ cross_attn_head_mask=cross_attn_head_mask,
1221
+ past_key_values=past_key_values,
1222
+ inputs_embeds=decoder_inputs_embeds,
1223
+ use_cache=use_cache,
1224
+ output_attentions=output_attentions,
1225
+ output_hidden_states=output_hidden_states,
1226
+ return_dict=return_dict,
1227
+ )
1228
+
1229
+ if not return_dict:
1230
+ return decoder_outputs + encoder_outputs
1231
+
1232
+ return Seq2SeqModelOutput(
1233
+ last_hidden_state=decoder_outputs.last_hidden_state,
1234
+ past_key_values=decoder_outputs.past_key_values,
1235
+ decoder_hidden_states=decoder_outputs.hidden_states,
1236
+ decoder_attentions=decoder_outputs.attentions,
1237
+ cross_attentions=decoder_outputs.cross_attentions,
1238
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
1239
+ encoder_hidden_states=encoder_outputs.hidden_states,
1240
+ encoder_attentions=encoder_outputs.attentions,
1241
+ )
1242
+
1243
+
1244
+ @add_start_docstrings(
1245
+ "The PEGASUS Model with a language modeling head. Can be used for summarization.", PEGASUS_START_DOCSTRING
1246
+ )
1247
+ class PegasusForConditionalGeneration(PegasusPreTrainedModel):
1248
+ base_model_prefix = "model"
1249
+ _keys_to_ignore_on_load_missing = ["final_logits_bias"]
1250
+ _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight", "lm_head.weight"]
1251
+
1252
+ def __init__(self, config: PegasusConfig):
1253
+ super().__init__(config)
1254
+ self.model = PegasusModel(config)
1255
+ self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings)))
1256
+ self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False)
1257
+
1258
+ # Initialize weights and apply final processing
1259
+ self.post_init()
1260
+
1261
+ def get_encoder(self):
1262
+ return self.model.get_encoder()
1263
+
1264
+ def get_decoder(self):
1265
+ return self.model.get_decoder()
1266
+
1267
+ def resize_token_embeddings(self, new_num_tokens: int, pad_to_multiple_of: Optional[int] = None) -> nn.Embedding:
1268
+ new_embeddings = super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of)
1269
+ self._resize_final_logits_bias(new_embeddings.weight.shape[0])
1270
+ return new_embeddings
1271
+
1272
+ def _resize_final_logits_bias(self, new_num_tokens: int) -> None:
1273
+ old_num_tokens = self.final_logits_bias.shape[-1]
1274
+ if new_num_tokens <= old_num_tokens:
1275
+ new_bias = self.final_logits_bias[:, :new_num_tokens]
1276
+ else:
1277
+ extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device)
1278
+ new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)
1279
+ self.register_buffer("final_logits_bias", new_bias)
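+ # Illustrative example: resizing the vocabulary from 96103 to 96110 tokens appends 7 zero entries to
+ # `final_logits_bias`, while shrinking simply truncates the buffer; the bias is a buffer, never trained.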
1280
+
1281
+ def get_output_embeddings(self):
1282
+ return self.lm_head
1283
+
1284
+ def set_output_embeddings(self, new_embeddings):
1285
+ self.lm_head = new_embeddings
1286
+
1287
+ def resize_position_embeddings(self, new_num_position_embeddings: int):
1288
+ """
1289
+ Resizes position embeddings matrix of the model if `new_num_position_embeddings !=
1290
+ config.max_position_embeddings`.
1291
+
1292
+ Arguments:
1293
+ new_num_position_embeddings (`int`):
1294
+ The number of new position embeddings. If position embeddings are learned, increasing the size will add
1295
+ newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
1296
+ position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
1297
+ add correct vectors at the end following the position encoding algorithm, whereas reducing the size
1298
+ will remove vectors from the end.
1299
+ """
1300
+ self.config.max_position_embeddings = new_num_position_embeddings
1301
+ self.model.encoder.resize_position_embeddings(new_num_position_embeddings)
1302
+ self.model.decoder.resize_position_embeddings(new_num_position_embeddings)
1303
+
1304
+ def get_position_embeddings(self) -> Tuple[nn.Embedding]:
1305
+ """
1306
+ Returns the position embeddings matrix
1307
+ """
1308
+ return (self.model.encoder.get_position_embeddings(), self.model.decoder.get_position_embeddings())
1309
+
1310
+ @add_start_docstrings_to_model_forward(PEGASUS_INPUTS_DOCSTRING)
1311
+ @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
1312
+ @add_end_docstrings(PEGASUS_GENERATION_EXAMPLE)
1313
+ def forward(
1314
+ self,
1315
+ input_ids: Optional[torch.Tensor] = None,
1316
+ attention_mask: Optional[torch.Tensor] = None,
1317
+ decoder_input_ids: Optional[torch.Tensor] = None,
1318
+ decoder_attention_mask: Optional[torch.Tensor] = None,
1319
+ head_mask: Optional[torch.Tensor] = None,
1320
+ decoder_head_mask: Optional[torch.Tensor] = None,
1321
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
1322
+ encoder_outputs: Optional[Tuple[torch.FloatTensor]] = None,
1323
+ past_key_values: Optional[Tuple[torch.FloatTensor]] = None,
1324
+ inputs_embeds: Optional[torch.Tensor] = None,
1325
+ decoder_inputs_embeds: Optional[torch.Tensor] = None,
1326
+ labels: Optional[torch.Tensor] = None,
1327
+ use_cache: Optional[bool] = None,
1328
+ output_attentions: Optional[bool] = None,
1329
+ output_hidden_states: Optional[bool] = None,
1330
+ return_dict: Optional[bool] = None,
1331
+ ) -> Union[Tuple, Seq2SeqLMOutput]:
1332
+ r"""
1333
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1334
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1335
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1336
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1337
+
1338
+ Returns:
1339
+
1340
+ """
1341
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1342
+
1343
+ if labels is not None:
1344
+ if use_cache:
1345
+ logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.")
1346
+ use_cache = False
1347
+ if decoder_input_ids is None and decoder_inputs_embeds is None:
1348
+ decoder_input_ids = shift_tokens_right(
1349
+ labels, self.config.pad_token_id, self.config.decoder_start_token_id
1350
+ )
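+ # Illustrative example of the shift: with pad/decoder-start id 0, labels [[42, 77, 1]] become
+ # decoder_input_ids [[0, 42, 77]], so the decoder learns to predict each label token from the
+ # previous ones (teacher forcing).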
1351
+
1352
+ outputs = self.model(
1353
+ input_ids,
1354
+ attention_mask=attention_mask,
1355
+ decoder_input_ids=decoder_input_ids,
1356
+ encoder_outputs=encoder_outputs,
1357
+ decoder_attention_mask=decoder_attention_mask,
1358
+ head_mask=head_mask,
1359
+ decoder_head_mask=decoder_head_mask,
1360
+ cross_attn_head_mask=cross_attn_head_mask,
1361
+ past_key_values=past_key_values,
1362
+ inputs_embeds=inputs_embeds,
1363
+ decoder_inputs_embeds=decoder_inputs_embeds,
1364
+ use_cache=use_cache,
1365
+ output_attentions=output_attentions,
1366
+ output_hidden_states=output_hidden_states,
1367
+ return_dict=return_dict,
1368
+ )
1369
+ lm_logits = self.lm_head(outputs[0]) + self.final_logits_bias
1370
+
1371
+ masked_lm_loss = None
1372
+ if labels is not None:
1373
+ loss_fct = CrossEntropyLoss()
1374
+ masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
1375
+
1376
+ if not return_dict:
1377
+ output = (lm_logits,) + outputs[1:]
1378
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
1379
+
1380
+ return Seq2SeqLMOutput(
1381
+ loss=masked_lm_loss,
1382
+ logits=lm_logits,
1383
+ past_key_values=outputs.past_key_values,
1384
+ decoder_hidden_states=outputs.decoder_hidden_states,
1385
+ decoder_attentions=outputs.decoder_attentions,
1386
+ cross_attentions=outputs.cross_attentions,
1387
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
1388
+ encoder_hidden_states=outputs.encoder_hidden_states,
1389
+ encoder_attentions=outputs.encoder_attentions,
1390
+ )
1391
+
1392
+ def prepare_inputs_for_generation(
1393
+ self,
1394
+ decoder_input_ids,
1395
+ past_key_values=None,
1396
+ attention_mask=None,
1397
+ head_mask=None,
1398
+ decoder_head_mask=None,
1399
+ cross_attn_head_mask=None,
1400
+ use_cache=None,
1401
+ encoder_outputs=None,
1402
+ **kwargs,
1403
+ ):
1404
+ # cut decoder_input_ids if past is used
1405
+ if past_key_values is not None:
1406
+ past_length = past_key_values[0][0].shape[2]
1407
+
1408
+ # Some generation methods already pass only the last input ID
1409
+ if decoder_input_ids.shape[1] > past_length:
1410
+ remove_prefix_length = past_length
1411
+ else:
1412
+ # Default to old behavior: keep only final ID
1413
+ remove_prefix_length = decoder_input_ids.shape[1] - 1
1414
+
1415
+ decoder_input_ids = decoder_input_ids[:, remove_prefix_length:]
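+ # Illustrative example: with past_length = 4 cached positions and decoder_input_ids of length 5,
+ # only the final token is kept here, because the first 4 tokens are already represented in
+ # `past_key_values`.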
1416
+
1417
+ return {
1418
+ "input_ids": None, # encoder_outputs is defined. input_ids not needed
1419
+ "encoder_outputs": encoder_outputs,
1420
+ "past_key_values": past_key_values,
1421
+ "decoder_input_ids": decoder_input_ids,
1422
+ "attention_mask": attention_mask,
1423
+ "head_mask": head_mask,
1424
+ "decoder_head_mask": decoder_head_mask,
1425
+ "cross_attn_head_mask": cross_attn_head_mask,
1426
+ "use_cache": use_cache, # change this to avoid caching (presumably for debugging)
1427
+ }
1428
+
1429
+ def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
1430
+ return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
1431
+
1432
+ @staticmethod
1433
+ def _reorder_cache(past_key_values, beam_idx):
1434
+ reordered_past = ()
1435
+ for layer_past in past_key_values:
1436
+ # cached cross_attention states don't have to be reordered -> they are always the same
1437
+ reordered_past += (
1438
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past[:2])
1439
+ + layer_past[2:],
1440
+ )
1441
+ return reordered_past
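+ # Illustrative example: during beam search with beam_idx = tensor([2, 0, 1]), the self-attention
+ # caches (layer_past[:2]) are gathered along the batch dimension so each surviving beam keeps the
+ # cache of the hypothesis it extends; cross-attention caches (layer_past[2:]) depend only on the
+ # encoder output and are left untouched.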
1442
+
1443
+
1444
+ # Copied from transformers.models.bart.modeling_bart.BartDecoderWrapper with Bart->Pegasus
1445
+ class PegasusDecoderWrapper(PegasusPreTrainedModel):
1446
+ """
1447
+ This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is
1448
+ used in combination with the [`EncoderDecoderModel`] framework.
1449
+ """
1450
+
1451
+ def __init__(self, config):
1452
+ super().__init__(config)
1453
+ self.decoder = PegasusDecoder(config)
1454
+
1455
+ def forward(self, *args, **kwargs):
1456
+ return self.decoder(*args, **kwargs)
1457
+
1458
+
1459
+ class PegasusForCausalLM(PegasusPreTrainedModel):
1460
+ _tied_weights_keys = ["lm_head.weight"]
1461
+
1462
+ def __init__(self, config):
1463
+ config = copy.deepcopy(config)
1464
+ config.is_decoder = True
1465
+ config.is_encoder_decoder = False
1466
+ super().__init__(config)
1467
+ self.model = PegasusDecoderWrapper(config)
1468
+
1469
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1470
+
1471
+ # Initialize weights and apply final processing
1472
+ self.post_init()
1473
+
1474
+ def get_input_embeddings(self):
1475
+ return self.model.decoder.embed_tokens
1476
+
1477
+ def set_input_embeddings(self, value):
1478
+ self.model.decoder.embed_tokens = value
1479
+
1480
+ def get_output_embeddings(self):
1481
+ return self.lm_head
1482
+
1483
+ def set_output_embeddings(self, new_embeddings):
1484
+ self.lm_head = new_embeddings
1485
+
1486
+ def set_decoder(self, decoder):
1487
+ self.model.decoder = decoder
1488
+
1489
+ def get_decoder(self):
1490
+ return self.model.decoder
1491
+
1492
+ def get_position_embeddings(self) -> nn.Embedding:
1493
+ """
1494
+ Returns the position embeddings matrix
1495
+ """
1496
+ return self.model.decoder.get_position_embeddings()
1497
+
1498
+ def resize_position_embeddings(self, new_num_position_embeddings: int):
1499
+ """
1500
+ Resizes position embeddings matrix of the model if `new_num_position_embeddings !=
1501
+ config.max_position_embeddings`.
1502
+
1503
+ Arguments:
1504
+ new_num_position_embeddings (`int`):
1505
+ The number of new position embeddings. If position embeddings are learned, increasing the size will add
1506
+ newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
1507
+ position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
1508
+ add correct vectors at the end following the position encoding algorithm, whereas reducing the size
1509
+ will remove vectors from the end.
1510
+ """
1511
+ self.config.max_position_embeddings = new_num_position_embeddings
1512
+ self.model.decoder.resize_position_embeddings(new_num_position_embeddings)
1513
+
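A hedged usage sketch for the method above (it assumes the `google/pegasus-large` checkpoint can be downloaded): since Pegasus uses sinusoidal position embeddings, growing the table appends freshly computed rows rather than learned parameters.

```python
from transformers import PegasusForCausalLM

# Sketch only: extend the usable context beyond the checkpoint default.
model = PegasusForCausalLM.from_pretrained("google/pegasus-large")
model.resize_position_embeddings(2048)  # sinusoidal rows appended at the end
assert model.config.max_position_embeddings == 2048
```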
1514
+ @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
1515
+ # Copied from transformers.models.bart.modeling_bart.BartForCausalLM.forward with Bart->Pegasus, facebook/bart-base->google/pegasus-large
1516
+ def forward(
1517
+ self,
1518
+ input_ids: torch.LongTensor = None,
1519
+ attention_mask: Optional[torch.Tensor] = None,
1520
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
1521
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
1522
+ head_mask: Optional[torch.Tensor] = None,
1523
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
1524
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1525
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1526
+ labels: Optional[torch.LongTensor] = None,
1527
+ use_cache: Optional[bool] = None,
1528
+ output_attentions: Optional[bool] = None,
1529
+ output_hidden_states: Optional[bool] = None,
1530
+ return_dict: Optional[bool] = None,
1531
+ ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
1532
+ r"""
1533
+ Args:
1534
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
1535
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
1536
+ provide it.
1537
+
1538
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
1539
+ [`PreTrainedTokenizer.__call__`] for details.
1540
+
1541
+ [What are input IDs?](../glossary#input-ids)
1542
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1543
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1544
+
1545
+ - 1 for tokens that are **not masked**,
1546
+ - 0 for tokens that are **masked**.
1547
+
1548
+ [What are attention masks?](../glossary#attention-mask)
1549
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1550
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
1551
+ if the model is configured as a decoder.
1552
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
1553
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used
1554
+ in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
1555
+ head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
1556
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
1557
+
1558
+ - 1 indicates the head is **not masked**,
1559
+ - 0 indicates the head is **masked**.
1560
+
1561
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
1562
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
1563
+
1564
+ - 1 indicates the head is **not masked**,
1565
+ - 0 indicates the head is **masked**.
1566
+
1567
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
1568
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
1569
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
1570
+ shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional
1571
+ tensors are only required when the model is used as a decoder in a Sequence to Sequence model.
1572
+
1573
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
1574
+ cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
1575
+
1576
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
1577
+ that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
1578
+ all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1579
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1580
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1581
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1582
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1583
+ use_cache (`bool`, *optional*):
1584
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
1585
+ (see `past_key_values`).
1586
+
1587
+ - 1 for tokens that are **not masked**,
1588
+ - 0 for tokens that are **masked**.
1589
+ output_attentions (`bool`, *optional*):
1590
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
1591
+ returned tensors for more detail.
1592
+ output_hidden_states (`bool`, *optional*):
1593
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
1594
+ for more detail.
1595
+ return_dict (`bool`, *optional*):
1596
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
1597
+
1598
+ Returns:
1599
+
1600
+ Example:
1601
+
1602
+ ```python
1603
+ >>> from transformers import AutoTokenizer, PegasusForCausalLM
1604
+
1605
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/pegasus-large")
1606
+ >>> model = PegasusForCausalLM.from_pretrained("google/pegasus-large", add_cross_attention=False)
1607
+ >>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder."
1608
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
1609
+ >>> outputs = model(**inputs)
1610
+
1611
+ >>> logits = outputs.logits
1612
+ >>> expected_shape = [1, inputs.input_ids.shape[-1], model.config.vocab_size]
1613
+ >>> list(logits.shape) == expected_shape
1614
+ True
1615
+ ```"""
1616
+
1617
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1618
+ output_hidden_states = (
1619
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1620
+ )
1621
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1622
+
1623
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
1624
+ outputs = self.model.decoder(
1625
+ input_ids=input_ids,
1626
+ attention_mask=attention_mask,
1627
+ encoder_hidden_states=encoder_hidden_states,
1628
+ encoder_attention_mask=encoder_attention_mask,
1629
+ head_mask=head_mask,
1630
+ cross_attn_head_mask=cross_attn_head_mask,
1631
+ past_key_values=past_key_values,
1632
+ inputs_embeds=inputs_embeds,
1633
+ use_cache=use_cache,
1634
+ output_attentions=output_attentions,
1635
+ output_hidden_states=output_hidden_states,
1636
+ return_dict=return_dict,
1637
+ )
1638
+
1639
+ logits = self.lm_head(outputs[0])
1640
+
1641
+ loss = None
1642
+ if labels is not None:
1643
+ labels = labels.to(logits.device)
1644
+ loss_fct = CrossEntropyLoss()
1645
+ loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
1646
+
1647
+ if not return_dict:
1648
+ output = (logits,) + outputs[1:]
1649
+ return (loss,) + output if loss is not None else output
1650
+
1651
+ return CausalLMOutputWithCrossAttentions(
1652
+ loss=loss,
1653
+ logits=logits,
1654
+ past_key_values=outputs.past_key_values,
1655
+ hidden_states=outputs.hidden_states,
1656
+ attentions=outputs.attentions,
1657
+ cross_attentions=outputs.cross_attentions,
1658
+ )
1659
+
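To make the `-100` convention from the `labels` docstring concrete, here is a minimal sketch of the same flattened cross-entropy computation (toy shapes); `CrossEntropyLoss` skips `-100` positions through its default `ignore_index`:

```python
import torch
from torch.nn import CrossEntropyLoss

vocab_size = 10
logits = torch.randn(1, 4, vocab_size)    # (batch, seq_len, vocab)
labels = torch.tensor([[3, 7, -100, 2]])  # -100 -> token excluded from the loss

loss_fct = CrossEntropyLoss()             # ignore_index defaults to -100
loss = loss_fct(logits.view(-1, vocab_size), labels.view(-1))
print(loss)                               # mean over the three non-ignored positions
```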
1660
+ def prepare_inputs_for_generation(
1661
+ self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, **kwargs
1662
+ ):
1663
+ # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
1664
+ if attention_mask is None:
1665
+ attention_mask = input_ids.new_ones(input_ids.shape)
1666
+
1667
+ if past_key_values:
1668
+ past_length = past_key_values[0][0].shape[2]
1669
+
1670
+ # Some generation methods already pass only the last input ID
1671
+ if input_ids.shape[1] > past_length:
1672
+ remove_prefix_length = past_length
1673
+ else:
1674
+ # Default to old behavior: keep only final ID
1675
+ remove_prefix_length = input_ids.shape[1] - 1
1676
+
1677
+ input_ids = input_ids[:, remove_prefix_length:]
1678
+ # first step, decoder_cached_states are empty
1679
+ return {
1680
+ "input_ids": input_ids, # encoder_outputs is defined. input_ids not needed
1681
+ "attention_mask": attention_mask,
1682
+ "past_key_values": past_key_values,
1683
+ "use_cache": use_cache,
1684
+ }
1685
+
1686
+ @staticmethod
1687
+ def _reorder_cache(past_key_values, beam_idx):
1688
+ reordered_past = ()
1689
+ for layer_past in past_key_values:
1690
+ reordered_past += (
1691
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
1692
+ )
1693
+ return reordered_past
venv/lib/python3.10/site-packages/transformers/models/pegasus/modeling_tf_pegasus.py ADDED
@@ -0,0 +1,1572 @@
1
+ # coding=utf-8
2
+ # Copyright 2021, Google Inc. and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ TF 2.0 Pegasus model."""
16
+
17
+
18
+ from __future__ import annotations
19
+
20
+ import random
21
+ from typing import Optional, Tuple, Union
22
+
23
+ import numpy as np
24
+ import tensorflow as tf
25
+
26
+ from ...activations_tf import get_tf_activation
27
+ from ...modeling_tf_outputs import (
28
+ TFBaseModelOutput,
29
+ TFBaseModelOutputWithPastAndCrossAttentions,
30
+ TFSeq2SeqLMOutput,
31
+ TFSeq2SeqModelOutput,
32
+ )
33
+
34
+ # Public API
35
+ from ...modeling_tf_utils import (
36
+ TFCausalLanguageModelingLoss,
37
+ TFModelInputType,
38
+ TFPreTrainedModel,
39
+ keras,
40
+ keras_serializable,
41
+ unpack_inputs,
42
+ )
43
+ from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
44
+ from ...utils import (
45
+ add_code_sample_docstrings,
46
+ add_end_docstrings,
47
+ add_start_docstrings,
48
+ add_start_docstrings_to_model_forward,
49
+ logging,
50
+ replace_return_docstrings,
51
+ )
52
+ from .configuration_pegasus import PegasusConfig
53
+
54
+
55
+ logger = logging.get_logger(__name__)
56
+
57
+ _CHECKPOINT_FOR_DOC = "google/pegasus-large"
58
+ _CONFIG_FOR_DOC = "PegasusConfig"
59
+
60
+
61
+ LARGE_NEGATIVE = -1e8
62
+
63
+
64
+ # Copied from transformers.models.bart.modeling_tf_bart.shift_tokens_right
65
+ def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_token_id: int):
66
+ pad_token_id = tf.cast(pad_token_id, input_ids.dtype)
67
+ decoder_start_token_id = tf.cast(decoder_start_token_id, input_ids.dtype)
68
+ start_tokens = tf.fill(
69
+ (shape_list(input_ids)[0], 1), tf.convert_to_tensor(decoder_start_token_id, input_ids.dtype)
70
+ )
71
+ shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1)
72
+ # replace possible -100 values in labels by `pad_token_id`
73
+ shifted_input_ids = tf.where(
74
+ shifted_input_ids == -100,
75
+ tf.fill(shape_list(shifted_input_ids), tf.convert_to_tensor(pad_token_id, input_ids.dtype)),
76
+ shifted_input_ids,
77
+ )
78
+
79
+ # "Verify that `labels` has only positive values and -100"
80
+ assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype))
81
+
82
+ # Make sure the assertion op is called by wrapping the result in an identity no-op
83
+ with tf.control_dependencies([assert_gte0]):
84
+ shifted_input_ids = tf.identity(shifted_input_ids)
85
+
86
+ return shifted_input_ids
87
+
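A small, hedged demonstration of the helper above (TensorFlow eager mode, toy ids, assuming the `shift_tokens_right` defined in this module is in scope). Pegasus uses id 0 both as pad token and as decoder start token, so the labels are shifted right, a start token is prepended, and any remaining `-100` is replaced by the pad id:

```python
import tensorflow as tf

labels = tf.constant([[45, 72, 8, -100, -100]], dtype=tf.int32)
pad_token_id, decoder_start_token_id = 0, 0  # Pegasus defaults (assumption)

shifted = shift_tokens_right(labels, pad_token_id, decoder_start_token_id)
print(shifted.numpy())  # [[ 0 45 72  8  0]] -- start token prepended, trailing -100 padded out
```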
88
+
89
+ # Copied from transformers.models.bart.modeling_tf_bart._make_causal_mask
90
+ def _make_causal_mask(input_ids_shape: tf.TensorShape, past_key_values_length: int = 0):
91
+ """
92
+ Make causal mask used for uni-directional (causal) self-attention.
93
+ """
94
+ bsz = input_ids_shape[0]
95
+ tgt_len = input_ids_shape[1]
96
+ mask = tf.ones((tgt_len, tgt_len)) * LARGE_NEGATIVE
97
+ mask_cond = tf.range(shape_list(mask)[-1])
98
+
99
+ mask = tf.where(mask_cond < tf.reshape(mask_cond + 1, (shape_list(mask)[-1], 1)), 0.0, mask)
100
+
101
+ if past_key_values_length > 0:
102
+ mask = tf.concat([tf.zeros((tgt_len, past_key_values_length)), mask], axis=-1)
103
+
104
+ return tf.tile(mask[None, None, :, :], (bsz, 1, 1, 1))
105
+
106
+
107
+ # Copied from transformers.models.bart.modeling_tf_bart._expand_mask
108
+ def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None):
109
+ """
110
+ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
111
+ """
112
+ src_len = shape_list(mask)[1]
113
+ tgt_len = tgt_len if tgt_len is not None else src_len
114
+ one_cst = tf.constant(1.0)
115
+ mask = tf.cast(mask, dtype=one_cst.dtype)
116
+ expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1))
117
+
118
+ return (one_cst - expanded_mask) * LARGE_NEGATIVE
119
+
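As a quick sanity check on the two helpers above (toy sizes, assuming `_make_causal_mask` and `_expand_mask` are in scope): the first builds an additive lower-triangular mask of `LARGE_NEGATIVE` values, the second lifts a `[bsz, seq_len]` padding mask into the same additive `[bsz, 1, tgt_len, src_len]` layout.

```python
import tensorflow as tf

# Causal mask for 3 target positions, empty cache: position i may attend to j <= i.
causal = _make_causal_mask(tf.TensorShape([1, 3]))
print(causal.shape)           # (1, 1, 3, 3)
print(causal[0, 0].numpy())   # 0.0 on/below the diagonal, ~-1e8 above it

# Padding mask: the last source token is padding.
padded = _expand_mask(tf.constant([[1, 1, 0]]), tgt_len=3)
print(padded[0, 0].numpy())   # last column is ~-1e8 in every row
```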
120
+
121
+ # Copied from transformers.models.marian.modeling_tf_marian.TFMarianSinusoidalPositionalEmbedding with Marian->Pegasus
122
+ class TFPegasusSinusoidalPositionalEmbedding(keras.layers.Layer):
123
+ """This module produces sinusoidal positional embeddings of any length."""
124
+
125
+ def __init__(self, num_positions: int, embedding_dim: int, **kwargs):
126
+ super().__init__(**kwargs)
127
+
128
+ if embedding_dim % 2 != 0:
129
+ raise NotImplementedError(f"odd embedding_dim {embedding_dim} not supported")
130
+
131
+ self.embedding_dim = embedding_dim
132
+ self.num_positions = num_positions
133
+
134
+ def build(self, input_shape: tf.TensorShape):
135
+ """
136
+ Build shared token embedding layer Shared weights logic adapted from
137
+ https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24
138
+ """
139
+
140
+ weight = self._init_weight(self.num_positions, self.embedding_dim)
141
+
142
+ self.weight = self.add_weight(
143
+ name="embeddings",
144
+ shape=[self.num_positions, self.embedding_dim],
145
+ )
146
+ weight = tf.cast(weight, dtype=self.weight.dtype)
147
+
148
+ self.weight.assign(weight)
149
+
150
+ super().build(input_shape)
151
+
152
+ @staticmethod
153
+ def _init_weight(n_pos: int, dim: int):
154
+ """
155
+ Identical to the XLM create_sinusoidal_embeddings except features are not interleaved. The cos features are in
156
+ the 2nd half of the vector. [dim // 2:]
157
+ """
158
+ position_enc = np.array(
159
+ [[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]
160
+ )
161
+ table = np.zeros_like(position_enc)
162
+ # index 0 is all zero
163
+ table[:, 0 : dim // 2] = np.sin(position_enc[:, 0::2])
164
+ table[:, dim // 2 :] = np.cos(position_enc[:, 1::2])
165
+ # convert to tensor
166
+ table = tf.convert_to_tensor(table)
167
+ tf.stop_gradient(table)
168
+ return table
169
+
170
+ def call(
171
+ self, input_shape: tf.TensorShape, past_key_values_length: int = 0, position_ids: tf.Tensor | None = None
172
+ ):
173
+ """Input is expected to be of size [bsz x seqlen]."""
174
+ if position_ids is None:
175
+ seq_len = input_shape[1]
176
+ position_ids = tf.range(past_key_values_length, seq_len + past_key_values_length, delta=1, name="range")
177
+ return tf.gather(self.weight, position_ids)
178
+
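A hedged numerical sketch of `_init_weight` (NumPy only, toy sizes): sine features fill the first half of each position vector and cosine features the second half, rather than being interleaved as in some other sinusoidal implementations.

```python
import numpy as np

n_pos, dim = 4, 6  # toy table: 4 positions, 6-dimensional embeddings
position_enc = np.array(
    [[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]
)
table = np.zeros_like(position_enc)
table[:, 0 : dim // 2] = np.sin(position_enc[:, 0::2])  # first half: sin
table[:, dim // 2 :] = np.cos(position_enc[:, 1::2])    # second half: cos

print(table[0])  # position 0 -> sin terms are 0.0, cos terms are 1.0
```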
179
+
180
+ # Copied from transformers.models.bart.modeling_tf_bart.TFBartAttention with Bart->Pegasus
181
+ class TFPegasusAttention(keras.layers.Layer):
182
+ """Multi-headed attention from "Attention Is All You Need"""
183
+
184
+ def __init__(
185
+ self,
186
+ embed_dim: int,
187
+ num_heads: int,
188
+ dropout: float = 0.0,
189
+ is_decoder: bool = False,
190
+ bias: bool = True,
191
+ **kwargs,
192
+ ):
193
+ super().__init__(**kwargs)
194
+ self.embed_dim = embed_dim
195
+
196
+ self.num_heads = num_heads
197
+ self.dropout = keras.layers.Dropout(dropout)
198
+ self.head_dim = embed_dim // num_heads
199
+ if (self.head_dim * num_heads) != self.embed_dim:
200
+ raise ValueError(
201
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
202
+ f" and `num_heads`: {num_heads})."
203
+ )
204
+ self.scaling = self.head_dim**-0.5
205
+ self.is_decoder = is_decoder
206
+
207
+ self.k_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="k_proj")
208
+ self.q_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="q_proj")
209
+ self.v_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="v_proj")
210
+ self.out_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="out_proj")
211
+
212
+ def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int):
213
+ return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3))
214
+
215
+ def call(
216
+ self,
217
+ hidden_states: tf.Tensor,
218
+ key_value_states: tf.Tensor | None = None,
219
+ past_key_value: Tuple[Tuple[tf.Tensor]] | None = None,
220
+ attention_mask: tf.Tensor | None = None,
221
+ layer_head_mask: tf.Tensor | None = None,
222
+ training: Optional[bool] = False,
223
+ ) -> Tuple[tf.Tensor, tf.Tensor | None]:
224
+ """Input shape: Batch x Time x Channel"""
225
+
226
+ # if key_value_states are provided this layer is used as a cross-attention layer
227
+ # for the decoder
228
+ is_cross_attention = key_value_states is not None
229
+ bsz, tgt_len, embed_dim = shape_list(hidden_states)
230
+
231
+ # get query proj
232
+ query_states = self.q_proj(hidden_states) * self.scaling
233
+ # get key, value proj
234
+ if is_cross_attention and past_key_value is not None:
235
+ # reuse k,v, cross_attentions
236
+ key_states = past_key_value[0]
237
+ value_states = past_key_value[1]
238
+ elif is_cross_attention:
239
+ # cross_attentions
240
+ key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
241
+ value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
242
+ elif past_key_value is not None:
243
+ # reuse k, v, self_attention
244
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
245
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
246
+ key_states = tf.concat([past_key_value[0], key_states], axis=2)
247
+ value_states = tf.concat([past_key_value[1], value_states], axis=2)
248
+ else:
249
+ # self_attention
250
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
251
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
252
+
253
+ if self.is_decoder:
254
+ # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states.
255
+ # Further calls to cross_attention layer can then reuse all cross-attention
256
+ # key/value_states (first "if" case)
257
+ # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of
258
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
259
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
260
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
261
+ past_key_value = (key_states, value_states)
262
+
263
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
264
+ query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape)
265
+ key_states = tf.reshape(key_states, proj_shape)
266
+ value_states = tf.reshape(value_states, proj_shape)
267
+
268
+ src_len = shape_list(key_states)[1]
269
+ attn_weights = tf.matmul(query_states, key_states, transpose_b=True)
270
+
271
+ tf.debugging.assert_equal(
272
+ shape_list(attn_weights),
273
+ [bsz * self.num_heads, tgt_len, src_len],
274
+ message=(
275
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
276
+ f" {shape_list(attn_weights)}"
277
+ ),
278
+ )
279
+
280
+ if attention_mask is not None:
281
+ tf.debugging.assert_equal(
282
+ shape_list(attention_mask),
283
+ [bsz, 1, tgt_len, src_len],
284
+ message=(
285
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
286
+ f" {shape_list(attention_mask)}"
287
+ ),
288
+ )
289
+
290
+ attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype)
291
+ attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask
292
+ attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))
293
+
294
+ attn_weights = stable_softmax(attn_weights, axis=-1)
295
+
296
+ if layer_head_mask is not None:
297
+ tf.debugging.assert_equal(
298
+ shape_list(layer_head_mask),
299
+ [self.num_heads],
300
+ message=(
301
+ f"Head mask for a single layer should be of size {(self.num_heads)}, but is"
302
+ f" {shape_list(layer_head_mask)}"
303
+ ),
304
+ )
305
+
306
+ attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape(
307
+ attn_weights, (bsz, self.num_heads, tgt_len, src_len)
308
+ )
309
+ attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))
310
+
311
+ attn_probs = self.dropout(attn_weights, training=training)
312
+ attn_output = tf.matmul(attn_probs, value_states)
313
+
314
+ tf.debugging.assert_equal(
315
+ shape_list(attn_output),
316
+ [bsz * self.num_heads, tgt_len, self.head_dim],
317
+ message=(
318
+ f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
319
+ f" {shape_list(attn_output)}"
320
+ ),
321
+ )
322
+
323
+ attn_output = tf.transpose(
324
+ tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3)
325
+ )
326
+ attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim))
327
+
328
+ attn_output = self.out_proj(attn_output)
329
+ attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len))
330
+
331
+ return attn_output, attn_weights, past_key_value
332
+
333
+ def build(self, input_shape=None):
334
+ if self.built:
335
+ return
336
+ self.built = True
337
+ if getattr(self, "k_proj", None) is not None:
338
+ with tf.name_scope(self.k_proj.name):
339
+ self.k_proj.build([None, None, self.embed_dim])
340
+ if getattr(self, "q_proj", None) is not None:
341
+ with tf.name_scope(self.q_proj.name):
342
+ self.q_proj.build([None, None, self.embed_dim])
343
+ if getattr(self, "v_proj", None) is not None:
344
+ with tf.name_scope(self.v_proj.name):
345
+ self.v_proj.build([None, None, self.embed_dim])
346
+ if getattr(self, "out_proj", None) is not None:
347
+ with tf.name_scope(self.out_proj.name):
348
+ self.out_proj.build([None, None, self.embed_dim])
349
+
350
+
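To follow the reshaping inside `TFPegasusAttention.call`, here is a minimal shape walkthrough (plain TensorFlow, toy dimensions): heads are split out, folded into the batch axis for one large batched matmul, and merged back afterwards.

```python
import tensorflow as tf

bsz, seq_len, num_heads, head_dim = 2, 5, 4, 8
embed_dim = num_heads * head_dim

hidden = tf.random.normal((bsz, seq_len, embed_dim))

# split heads: (bsz, seq_len, embed) -> (bsz, num_heads, seq_len, head_dim)
split = tf.transpose(tf.reshape(hidden, (bsz, seq_len, num_heads, head_dim)), (0, 2, 1, 3))
# fold heads into the batch axis so a single matmul covers every head
proj = tf.reshape(split, (bsz * num_heads, seq_len, head_dim))

attn_weights = tf.matmul(proj, proj, transpose_b=True)
print(attn_weights.shape)  # (8, 5, 5) == (bsz * num_heads, tgt_len, src_len)

# merge heads back: inverse of the split above
merged = tf.reshape(
    tf.transpose(tf.reshape(proj, (bsz, num_heads, seq_len, head_dim)), (0, 2, 1, 3)),
    (bsz, seq_len, embed_dim),
)
print(merged.shape)  # (2, 5, 32)
```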
351
+ # Copied from transformers.models.mbart.modeling_tf_mbart.TFMBartEncoderLayer with MBart->Pegasus
352
+ class TFPegasusEncoderLayer(keras.layers.Layer):
353
+ def __init__(self, config: PegasusConfig, **kwargs):
354
+ super().__init__(**kwargs)
355
+ self.embed_dim = config.d_model
356
+ self.self_attn = TFPegasusAttention(
357
+ self.embed_dim, config.encoder_attention_heads, dropout=config.attention_dropout, name="self_attn"
358
+ )
359
+ self.self_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm")
360
+ self.dropout = keras.layers.Dropout(config.dropout)
361
+ self.activation_fn = get_tf_activation(config.activation_function)
362
+ self.activation_dropout = keras.layers.Dropout(config.activation_dropout)
363
+ self.fc1 = keras.layers.Dense(config.encoder_ffn_dim, name="fc1")
364
+ self.fc2 = keras.layers.Dense(self.embed_dim, name="fc2")
365
+ self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm")
366
+ self.config = config
367
+
368
+ def call(
369
+ self,
370
+ hidden_states: tf.Tensor,
371
+ attention_mask: tf.Tensor,
372
+ layer_head_mask: tf.Tensor,
373
+ training: Optional[bool] = False,
374
+ ):
375
+ """
376
+ Args:
377
+ hidden_states (`tf.Tensor`): input to the layer of shape *(batch, seq_len, embed_dim)*
378
+ attention_mask (`tf.Tensor`): attention mask of size
379
+ *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
380
+ layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size
381
+ *(encoder_attention_heads,)*
382
+ """
383
+ residual = hidden_states
384
+ hidden_states = self.self_attn_layer_norm(hidden_states)
385
+ hidden_states, self_attn_weights, _ = self.self_attn(
386
+ hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask
387
+ )
388
+
389
+ tf.debugging.assert_equal(
390
+ shape_list(hidden_states),
391
+ shape_list(residual),
392
+ message=f"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}",
393
+ )
394
+
395
+ hidden_states = self.dropout(hidden_states, training=training)
396
+ hidden_states = residual + hidden_states
397
+
398
+ residual = hidden_states
399
+ hidden_states = self.final_layer_norm(hidden_states)
400
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
401
+ hidden_states = self.activation_dropout(hidden_states, training=training)
402
+ hidden_states = self.fc2(hidden_states)
403
+ hidden_states = self.dropout(hidden_states, training=training)
404
+ hidden_states = residual + hidden_states
405
+
406
+ return hidden_states, self_attn_weights
407
+
408
+ def build(self, input_shape=None):
409
+ if self.built:
410
+ return
411
+ self.built = True
412
+ if getattr(self, "self_attn", None) is not None:
413
+ with tf.name_scope(self.self_attn.name):
414
+ self.self_attn.build(None)
415
+ if getattr(self, "self_attn_layer_norm", None) is not None:
416
+ with tf.name_scope(self.self_attn_layer_norm.name):
417
+ self.self_attn_layer_norm.build([None, None, self.embed_dim])
418
+ if getattr(self, "fc1", None) is not None:
419
+ with tf.name_scope(self.fc1.name):
420
+ self.fc1.build([None, None, self.embed_dim])
421
+ if getattr(self, "fc2", None) is not None:
422
+ with tf.name_scope(self.fc2.name):
423
+ self.fc2.build([None, None, self.config.encoder_ffn_dim])
424
+ if getattr(self, "final_layer_norm", None) is not None:
425
+ with tf.name_scope(self.final_layer_norm.name):
426
+ self.final_layer_norm.build([None, None, self.embed_dim])
427
+
428
+
429
+ # Copied from transformers.models.mbart.modeling_tf_mbart.TFMBartDecoderLayer with MBart->Pegasus
430
+ class TFPegasusDecoderLayer(keras.layers.Layer):
431
+ def __init__(self, config: PegasusConfig, **kwargs):
432
+ super().__init__(**kwargs)
433
+ self.embed_dim = config.d_model
434
+ self.self_attn = TFPegasusAttention(
435
+ embed_dim=self.embed_dim,
436
+ num_heads=config.decoder_attention_heads,
437
+ dropout=config.attention_dropout,
438
+ name="self_attn",
439
+ is_decoder=True,
440
+ )
441
+ self.dropout = keras.layers.Dropout(config.dropout)
442
+ self.activation_fn = get_tf_activation(config.activation_function)
443
+ self.activation_dropout = keras.layers.Dropout(config.activation_dropout)
444
+
445
+ self.self_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm")
446
+ self.encoder_attn = TFPegasusAttention(
447
+ self.embed_dim,
448
+ config.decoder_attention_heads,
449
+ dropout=config.attention_dropout,
450
+ name="encoder_attn",
451
+ is_decoder=True,
452
+ )
453
+ self.encoder_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="encoder_attn_layer_norm")
454
+ self.fc1 = keras.layers.Dense(config.decoder_ffn_dim, name="fc1")
455
+ self.fc2 = keras.layers.Dense(self.embed_dim, name="fc2")
456
+ self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm")
457
+ self.config = config
458
+
459
+ def call(
460
+ self,
461
+ hidden_states: tf.Tensor,
462
+ attention_mask: tf.Tensor | None = None,
463
+ encoder_hidden_states: tf.Tensor | None = None,
464
+ encoder_attention_mask: tf.Tensor | None = None,
465
+ layer_head_mask: tf.Tensor | None = None,
466
+ cross_attn_layer_head_mask: tf.Tensor | None = None,
467
+ past_key_value: Tuple[tf.Tensor] | None = None,
468
+ training: Optional[bool] = False,
469
+ ) -> Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]:
470
+ """
471
+ Args:
472
+ hidden_states (`tf.Tensor`): input to the layer of shape *(batch, seq_len, embed_dim)*
473
+ attention_mask (`tf.Tensor`): attention mask of size
474
+ *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
475
+ encoder_hidden_states (`tf.Tensor`):
476
+ cross attention input to the layer of shape *(batch, seq_len, embed_dim)*
477
+ encoder_attention_mask (`tf.Tensor`): encoder attention mask of size
478
+ *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
479
+ layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size
480
+ *(decoder_attention_heads,)*
481
+ cross_attn_layer_head_mask (`tf.Tensor`): mask for heads of the cross-attention module.
482
+ *(decoder_attention_heads,)*
483
+ past_key_value (`Tuple(tf.Tensor)`): cached past key and value projection states
484
+ """
485
+ residual = hidden_states
486
+ hidden_states = self.self_attn_layer_norm(hidden_states)
487
+
488
+ # Self Attention
489
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
490
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
491
+ # add present self-attn cache to positions 1,2 of present_key_value tuple
492
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
493
+ hidden_states=hidden_states,
494
+ past_key_value=self_attn_past_key_value,
495
+ attention_mask=attention_mask,
496
+ layer_head_mask=layer_head_mask,
497
+ )
498
+ hidden_states = self.dropout(hidden_states, training=training)
499
+ hidden_states = residual + hidden_states
500
+
501
+ # Cross-Attention Block
502
+ cross_attn_present_key_value = None
503
+ cross_attn_weights = None
504
+ if encoder_hidden_states is not None:
505
+ residual = hidden_states
506
+ hidden_states = self.encoder_attn_layer_norm(hidden_states)
507
+
508
+ # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
509
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
510
+ hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
511
+ hidden_states=hidden_states,
512
+ key_value_states=encoder_hidden_states,
513
+ attention_mask=encoder_attention_mask,
514
+ layer_head_mask=cross_attn_layer_head_mask,
515
+ past_key_value=cross_attn_past_key_value,
516
+ )
517
+ hidden_states = self.dropout(hidden_states, training=training)
518
+ hidden_states = residual + hidden_states
519
+
520
+ # add cross-attn to positions 3,4 of present_key_value tuple
521
+ present_key_value = present_key_value + cross_attn_present_key_value
522
+
523
+ # Fully Connected
524
+ residual = hidden_states
525
+ hidden_states = self.final_layer_norm(hidden_states)
526
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
527
+ hidden_states = self.activation_dropout(hidden_states, training=training)
528
+ hidden_states = self.fc2(hidden_states)
529
+ hidden_states = self.dropout(hidden_states, training=training)
530
+ hidden_states = residual + hidden_states
531
+
532
+ return (
533
+ hidden_states,
534
+ self_attn_weights,
535
+ cross_attn_weights,
536
+ present_key_value,
537
+ )
538
+
539
+ def build(self, input_shape=None):
540
+ if self.built:
541
+ return
542
+ self.built = True
543
+ if getattr(self, "self_attn", None) is not None:
544
+ with tf.name_scope(self.self_attn.name):
545
+ self.self_attn.build(None)
546
+ if getattr(self, "self_attn_layer_norm", None) is not None:
547
+ with tf.name_scope(self.self_attn_layer_norm.name):
548
+ self.self_attn_layer_norm.build([None, None, self.embed_dim])
549
+ if getattr(self, "encoder_attn", None) is not None:
550
+ with tf.name_scope(self.encoder_attn.name):
551
+ self.encoder_attn.build(None)
552
+ if getattr(self, "encoder_attn_layer_norm", None) is not None:
553
+ with tf.name_scope(self.encoder_attn_layer_norm.name):
554
+ self.encoder_attn_layer_norm.build([None, None, self.embed_dim])
555
+ if getattr(self, "fc1", None) is not None:
556
+ with tf.name_scope(self.fc1.name):
557
+ self.fc1.build([None, None, self.embed_dim])
558
+ if getattr(self, "fc2", None) is not None:
559
+ with tf.name_scope(self.fc2.name):
560
+ self.fc2.build([None, None, self.config.decoder_ffn_dim])
561
+ if getattr(self, "final_layer_norm", None) is not None:
562
+ with tf.name_scope(self.final_layer_norm.name):
563
+ self.final_layer_norm.build([None, None, self.embed_dim])
564
+
565
+
566
+ class TFPegasusPreTrainedModel(TFPreTrainedModel):
567
+ config_class = PegasusConfig
568
+ base_model_prefix = "model"
569
+
570
+
571
+ PEGASUS_START_DOCSTRING = r"""
572
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
573
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
574
+ etc.)
575
+
576
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
577
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and
578
+ behavior.
579
+
580
+ <Tip>
581
+
582
+ TensorFlow models and layers in `transformers` accept two formats as input:
583
+
584
+ - having all inputs as keyword arguments (like PyTorch models), or
585
+ - having all inputs as a list, tuple or dict in the first positional argument.
586
+
587
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
588
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
589
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
590
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
591
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
592
+ positional argument:
593
+
594
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
595
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
596
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
597
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
598
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
599
+
600
+ Note that when creating models and layers with
601
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
602
+ about any of this, as you can just pass inputs like you would to any other Python function!
603
+
604
+ </Tip>
605
+
606
+ Args:
607
+ config ([`PegasusConfig`]): Model configuration class with all the parameters of the model.
608
+ Initializing with a config file does not load the weights associated with the model, only the
609
+ configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
610
+ """
611
+
612
+ PEGASUS_GENERATION_EXAMPLE = r"""
613
+ Summarization example:
614
+
615
+ ```python
616
+ >>> from transformers import AutoTokenizer, TFPegasusForConditionalGeneration
617
+
618
+ >>> model = TFPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
619
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/pegasus-xsum")
620
+
621
+ >>> ARTICLE_TO_SUMMARIZE = (
622
+ ... "PG&E stated it scheduled the blackouts in response to forecasts for high winds "
623
+ ... "amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were "
624
+ ... "scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow."
625
+ ... )
626
+ >>> inputs = tokenizer(ARTICLE_TO_SUMMARIZE, max_length=1024, return_tensors="tf")
627
+
628
+ >>> # Generate Summary
629
+ >>> summary_ids = model.generate(inputs["input_ids"])
630
+ >>> print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False))
631
+ ```
632
+ """
633
+
634
+ PEGASUS_INPUTS_DOCSTRING = r"""
635
+ Args:
636
+ input_ids (`tf.Tensor` of shape `({0})`):
637
+ Indices of input sequence tokens in the vocabulary.
638
+
639
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
640
+ [`PreTrainedTokenizer.__call__`] for details.
641
+
642
+ [What are input IDs?](../glossary#input-ids)
643
+ attention_mask (`tf.Tensor` of shape `({0})`, *optional*):
644
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
645
+
646
+ - 1 for tokens that are **not masked**,
647
+ - 0 for tokens that are **masked**.
648
+
649
+ [What are attention masks?](../glossary#attention-mask)
650
+ decoder_input_ids (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
651
+ Indices of decoder input sequence tokens in the vocabulary.
652
+
653
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
654
+ [`PreTrainedTokenizer.__call__`] for details.
655
+
656
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
657
+
658
+ Pegasus uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If
659
+ `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
660
+ `past_key_values`).
661
+ decoder_attention_mask (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
662
+ If not provided, a default mask will be made that ignores pad tokens. It is not recommended to set this for most use cases.
663
+ decoder_position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
664
+ Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
665
+ range `[0, config.max_position_embeddings - 1]`.
666
+ head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
667
+ Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
668
+
669
+ - 1 indicates the head is **not masked**,
670
+ - 0 indicates the head is **masked**.
671
+
672
+ decoder_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
673
+ Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
674
+
675
+ - 1 indicates the head is **not masked**,
676
+ - 0 indicates the head is **masked**.
677
+
678
+ cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
679
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
680
+
681
+ - 1 indicates the head is **not masked**,
682
+ - 0 indicates the head is **masked**.
683
+
684
+ encoder_outputs (`tf.FloatTensor`, *optional*):
685
+ Sequence of hidden-states at the output of the last layer of the encoder, of shape
686
+ `(batch_size, sequence_length, hidden_size)`. Used in the cross-attention of the decoder.
687
+ past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`):
688
+ contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
689
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
690
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
691
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
692
+ inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
693
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
694
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
695
+ than the model's internal embedding lookup matrix.
696
+ use_cache (`bool`, *optional*, defaults to `True`):
697
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
698
+ `past_key_values`). Set to `False` during training, `True` during generation output_attentions (`bool`,
699
+ *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions`
700
+ under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the
701
+ value in the config will be used instead.
702
+ output_attentions (`bool`, *optional*):
703
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
704
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
705
+ config will be used instead.
706
+ output_hidden_states (`bool`, *optional*):
707
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
708
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
709
+ used instead.
710
+ return_dict (`bool`, *optional*):
711
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
712
+ eager mode, in graph mode the value will always be set to True.
713
+ training (`bool`, *optional*, defaults to `False`):
714
+ Whether or not to use the model in training mode (some modules like dropout modules have different
715
+ behaviors between training and evaluation).
716
+ """
717
+
718
+
719
+ @keras_serializable
720
+ class TFPegasusEncoder(keras.layers.Layer):
721
+ config_class = PegasusConfig
722
+ """
723
+ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
724
+ [`TFPegasusEncoderLayer`].
725
+
726
+ Args:
727
+ config: PegasusConfig
728
+ """
729
+
730
+ def __init__(self, config: PegasusConfig, embed_tokens: Optional[keras.layers.Embedding] = None, **kwargs):
731
+ super().__init__(**kwargs)
732
+ self.config = config
733
+ self.dropout = keras.layers.Dropout(config.dropout)
734
+ self.layerdrop = config.encoder_layerdrop
735
+ self.padding_idx = config.pad_token_id
736
+ self.max_source_positions = config.max_position_embeddings
737
+ self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0
738
+
739
+ self.embed_tokens = embed_tokens
740
+ self.embed_positions = TFPegasusSinusoidalPositionalEmbedding(
741
+ config.max_position_embeddings,
742
+ config.d_model,
743
+ name="embed_positions",
744
+ )
745
+ self.layers = [TFPegasusEncoderLayer(config, name=f"layers.{i}") for i in range(config.encoder_layers)]
746
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="layer_norm")
747
+
748
+ def get_embed_tokens(self):
749
+ return self.embed_tokens
750
+
751
+ def set_embed_tokens(self, embed_tokens):
752
+ self.embed_tokens = embed_tokens
753
+
754
+ @unpack_inputs
755
+ def call(
756
+ self,
757
+ input_ids: tf.Tensor | None = None,
758
+ inputs_embeds: tf.Tensor | None = None,
759
+ attention_mask: tf.Tensor | None = None,
760
+ head_mask: tf.Tensor | None = None,
761
+ output_attentions: Optional[bool] = None,
762
+ output_hidden_states: Optional[bool] = None,
763
+ return_dict: Optional[bool] = None,
764
+ training: Optional[bool] = False,
765
+ ):
766
+ """
767
+ Args:
768
+ input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
769
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
770
+ provide it.
771
+
772
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
773
+ [`PreTrainedTokenizer.__call__`] for details.
774
+
775
+ [What are input IDs?](../glossary#input-ids)
776
+ attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
777
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
778
+
779
+ - 1 for tokens that are **not masked**,
780
+ - 0 for tokens that are **masked**.
781
+
782
+ [What are attention masks?](../glossary#attention-mask)
783
+ head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
784
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
785
+
786
+ - 1 indicates the head is **not masked**,
787
+ - 0 indicates the head is **masked**.
788
+
789
+ inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
790
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
791
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
792
+ than the model's internal embedding lookup matrix.
793
+ output_attentions (`bool`, *optional*):
794
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
795
+ returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value
796
+ in the config will be used instead.
797
+ output_hidden_states (`bool`, *optional*):
798
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
799
+ for more detail. This argument can be used only in eager mode, in graph mode the value in the config
800
+ will be used instead.
801
+ return_dict (`bool`, *optional*):
802
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used
803
+ in eager mode, in graph mode the value will always be set to True.
804
+ training (`bool`, *optional*, defaults to `False`):
805
+ Whether or not to use the model in training mode (some modules like dropout modules have different
806
+ behaviors between training and evaluation).
807
+ """
808
+
809
+ if input_ids is not None and inputs_embeds is not None:
810
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
811
+ elif input_ids is not None:
812
+ input_shape = shape_list(input_ids)
813
+ elif inputs_embeds is not None:
814
+ input_shape = shape_list(inputs_embeds)[:-1]
815
+ else:
816
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
817
+
818
+ if inputs_embeds is None:
819
+ check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim)
820
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
821
+
822
+ embed_pos = self.embed_positions(input_shape)
823
+ hidden_states = inputs_embeds + embed_pos
824
+ hidden_states = self.dropout(hidden_states, training=training)
825
+
826
+ # check attention mask and invert
827
+ if attention_mask is not None:
828
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
829
+ attention_mask = _expand_mask(attention_mask)
830
+ else:
831
+ attention_mask = None
832
+
833
+ encoder_states = () if output_hidden_states else None
834
+ all_attentions = () if output_attentions else None
835
+
836
+ # check if head_mask has a correct number of layers specified if desired
837
+ if head_mask is not None:
838
+ tf.debugging.assert_equal(
839
+ shape_list(head_mask)[0],
840
+ len(self.layers),
841
+ message=(
842
+ f"The head_mask should be specified for {len(self.layers)} layers, but it is for"
843
+ f" {shape_list(head_mask)[0]}."
844
+ ),
845
+ )
846
+
847
+ # encoder layers
848
+ for idx, encoder_layer in enumerate(self.layers):
849
+ if output_hidden_states:
850
+ encoder_states = encoder_states + (hidden_states,)
851
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
852
+ dropout_probability = random.uniform(0, 1)
853
+ if training and (dropout_probability < self.layerdrop): # skip the layer
854
+ continue
855
+
856
+ hidden_states, attn = encoder_layer(
857
+ hidden_states,
858
+ attention_mask,
859
+ head_mask[idx] if head_mask is not None else None,
860
+ )
861
+
862
+ if output_attentions:
863
+ all_attentions += (attn,)
864
+
865
+ hidden_states = self.layer_norm(hidden_states)
866
+
867
+ if output_hidden_states:
868
+ encoder_states = encoder_states + (hidden_states,)
869
+
870
+ if not return_dict:
871
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
872
+ return TFBaseModelOutput(
873
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
874
+ )
875
+
876
+ def build(self, input_shape=None):
877
+ if self.built:
878
+ return
879
+ self.built = True
880
+ if getattr(self, "embed_positions", None) is not None:
881
+ with tf.name_scope(self.embed_positions.name):
882
+ self.embed_positions.build(None)
883
+ if getattr(self, "layer_norm", None) is not None:
884
+ with tf.name_scope(self.layer_norm.name):
885
+ self.layer_norm.build([None, None, self.config.d_model])
886
+ if getattr(self, "layers", None) is not None:
887
+ for layer in self.layers:
888
+ with tf.name_scope(layer.name):
889
+ layer.build(None)
890
+
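The LayerDrop comment inside the encoder loop above is easy to miss; here is a tiny sketch (pure Python, hypothetical 12-layer stack) of the same coin flip that skips whole encoder layers during training:

```python
import random

layerdrop = 0.1   # probability of skipping a layer for the current batch (training only)
training = True

executed = []
for idx in range(12):  # toy stack of 12 encoder layers
    if training and random.uniform(0, 1) < layerdrop:
        continue       # this layer contributes nothing this step
    executed.append(idx)

print(executed)  # on average ~90% of the layers run
```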
891
+
892
+ @keras_serializable
893
+ class TFPegasusDecoder(keras.layers.Layer):
894
+ config_class = PegasusConfig
895
+ """
896
+ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`TFPegasusDecoderLayer`]
897
+
898
+ Args:
899
+ config: PegasusConfig
900
+ embed_tokens: output embedding
901
+ """
902
+
903
+ def __init__(self, config: PegasusConfig, embed_tokens: Optional[keras.layers.Embedding] = None, **kwargs):
904
+ super().__init__(**kwargs)
905
+ self.config = config
906
+ self.padding_idx = config.pad_token_id
907
+ self.embed_tokens = embed_tokens
908
+ self.layerdrop = config.decoder_layerdrop
909
+ self.embed_positions = TFPegasusSinusoidalPositionalEmbedding(
910
+ config.max_position_embeddings,
911
+ config.d_model,
912
+ name="embed_positions",
913
+ )
914
+ self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0
915
+ self.layers = [TFPegasusDecoderLayer(config, name=f"layers.{i}") for i in range(config.decoder_layers)]
916
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="layer_norm")
917
+
918
+ self.dropout = keras.layers.Dropout(config.dropout)
919
+
920
+ def get_embed_tokens(self):
921
+ return self.embed_tokens
922
+
923
+ def set_embed_tokens(self, embed_tokens):
924
+ self.embed_tokens = embed_tokens
925
+
926
+ @unpack_inputs
927
+ def call(
928
+ self,
929
+ input_ids: tf.Tensor | None = None,
930
+ inputs_embeds: tf.Tensor | None = None,
931
+ attention_mask: tf.Tensor | None = None,
932
+ position_ids: tf.Tensor | None = None,
933
+ encoder_hidden_states: tf.Tensor | None = None,
934
+ encoder_attention_mask: tf.Tensor | None = None,
935
+ head_mask: tf.Tensor | None = None,
936
+ cross_attn_head_mask: tf.Tensor | None = None,
937
+ past_key_values: Tuple[Tuple[tf.Tensor]] = None,
938
+ use_cache: Optional[bool] = None,
939
+ output_attentions: Optional[bool] = None,
940
+ output_hidden_states: Optional[bool] = None,
941
+ return_dict: Optional[bool] = None,
942
+ training: Optional[bool] = False,
943
+ ):
944
+ r"""
945
+ Args:
946
+ input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
947
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
948
+ provide it.
949
+
950
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
951
+ [`PreTrainedTokenizer.__call__`] for details.
952
+
953
+ [What are input IDs?](../glossary#input-ids)
954
+ attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
955
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
956
+
957
+ - 1 for tokens that are **not masked**,
958
+ - 0 for tokens that are **masked**.
959
+
960
+ [What are attention masks?](../glossary#attention-mask)
961
+ position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
962
+ Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
963
+ range `[0, config.max_position_embeddings - 1]`.
964
+ encoder_hidden_states (`tf.Tensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
965
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
966
+ of the decoder.
967
+ encoder_attention_mask (`tf.Tensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
968
+ Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
969
+ selected in `[0, 1]`:
970
+
971
+ - 1 for tokens that are **not masked**,
972
+ - 0 for tokens that are **masked**.
973
+
974
+ [What are attention masks?](../glossary#attention-mask)
975
+ head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
976
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
977
+
978
+ - 1 indicates the head is **not masked**,
979
+ - 0 indicates the head is **masked**.
980
+
981
+ cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
982
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
983
+
984
+ - 1 indicates the head is **not masked**,
985
+ - 0 indicates the head is **masked**.
986
+
987
+ past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
988
+ Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up
989
+ decoding.
990
+
991
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
992
+ that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
993
+ all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
994
+ inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
995
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
996
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
997
+ than the model's internal embedding lookup matrix.
998
+ output_attentions (`bool`, *optional*):
999
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
1000
+ returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value
1001
+ in the config will be used instead.
1002
+ output_hidden_states (`bool`, *optional*):
1003
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
1004
+ for more detail. This argument can be used only in eager mode, in graph mode the value in the config
1005
+ will be used instead.
1006
+ return_dict (`bool`, *optional*):
1007
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used
1008
+ in eager mode, in graph mode the value will always be set to True.
1009
+ training (`bool`, *optional*, defaults to `False`):
1010
+ Whether or not to use the model in training mode (some modules like dropout modules have different
1011
+ behaviors between training and evaluation).
1012
+ """
1013
+
1014
+ if input_ids is not None and inputs_embeds is not None:
1015
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
1016
+ elif input_ids is not None:
1017
+ input_shape = shape_list(input_ids)
1018
+ elif inputs_embeds is not None:
1019
+ input_shape = shape_list(inputs_embeds)[:-1]
1020
+ else:
1021
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
1022
+
1023
+ past_key_values_length = shape_list(past_key_values[0][0])[2] if past_key_values is not None else 0
1024
+
1025
+ # embed positions
1026
+ if position_ids is None:
1027
+ positions = self.embed_positions(input_shape, past_key_values_length)
1028
+ else:
1029
+ positions = self.embed_positions(input_shape, position_ids=position_ids)
1030
+
1031
+ if inputs_embeds is None:
1032
+ check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim)
1033
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
1034
+
1035
+ hidden_states = inputs_embeds
1036
+
1037
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
1038
+ if input_shape[-1] > 1:
1039
+ combined_attention_mask = _make_causal_mask(input_shape, past_key_values_length=past_key_values_length)
1040
+ else:
1041
+ combined_attention_mask = _expand_mask(
1042
+ tf.ones((input_shape[0], input_shape[1] + past_key_values_length)), tgt_len=input_shape[-1]
1043
+ )
1044
+
1045
+ if attention_mask is not None:
1046
+ combined_attention_mask = combined_attention_mask + _expand_mask(attention_mask, tgt_len=input_shape[-1])
1047
+
1048
+ if encoder_hidden_states is not None and encoder_attention_mask is not None:
1049
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
1050
+ encoder_attention_mask = _expand_mask(encoder_attention_mask, tgt_len=input_shape[-1])
1051
+
1052
+ hidden_states = self.dropout(hidden_states + positions, training=training)
1053
+
1054
+ # decoder layers
1055
+ all_hidden_states = () if output_hidden_states else None
1056
+ all_self_attns = () if output_attentions else None
1057
+ all_cross_attns = () if (output_attentions and encoder_hidden_states is not None) else None
1058
+ present_key_values = () if use_cache else None
1059
+
1060
+ # check if head_mask and cross_attn_head_mask have a correct number of layers specified if desired
1061
+ for attn_mask_name, attn_mask in [("head_mask", head_mask), ("cross_attn_head_mask", cross_attn_head_mask)]:
1062
+ if attn_mask is not None:
1063
+ tf.debugging.assert_equal(
1064
+ shape_list(attn_mask)[0],
1065
+ len(self.layers),
1066
+ message=(
1067
+ f"The {attn_mask_name} should be specified for {len(self.layers)} layers, but it is for"
1068
+ f" {shape_list(attn_mask)[0]}."
1069
+ ),
1070
+ )
1071
+
1072
+ for idx, decoder_layer in enumerate(self.layers):
1073
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
1074
+ if output_hidden_states:
1075
+ all_hidden_states += (hidden_states,)
1076
+ dropout_probability = random.uniform(0, 1)
1077
+
1078
+ if training and (dropout_probability < self.layerdrop):
1079
+ continue
1080
+
1081
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
1082
+
1083
+ hidden_states, layer_self_attn, layer_cross_attn, present_key_value = decoder_layer(
1084
+ hidden_states,
1085
+ attention_mask=combined_attention_mask,
1086
+ encoder_hidden_states=encoder_hidden_states,
1087
+ encoder_attention_mask=encoder_attention_mask,
1088
+ layer_head_mask=head_mask[idx] if head_mask is not None else None,
1089
+ cross_attn_layer_head_mask=cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
1090
+ past_key_value=past_key_value,
1091
+ )
1092
+
1093
+ if use_cache:
1094
+ present_key_values += (present_key_value,)
1095
+
1096
+ if output_attentions:
1097
+ all_self_attns += (layer_self_attn,)
1098
+
1099
+ if encoder_hidden_states is not None:
1100
+ all_cross_attns += (layer_cross_attn,)
1101
+
1102
+ hidden_states = self.layer_norm(hidden_states)
1103
+
1104
+ if output_hidden_states:
1105
+ all_hidden_states += (hidden_states,)
1106
+
1107
+ if not return_dict:
1108
+ return hidden_states, present_key_values, all_hidden_states, all_self_attns, all_cross_attns
1109
+ else:
1110
+ return TFBaseModelOutputWithPastAndCrossAttentions(
1111
+ last_hidden_state=hidden_states,
1112
+ past_key_values=present_key_values,
1113
+ hidden_states=all_hidden_states,
1114
+ attentions=all_self_attns,
1115
+ cross_attentions=all_cross_attns,
1116
+ )
1117
+
1118
+ def build(self, input_shape=None):
1119
+ if self.built:
1120
+ return
1121
+ self.built = True
1122
+ if getattr(self, "embed_positions", None) is not None:
1123
+ with tf.name_scope(self.embed_positions.name):
1124
+ self.embed_positions.build(None)
1125
+ if getattr(self, "layer_norm", None) is not None:
1126
+ with tf.name_scope(self.layer_norm.name):
1127
+ self.layer_norm.build([None, None, self.config.d_model])
1128
+ if getattr(self, "layers", None) is not None:
1129
+ for layer in self.layers:
1130
+ with tf.name_scope(layer.name):
1131
+ layer.build(None)
1132
+
1133
+
1134
+ @keras_serializable
1135
+ class TFPegasusMainLayer(keras.layers.Layer):
1136
+ config_class = PegasusConfig
1137
+
1138
+ def __init__(self, config: PegasusConfig, **kwargs):
1139
+ super().__init__(**kwargs)
1140
+
1141
+ self.config = config
1142
+ self.shared = keras.layers.Embedding(
1143
+ input_dim=config.vocab_size,
1144
+ output_dim=config.d_model,
1145
+ embeddings_initializer=keras.initializers.TruncatedNormal(stddev=self.config.init_std),
1146
+ name="model.shared",
1147
+ )
1148
+ # Additional attribute to specify the expected name scope of the layer (for loading/storing weights)
1149
+ self.shared.load_weight_prefix = "model.shared"
1150
+
1151
+ self.encoder = TFPegasusEncoder(config, self.shared, name="encoder")
1152
+ self.decoder = TFPegasusDecoder(config, self.shared, name="decoder")
1153
+
1154
+ def get_input_embeddings(self):
1155
+ return self.shared
1156
+
1157
+ def set_input_embeddings(self, new_embeddings):
1158
+ self.shared = new_embeddings
1159
+ self.encoder.embed_tokens = self.shared
1160
+ self.decoder.embed_tokens = self.shared
1161
+
1162
+ @unpack_inputs
1163
+ def call(
1164
+ self,
1165
+ input_ids: tf.Tensor | None = None,
1166
+ attention_mask: tf.Tensor | None = None,
1167
+ decoder_input_ids: tf.Tensor | None = None,
1168
+ decoder_attention_mask: tf.Tensor | None = None,
1169
+ decoder_position_ids: tf.Tensor | None = None,
1170
+ head_mask: tf.Tensor | None = None,
1171
+ decoder_head_mask: tf.Tensor | None = None,
1172
+ cross_attn_head_mask: tf.Tensor | None = None,
1173
+ encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None,
1174
+ past_key_values: Tuple[Tuple[tf.Tensor]] = None,
1175
+ inputs_embeds: tf.Tensor | None = None,
1176
+ decoder_inputs_embeds: tf.Tensor | None = None,
1177
+ use_cache: Optional[bool] = None,
1178
+ output_attentions: Optional[bool] = None,
1179
+ output_hidden_states: Optional[bool] = None,
1180
+ return_dict: Optional[bool] = None,
1181
+ training: Optional[bool] = False,
1182
+ **kwargs,
1183
+ ):
1184
+ if decoder_input_ids is None and decoder_inputs_embeds is None:
1185
+ use_cache = False
1186
+
1187
+ output_hidden_states = (
1188
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1189
+ )
1190
+
1191
+ if encoder_outputs is None:
1192
+ encoder_outputs = self.encoder(
1193
+ input_ids=input_ids,
1194
+ attention_mask=attention_mask,
1195
+ head_mask=head_mask,
1196
+ inputs_embeds=inputs_embeds,
1197
+ output_attentions=output_attentions,
1198
+ output_hidden_states=output_hidden_states,
1199
+ return_dict=return_dict,
1200
+ training=training,
1201
+ )
1202
+ # If the user passed a tuple for encoder_outputs, we wrap it in a TFBaseModelOutput when return_dict=True
1203
+ elif return_dict and not isinstance(encoder_outputs, TFBaseModelOutput):
1204
+ encoder_outputs = TFBaseModelOutput(
1205
+ last_hidden_state=encoder_outputs[0],
1206
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
1207
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
1208
+ )
1209
+ # If the user passed a TFBaseModelOutput for encoder_outputs, we wrap it in a tuple when return_dict=False
1210
+ elif not return_dict and not isinstance(encoder_outputs, tuple):
1211
+ encoder_outputs = encoder_outputs.to_tuple()
1212
+
1213
+ decoder_outputs = self.decoder(
1214
+ decoder_input_ids,
1215
+ attention_mask=decoder_attention_mask,
1216
+ position_ids=decoder_position_ids,
1217
+ encoder_hidden_states=encoder_outputs[0],
1218
+ encoder_attention_mask=attention_mask,
1219
+ head_mask=decoder_head_mask,
1220
+ cross_attn_head_mask=cross_attn_head_mask,
1221
+ past_key_values=past_key_values,
1222
+ inputs_embeds=decoder_inputs_embeds,
1223
+ use_cache=use_cache,
1224
+ output_attentions=output_attentions,
1225
+ output_hidden_states=output_hidden_states,
1226
+ return_dict=return_dict,
1227
+ training=training,
1228
+ )
1229
+
1230
+ if not return_dict:
1231
+ return decoder_outputs + encoder_outputs
1232
+
1233
+ return TFSeq2SeqModelOutput(
1234
+ last_hidden_state=decoder_outputs.last_hidden_state,
1235
+ past_key_values=decoder_outputs.past_key_values,
1236
+ decoder_hidden_states=decoder_outputs.hidden_states,
1237
+ decoder_attentions=decoder_outputs.attentions,
1238
+ cross_attentions=decoder_outputs.cross_attentions,
1239
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
1240
+ encoder_hidden_states=encoder_outputs.hidden_states,
1241
+ encoder_attentions=encoder_outputs.attentions,
1242
+ )
1243
+
1244
+ def build(self, input_shape=None):
1245
+ if self.built:
1246
+ return
1247
+ self.built = True
1248
+ # The shared/tied weights expect to be in the model base namespace
1249
+ # Adding "/" to the end (not the start!) of a tf.name_scope puts it in the root namespace rather than
1250
+ # the current one.
1251
+ with tf.name_scope(self.shared.load_weight_prefix + "/" + self.shared.name + "/"):
1252
+ self.shared.build(None)
1253
+ if getattr(self, "encoder", None) is not None:
1254
+ with tf.name_scope(self.encoder.name):
1255
+ self.encoder.build(None)
1256
+ if getattr(self, "decoder", None) is not None:
1257
+ with tf.name_scope(self.decoder.name):
1258
+ self.decoder.build(None)
1259
+
1260
+
1261
+ @add_start_docstrings(
1262
+ "The bare PEGASUS Model outputting raw hidden-states without any specific head on top.",
1263
+ PEGASUS_START_DOCSTRING,
1264
+ )
1265
+ class TFPegasusModel(TFPegasusPreTrainedModel):
1266
+ def __init__(self, config: PegasusConfig, *inputs, **kwargs):
1267
+ super().__init__(config, *inputs, **kwargs)
1268
+
1269
+ self.model = TFPegasusMainLayer(config, name="model")
1270
+
1271
+ def get_encoder(self):
1272
+ return self.model.encoder
1273
+
1274
+ def get_decoder(self):
1275
+ return self.model.decoder
1276
+
1277
+ @unpack_inputs
1278
+ @add_start_docstrings_to_model_forward(PEGASUS_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1279
+ @add_code_sample_docstrings(
1280
+ checkpoint=_CHECKPOINT_FOR_DOC,
1281
+ output_type=TFSeq2SeqModelOutput,
1282
+ config_class=_CONFIG_FOR_DOC,
1283
+ )
1284
+ def call(
1285
+ self,
1286
+ input_ids: TFModelInputType | None = None,
1287
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1288
+ decoder_input_ids: np.ndarray | tf.Tensor | None = None,
1289
+ decoder_attention_mask: np.ndarray | tf.Tensor | None = None,
1290
+ decoder_position_ids: np.ndarray | tf.Tensor | None = None,
1291
+ head_mask: np.ndarray | tf.Tensor | None = None,
1292
+ decoder_head_mask: np.ndarray | tf.Tensor | None = None,
1293
+ cross_attn_head_mask: np.ndarray | tf.Tensor | None = None,
1294
+ encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None,
1295
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
1296
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1297
+ decoder_inputs_embeds: np.ndarray | tf.Tensor | None = None,
1298
+ use_cache: Optional[bool] = None,
1299
+ output_attentions: Optional[bool] = None,
1300
+ output_hidden_states: Optional[bool] = None,
1301
+ return_dict: Optional[bool] = None,
1302
+ training: bool = False,
1303
+ **kwargs,
1304
+ ) -> Union[TFSeq2SeqModelOutput, Tuple[tf.Tensor]]:
1305
+ outputs = self.model(
1306
+ input_ids=input_ids,
1307
+ attention_mask=attention_mask,
1308
+ decoder_input_ids=decoder_input_ids,
1309
+ decoder_attention_mask=decoder_attention_mask,
1310
+ decoder_position_ids=decoder_position_ids,
1311
+ head_mask=head_mask,
1312
+ decoder_head_mask=decoder_head_mask,
1313
+ cross_attn_head_mask=cross_attn_head_mask,
1314
+ encoder_outputs=encoder_outputs,
1315
+ past_key_values=past_key_values,
1316
+ inputs_embeds=inputs_embeds,
1317
+ decoder_inputs_embeds=decoder_inputs_embeds,
1318
+ use_cache=use_cache,
1319
+ output_attentions=output_attentions,
1320
+ output_hidden_states=output_hidden_states,
1321
+ return_dict=return_dict,
1322
+ training=training,
1323
+ )
1324
+
1325
+ return outputs
1326
+
1327
+ # Copied from transformers.models.bart.modeling_tf_bart.TFBartModel.serving_output
1328
+ def serving_output(self, output):
1329
+ pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None
1330
+ dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None
1331
+ dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None
1332
+ cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None
1333
+ enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None
1334
+ enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None
1335
+
1336
+ return TFSeq2SeqModelOutput(
1337
+ last_hidden_state=output.last_hidden_state,
1338
+ past_key_values=pkv,
1339
+ decoder_hidden_states=dec_hs,
1340
+ decoder_attentions=dec_attns,
1341
+ cross_attentions=cross_attns,
1342
+ encoder_last_hidden_state=output.encoder_last_hidden_state,
1343
+ encoder_hidden_states=enc_hs,
1344
+ encoder_attentions=enc_attns,
1345
+ )
1346
+
1347
+ def build(self, input_shape=None):
1348
+ if self.built:
1349
+ return
1350
+ self.built = True
1351
+ if getattr(self, "model", None) is not None:
1352
+ with tf.name_scope(self.model.name):
1353
+ self.model.build(None)
1354
+
1355
+
1356
+ # Copied from transformers.models.bart.modeling_tf_bart.BiasLayer
1357
+ class BiasLayer(keras.layers.Layer):
1358
+ """
1359
+ Bias as a layer. It is used for serialization purposes: `keras.Model.save_weights` stores on a per-layer basis,
1360
+ so all weights have to be registered in a layer.
1361
+ """
1362
+
1363
+ def __init__(self, shape, initializer, trainable, name, **kwargs):
1364
+ super().__init__(name=name, **kwargs)
1365
+ # Note: the name of this variable will NOT be scoped when serialized, i.e. it will not be in the format of
1366
+ # "outer_layer/inner_layer/.../name:0". Instead, it will be "name:0". For further details, see:
1367
+ # https://github.com/huggingface/transformers/pull/18833#issuecomment-1233090214
1368
+ self.bias = self.add_weight(name=name, shape=shape, initializer=initializer, trainable=trainable)
1369
+
1370
+ def call(self, x):
1371
+ return x + self.bias
1372
+
1373
+
1374
+ @add_start_docstrings(
1375
+ "The PEGASUS Model with a language modeling head. Can be used for summarization.",
1376
+ PEGASUS_START_DOCSTRING,
1377
+ )
1378
+ class TFPegasusForConditionalGeneration(TFPegasusPreTrainedModel, TFCausalLanguageModelingLoss):
1379
+ _keys_to_ignore_on_load_unexpected = [
1380
+ r"model.encoder.embed_tokens.weight",
1381
+ r"model.decoder.embed_tokens.weight",
1382
+ ]
1383
+
1384
+ def __init__(self, config, *inputs, **kwargs):
1385
+ super().__init__(config, *inputs, **kwargs)
1386
+ self.model = TFPegasusMainLayer(config, name="model")
1387
+ self.use_cache = config.use_cache
1388
+ # final_logits_bias is registered as a buffer in pytorch, so it is not trainable here for the sake of consistency.
1389
+ self.bias_layer = BiasLayer(
1390
+ name="final_logits_bias", shape=[1, config.vocab_size], initializer="zeros", trainable=False
1391
+ )
1392
+
1393
+ def get_decoder(self):
1394
+ return self.model.decoder
1395
+
1396
+ def get_encoder(self):
1397
+ return self.model.encoder
1398
+
1399
+ def get_output_embeddings(self):
1400
+ return self.get_input_embeddings()
1401
+
1402
+ def set_output_embeddings(self, value):
1403
+ self.set_input_embeddings(value)
1404
+
1405
+ def get_bias(self):
1406
+ return {"final_logits_bias": self.bias_layer.bias}
1407
+
1408
+ def set_bias(self, value):
1409
+ # Replaces the existing layers containing bias for correct (de)serialization.
1410
+ vocab_size = value["final_logits_bias"].shape[-1]
1411
+ self.bias_layer = BiasLayer(
1412
+ name="final_logits_bias", shape=[1, vocab_size], initializer="zeros", trainable=False
1413
+ )
1414
+ self.bias_layer.bias.assign(value["final_logits_bias"])
1415
+
1416
+ @unpack_inputs
1417
+ @add_start_docstrings_to_model_forward(PEGASUS_INPUTS_DOCSTRING)
1418
+ @replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
1419
+ @add_end_docstrings(PEGASUS_GENERATION_EXAMPLE)
1420
+ def call(
1421
+ self,
1422
+ input_ids: TFModelInputType | None = None,
1423
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1424
+ decoder_input_ids: np.ndarray | tf.Tensor | None = None,
1425
+ decoder_attention_mask: np.ndarray | tf.Tensor | None = None,
1426
+ decoder_position_ids: np.ndarray | tf.Tensor | None = None,
1427
+ head_mask: np.ndarray | tf.Tensor | None = None,
1428
+ decoder_head_mask: np.ndarray | tf.Tensor | None = None,
1429
+ cross_attn_head_mask: np.ndarray | tf.Tensor | None = None,
1430
+ encoder_outputs: Optional[TFBaseModelOutput] = None,
1431
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
1432
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1433
+ decoder_inputs_embeds: np.ndarray | tf.Tensor | None = None,
1434
+ use_cache: Optional[bool] = None,
1435
+ output_attentions: Optional[bool] = None,
1436
+ output_hidden_states: Optional[bool] = None,
1437
+ return_dict: Optional[bool] = None,
1438
+ labels: np.ndarray | tf.Tensor | None = None,
1439
+ training: bool = False,
1440
+ ) -> Union[TFSeq2SeqLMOutput, Tuple[tf.Tensor]]:
1441
+ """
1442
+ labels (`tf.tensor` of shape `(batch_size, sequence_length)`, *optional*):
1443
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1444
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1445
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1446
+
1447
+ Returns:
1448
+
1449
+ """
1450
+
1451
+ if labels is not None:
1452
+ labels = tf.where(
1453
+ labels == self.config.pad_token_id,
1454
+ tf.cast(tf.fill(shape_list(labels), -100), labels.dtype),
1455
+ labels,
1456
+ )
1457
+ use_cache = False
1458
+ if decoder_input_ids is None and decoder_inputs_embeds is None:
1459
+ decoder_input_ids = shift_tokens_right(
1460
+ labels, self.config.pad_token_id, self.config.decoder_start_token_id
1461
+ )
1462
+
1463
+ outputs = self.model(
1464
+ input_ids,
1465
+ attention_mask=attention_mask,
1466
+ decoder_input_ids=decoder_input_ids,
1467
+ encoder_outputs=encoder_outputs,
1468
+ decoder_attention_mask=decoder_attention_mask,
1469
+ decoder_position_ids=decoder_position_ids,
1470
+ head_mask=head_mask,
1471
+ decoder_head_mask=decoder_head_mask,
1472
+ cross_attn_head_mask=cross_attn_head_mask,
1473
+ past_key_values=past_key_values,
1474
+ inputs_embeds=inputs_embeds,
1475
+ decoder_inputs_embeds=decoder_inputs_embeds,
1476
+ use_cache=use_cache,
1477
+ output_attentions=output_attentions,
1478
+ output_hidden_states=output_hidden_states,
1479
+ return_dict=return_dict,
1480
+ training=training,
1481
+ )
1482
+ lm_logits = tf.matmul(outputs[0], self.model.shared.weights, transpose_b=True)
1483
+ lm_logits = self.bias_layer(lm_logits)
1484
+ masked_lm_loss = None if labels is None else self.hf_compute_loss(labels, lm_logits)
1485
+
1486
+ if not return_dict:
1487
+ output = (lm_logits,) + outputs[1:]
1488
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
1489
+ return TFSeq2SeqLMOutput(
1490
+ loss=masked_lm_loss,
1491
+ logits=lm_logits,
1492
+ past_key_values=outputs.past_key_values, # index 1 of d outputs
1493
+ decoder_hidden_states=outputs.decoder_hidden_states, # index 2 of d outputs
1494
+ decoder_attentions=outputs.decoder_attentions, # index 3 of d outputs
1495
+ cross_attentions=outputs.cross_attentions, # index 4 of d outputs
1496
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state, # index 0 of encoder outputs
1497
+ encoder_hidden_states=outputs.encoder_hidden_states, # 1 of e out
1498
+ encoder_attentions=outputs.encoder_attentions, # 2 of e out
1499
+ )
1500
+
1501
+ # Copied from transformers.models.bart.modeling_tf_bart.TFBartForConditionalGeneration.serving_output
1502
+ def serving_output(self, output):
1503
+ pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None
1504
+ dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None
1505
+ dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None
1506
+ cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None
1507
+ enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None
1508
+ enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None
1509
+
1510
+ return TFSeq2SeqLMOutput(
1511
+ logits=output.logits,
1512
+ past_key_values=pkv,
1513
+ decoder_hidden_states=dec_hs,
1514
+ decoder_attentions=dec_attns,
1515
+ cross_attentions=cross_attns,
1516
+ encoder_last_hidden_state=output.encoder_last_hidden_state,
1517
+ encoder_hidden_states=enc_hs,
1518
+ encoder_attentions=enc_attns,
1519
+ )
1520
+
1521
+ # Copied from transformers.models.bart.modeling_tf_bart.TFBartForConditionalGeneration.prepare_inputs_for_generation
1522
+ def prepare_inputs_for_generation(
1523
+ self,
1524
+ decoder_input_ids,
1525
+ past_key_values=None,
1526
+ attention_mask=None,
1527
+ decoder_attention_mask=None,
1528
+ head_mask=None,
1529
+ decoder_head_mask=None,
1530
+ cross_attn_head_mask=None,
1531
+ use_cache=None,
1532
+ encoder_outputs=None,
1533
+ **kwargs,
1534
+ ):
1535
+ # cut decoder_input_ids if past_key_values is used
1536
+ if past_key_values is not None:
1537
+ decoder_input_ids = decoder_input_ids[:, -1:]
1538
+
1539
+ if decoder_attention_mask is not None: # xla
1540
+ decoder_position_ids = tf.math.cumsum(decoder_attention_mask, axis=-1, exclusive=True)[:, -1:]
1541
+ elif past_key_values is not None: # no xla + past_key_values
1542
+ decoder_position_ids = past_key_values[0][0].shape[2]
1543
+ else: # no xla + no past_key_values
1544
+ decoder_position_ids = tf.range(decoder_input_ids.shape[1])
1545
+
1546
+ return {
1547
+ "input_ids": None, # encoder_outputs is defined. input_ids not needed
1548
+ "encoder_outputs": encoder_outputs,
1549
+ "past_key_values": past_key_values,
1550
+ "decoder_input_ids": decoder_input_ids,
1551
+ "attention_mask": attention_mask,
1552
+ "decoder_attention_mask": decoder_attention_mask,
1553
+ "decoder_position_ids": decoder_position_ids,
1554
+ "head_mask": head_mask,
1555
+ "decoder_head_mask": decoder_head_mask,
1556
+ "cross_attn_head_mask": cross_attn_head_mask,
1557
+ "use_cache": use_cache, # change this to avoid caching (presumably for debugging)
1558
+ }
1559
+
1560
+ def prepare_decoder_input_ids_from_labels(self, labels: tf.Tensor):
1561
+ return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
1562
+
1563
+ def build(self, input_shape=None):
1564
+ if self.built:
1565
+ return
1566
+ self.built = True
1567
+ if getattr(self, "model", None) is not None:
1568
+ with tf.name_scope(self.model.name):
1569
+ self.model.build(None)
1570
+ if getattr(self, "bias_layer", None) is not None:
1571
+ with tf.name_scope(self.bias_layer.name):
1572
+ self.bias_layer.build(None)
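
A minimal usage sketch for the conditional-generation class defined above (assuming the `google/pegasus-xsum` checkpoint referenced in the docstrings is reachable and TensorFlow is installed): `generate()` drives decoding through the `prepare_inputs_for_generation()` hook shown above, re-feeding `past_key_values` so that only the last decoder token is embedded at each step.

```python
from transformers import AutoTokenizer, TFPegasusForConditionalGeneration

# assumed checkpoint; any PEGASUS checkpoint with TF weights should behave the same way
tokenizer = AutoTokenizer.from_pretrained("google/pegasus-xsum")
model = TFPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")

article = "PG&E stated it scheduled the blackouts in response to forecasts for high winds."
inputs = tokenizer(article, return_tensors="tf", truncation=True)

# generate() repeatedly calls prepare_inputs_for_generation() to feed the cache
summary_ids = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True)[0])
```
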
venv/lib/python3.10/site-packages/transformers/models/pegasus/tokenization_pegasus.py ADDED
@@ -0,0 +1,285 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 Google and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ import os
16
+ from shutil import copyfile
17
+ from typing import Any, Dict, List, Optional, Tuple
18
+
19
+ import sentencepiece as spm
20
+
21
+ from ...tokenization_utils import AddedToken, PreTrainedTokenizer
22
+ from ...utils import logging
23
+
24
+
25
+ SPIECE_UNDERLINE = "▁"
26
+
27
+ VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
28
+
29
+
30
+ logger = logging.get_logger(__name__)
31
+
32
+
33
+ # TODO ArthurZ refactor this to only use the added_tokens_encoder
34
+ class PegasusTokenizer(PreTrainedTokenizer):
35
+ r"""
36
+ Construct a PEGASUS tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).
37
+
38
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
39
+ this superclass for more information regarding those methods.
40
+
41
+ Args:
42
+ vocab_file (`str`):
43
+ [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
44
+ contains the vocabulary necessary to instantiate a tokenizer.
45
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
46
+ The token used for padding, for example when batching sequences of different lengths.
47
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
48
+ The end of sequence token.
49
+
50
+ <Tip>
51
+
52
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
53
+ The token used is the `sep_token`.
54
+
55
+ </Tip>
56
+
57
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
58
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
59
+ token instead.
60
+ mask_token (`str`, *optional*, defaults to `"<mask_2>"`):
61
+ The token used for masking single token values. This is the token used when training this model with masked
62
+ language modeling (MLM). This is the token that the PEGASUS encoder will try to predict during pretraining.
63
+ It corresponds to *[MASK2]* in [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive
64
+ Summarization](https://arxiv.org/pdf/1912.08777.pdf).
65
+ mask_token_sent (`str`, *optional*, defaults to `"<mask_1>"`):
66
+ The token used for masking whole target sentences. This is the token used when training this model with gap
67
+ sentences generation (GSG). This is the sentence that the PEGASUS decoder will try to predict during
68
+ pretraining. It corresponds to *[MASK1]* in [PEGASUS: Pre-training with Extracted Gap-sentences for
69
+ Abstractive Summarization](https://arxiv.org/pdf/1912.08777.pdf).
70
+ additional_special_tokens (`List[str]`, *optional*):
71
+ Additional special tokens used by the tokenizer. If no additional_special_tokens are provided, <mask_2> and
72
+ <unk_2>, ..., <unk_102> are used as additional special tokens, corresponding to the [original PEGASUS
73
+ tokenizer](https://github.com/google-research/pegasus/blob/939830367bcf411193d2b5eca2f2f90f3f9260ca/pegasus/ops/pretrain_parsing_ops.cc#L66)
74
+ that uses tokens 2 - 104 only for pretraining.
75
+ sp_model_kwargs (`dict`, *optional*):
76
+ Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
77
+ SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
78
+ to set:
79
+
80
+ - `enable_sampling`: Enable subword regularization.
81
+ - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
82
+
83
+ - `nbest_size = {0,1}`: No sampling is performed.
84
+ - `nbest_size > 1`: samples from the nbest_size results.
85
+ - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
86
+ using forward-filtering-and-backward-sampling algorithm.
87
+
88
+ - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
89
+ BPE-dropout.
90
+ """
91
+
92
+ vocab_files_names = VOCAB_FILES_NAMES
93
+ model_input_names = ["input_ids", "attention_mask"]
94
+
95
+ def __init__(
96
+ self,
97
+ vocab_file,
98
+ pad_token="<pad>",
99
+ eos_token="</s>",
100
+ unk_token="<unk>",
101
+ mask_token="<mask_2>",
102
+ mask_token_sent="<mask_1>",
103
+ additional_special_tokens=None,
104
+ offset=103, # entries 2 - 104 are only used for pretraining
105
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
106
+ **kwargs,
107
+ ) -> None:
108
+ self.offset = offset
109
+ if additional_special_tokens is not None:
110
+ if not isinstance(additional_special_tokens, list):
111
+ raise TypeError(
112
+ f"additional_special_tokens should be of type {type(list)}, but is"
113
+ f" {type(additional_special_tokens)}"
114
+ )
115
+ additional_special_tokens_extended = (
116
+ ([mask_token_sent] + additional_special_tokens)
117
+ if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
118
+ else additional_special_tokens
119
+ )
120
+ # fill additional tokens with ..., <unk_102> in case not all additional token slots are already taken
121
+ additional_special_tokens_extended += [
122
+ f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
123
+ ]
124
+
125
+ if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
126
+ raise ValueError(
127
+ "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
128
+ f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
129
+ )
130
+ additional_special_tokens = additional_special_tokens_extended
131
+ else:
132
+ additional_special_tokens_extended = []
133
+ additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
134
+ additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]
135
+
136
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
137
+ self.mask_token_sent = mask_token_sent
138
+ self.vocab_file = vocab_file
139
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
140
+ self.sp_model.Load(vocab_file)
141
+
142
+ _added_tokens_decoder = {
143
+ 0: AddedToken(str(pad_token), special=True),
144
+ 1: AddedToken(str(eos_token), special=True),
145
+ }
146
+
147
+ if self.mask_token_sent is not None:
148
+ _added_tokens_decoder[2] = AddedToken(mask_token_sent, special=True)
149
+ _added_tokens_decoder[3] = AddedToken(str(mask_token), special=True)
150
+
151
+ for i in range(2, self.offset):
152
+ _added_tokens_decoder[len(_added_tokens_decoder)] = AddedToken(f"<unk_{i}>", special=True)
153
+
154
+ # Force update as we want to make sure vocab is enforced (same as fast)
155
+ self._added_tokens_decoder = kwargs.pop("added_tokens_decoder", {})
156
+ self._added_tokens_decoder.update(_added_tokens_decoder)
157
+
158
+ super().__init__(
159
+ eos_token=eos_token,
160
+ unk_token=unk_token,
161
+ mask_token=mask_token,
162
+ pad_token=pad_token,
163
+ mask_token_sent=mask_token_sent,
164
+ offset=offset,
165
+ additional_special_tokens=additional_special_tokens,
166
+ sp_model_kwargs=self.sp_model_kwargs,
167
+ **kwargs,
168
+ )
169
+
170
+ @property
171
+ def vocab_size(self) -> int:
172
+ return len(self.sp_model) + self.offset
173
+
174
+ def get_vocab(self) -> Dict[str, int]:
175
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
176
+ vocab.update(self.added_tokens_encoder)
177
+ return vocab
178
+
179
+ def __getstate__(self):
180
+ state = self.__dict__.copy()
181
+ state["sp_model"] = None
182
+ return state
183
+
184
+ def __setstate__(self, d):
185
+ self.__dict__ = d
186
+
187
+ # for backward compatibility
188
+ if not hasattr(self, "sp_model_kwargs"):
189
+ self.sp_model_kwargs = {}
190
+
191
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
192
+ self.sp_model.Load(self.vocab_file)
193
+
194
+ def _tokenize(self, text: str) -> List[str]:
195
+ """Take as input a string and return a list of strings (tokens) for words/sub-words"""
196
+ return self.sp_model.encode(text, out_type=str)
197
+
198
+ def _convert_token_to_id(self, token: str) -> int:
199
+ """Converts a token (str) to an id using the vocab."""
200
+ sp_id = self.sp_model.piece_to_id(token)
201
+ return sp_id + self.offset
202
+
203
+ def _convert_id_to_token(self, index: int) -> str:
204
+ """Converts an index (integer) to a token (str) using the vocab."""
205
+ if index < self.offset:
206
+ return self.sp_model.IdToPiece(index)
207
+ token = self.sp_model.IdToPiece(index - self.offset)
208
+ return token
209
+
210
+ def convert_tokens_to_string(self, tokens):
211
+ """Converts a sequence of tokens (string) in a single string."""
212
+ current_sub_tokens = []
213
+ out_string = ""
214
+ for token in tokens:
215
+ # make sure that special tokens are not decoded using sentencepiece model
216
+ if token in self.all_special_tokens:
217
+ out_string += self.sp_model.decode(current_sub_tokens) + token
218
+ current_sub_tokens = []
219
+ else:
220
+ current_sub_tokens.append(token)
221
+ out_string += self.sp_model.decode(current_sub_tokens)
222
+ return out_string.strip()
223
+
224
+ def num_special_tokens_to_add(self, pair=False):
225
+ """Just EOS"""
226
+ return 1
227
+
228
+ def _special_token_mask(self, seq):
229
+ all_special_ids = set(self.all_special_ids) # call it once instead of inside list comp
230
+ all_special_ids.remove(self.unk_token_id) # <unk> is only sometimes special
231
+
232
+ return [1 if x in all_special_ids else 0 for x in seq]
233
+
234
+ def get_special_tokens_mask(
235
+ self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
236
+ ) -> List[int]:
237
+ """Get list where entries are [1] if a token is [eos] or [pad] else 0."""
238
+ if already_has_special_tokens:
239
+ return self._special_token_mask(token_ids_0)
240
+ elif token_ids_1 is None:
241
+ return self._special_token_mask(token_ids_0) + [1]
242
+ else:
243
+ return self._special_token_mask(token_ids_0 + token_ids_1) + [1]
244
+
245
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
246
+ """
247
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating
248
+ and adding special tokens. A PEGASUS sequence has the following format, where `X` represents the sequence:
249
+
250
+ - single sequence: `X </s>`
251
+ - pair of sequences: `A B </s>` (not intended use)
252
+
253
+ BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a
254
+ separator.
255
+
256
+ Args:
257
+ token_ids_0 (`List[int]`):
258
+ List of IDs to which the special tokens will be added.
259
+ token_ids_1 (`List[int]`, *optional*):
260
+ Optional second list of IDs for sequence pairs.
261
+
262
+ Returns:
263
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
264
+ """
265
+ if token_ids_1 is None:
266
+ return token_ids_0 + [self.eos_token_id]
267
+ # We don't expect to process pairs, but leave the pair logic for API consistency
268
+ return token_ids_0 + token_ids_1 + [self.eos_token_id]
269
+
270
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
271
+ if not os.path.isdir(save_directory):
272
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
273
+ return
274
+ out_vocab_file = os.path.join(
275
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
276
+ )
277
+
278
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
279
+ copyfile(self.vocab_file, out_vocab_file)
280
+ elif not os.path.isfile(self.vocab_file):
281
+ with open(out_vocab_file, "wb") as fi:
282
+ content_spiece_model = self.sp_model.serialized_model_proto()
283
+ fi.write(content_spiece_model)
284
+
285
+ return (out_vocab_file,)
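
A short sketch of how the offset scheme above plays out in practice (assuming the `google/pegasus-xsum` checkpoint and the `sentencepiece` package are available): ids 0 and 1 stay reserved for `<pad>` and `</s>`, the mask and `<unk_2>`..`<unk_102>` slots fill the rest of the low id range, and ordinary SentencePiece pieces are shifted up by `offset`.

```python
from transformers import PegasusTokenizer

tok = PegasusTokenizer.from_pretrained("google/pegasus-xsum")  # assumed checkpoint

ids = tok("Gap-sentence generation pretrains PEGASUS.")["input_ids"]
print(ids[-1] == tok.eos_token_id)          # True: build_inputs_with_special_tokens appends </s> only
print(tok.pad_token_id, tok.eos_token_id)   # 0 1, matching _added_tokens_decoder above

# regular pieces round-trip through the +offset shift in _convert_token_to_id / _convert_id_to_token
piece = tok.convert_ids_to_tokens(ids[0])
print(tok.convert_tokens_to_ids(piece) == ids[0])  # True
```
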
venv/lib/python3.10/site-packages/transformers/models/pegasus/tokenization_pegasus_fast.py ADDED
@@ -0,0 +1,217 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 Google and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Tokenization class for model PEGASUS."""
16
+
17
+
18
+ import os
19
+ from shutil import copyfile
20
+ from typing import List, Optional, Tuple
21
+
22
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
23
+ from ...utils import is_sentencepiece_available, logging
24
+
25
+
26
+ if is_sentencepiece_available():
27
+ from .tokenization_pegasus import PegasusTokenizer
28
+ else:
29
+ PegasusTokenizer = None
30
+
31
+
32
+ logger = logging.get_logger(__name__)
33
+
34
+
35
+ SPIECE_UNDERLINE = "▁"
36
+
37
+ VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
38
+
39
+
40
+ class PegasusTokenizerFast(PreTrainedTokenizerFast):
41
+ r"""
42
+ Construct a "fast" PEGASUS tokenizer (backed by HuggingFace's *tokenizers* library). Based on
43
+ [Unigram](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models).
44
+
45
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
46
+ refer to this superclass for more information regarding those methods.
47
+
48
+ Args:
49
+ vocab_file (`str`):
50
+ [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
51
+ contains the vocabulary necessary to instantiate a tokenizer.
52
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
53
+ The token used for padding, for example when batching sequences of different lengths.
54
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
55
+ The end of sequence token.
56
+
57
+ <Tip>
58
+
59
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
60
+ The token used is the `sep_token`.
61
+
62
+ </Tip>
63
+
64
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
65
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
66
+ token instead.
67
+ mask_token (`str`, *optional*, defaults to `"<mask_2>"`):
68
+ The token used for masking single token values. This is the token used when training this model with masked
69
+ language modeling (MLM). This is the token that the PEGASUS encoder will try to predict during pretraining.
70
+ It corresponds to *[MASK2]* in [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive
71
+ Summarization](https://arxiv.org/pdf/1912.08777.pdf).
72
+ mask_token_sent (`str`, *optional*, defaults to `"<mask_1>"`):
73
+ The token used for masking whole target sentences. This is the token used when training this model with gap
74
+ sentences generation (GSG). This is the sentence that the PEGASUS decoder will try to predict during
75
+ pretraining. It corresponds to *[MASK1]* in [PEGASUS: Pre-training with Extracted Gap-sentences for
76
+ Abstractive Summarization](https://arxiv.org/pdf/1912.08777.pdf).
77
+ additional_special_tokens (`List[str]`, *optional*):
78
+ Additional special tokens used by the tokenizer. If no additional_special_tokens are provided, <mask_2> and
79
+ <unk_2>, ..., <unk_102> are used as additional special tokens, corresponding to the [original PEGASUS
80
+ tokenizer](https://github.com/google-research/pegasus/blob/939830367bcf411193d2b5eca2f2f90f3f9260ca/pegasus/ops/pretrain_parsing_ops.cc#L66)
81
+ that uses tokens 2 - 104 only for pretraining.
82
+ """
83
+
84
+ vocab_files_names = VOCAB_FILES_NAMES
85
+ slow_tokenizer_class = PegasusTokenizer
86
+ model_input_names = ["input_ids", "attention_mask"]
87
+
88
+ def __init__(
89
+ self,
90
+ vocab_file=None,
91
+ tokenizer_file=None,
92
+ pad_token="<pad>",
93
+ eos_token="</s>",
94
+ unk_token="<unk>",
95
+ mask_token="<mask_2>",
96
+ mask_token_sent="<mask_1>",
97
+ additional_special_tokens=None,
98
+ offset=103, # entries 2 - 104 are only used for pretraining
99
+ **kwargs,
100
+ ):
101
+ self.offset = offset
102
+
103
+ if additional_special_tokens is not None:
104
+ if not isinstance(additional_special_tokens, list):
105
+ raise TypeError(
106
+ f"additional_special_tokens should be of type {type(list)}, but is"
107
+ f" {type(additional_special_tokens)}"
108
+ )
109
+
110
+ additional_special_tokens_extended = (
111
+ ([mask_token_sent] + additional_special_tokens)
112
+ if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
113
+ else additional_special_tokens
114
+ )
115
+ # fill additional tokens with ..., <unk_102> in case not all additional token slots are already taken
116
+ additional_special_tokens_extended += [
117
+ f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
118
+ ]
119
+
120
+ if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
121
+ raise ValueError(
122
+ "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
123
+ f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
124
+ )
125
+ additional_special_tokens = additional_special_tokens_extended
126
+ else:
127
+ additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
128
+ additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]
129
+
130
+ # PEGASUS was designed to support changing the index of the first tokens. If one of the padding/eos/unk/mask tokens
131
+ # is different from the default, we must rebuild the vocab
132
+ from_slow = kwargs.pop("from_slow", None)
133
+ from_slow = from_slow or str(pad_token) != "<pad>" or str(eos_token) != "</s>" or str(unk_token) != "<unk>"
134
+
135
+ kwargs.pop("added_tokens_decoder", {})
136
+
137
+ super().__init__(
138
+ vocab_file,
139
+ tokenizer_file=tokenizer_file,
140
+ pad_token=pad_token,
141
+ eos_token=eos_token,
142
+ unk_token=unk_token,
143
+ mask_token=mask_token,
144
+ mask_token_sent=mask_token_sent,
145
+ offset=offset,
146
+ additional_special_tokens=additional_special_tokens,
147
+ from_slow=from_slow,
148
+ **kwargs,
149
+ )
150
+ self.vocab_file = vocab_file
151
+
152
+ @property
153
+ def can_save_slow_tokenizer(self) -> bool:
154
+ return os.path.isfile(self.vocab_file) if self.vocab_file else False
155
+
156
+ def _special_token_mask(self, seq):
157
+ all_special_ids = set(self.all_special_ids) # call it once instead of inside list comp
158
+ all_special_ids.remove(self.unk_token_id) # <unk> is only sometimes special
159
+
160
+ if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
161
+ raise ValueError(
162
+ "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
163
+ f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
164
+ )
165
+
166
+ return [1 if x in all_special_ids else 0 for x in seq]
167
+
168
+ def get_special_tokens_mask(
169
+ self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
170
+ ) -> List[int]:
171
+ """Get list where entries are [1] if a token is [eos] or [pad] else 0."""
172
+ if already_has_special_tokens:
173
+ return self._special_token_mask(token_ids_0)
174
+ elif token_ids_1 is None:
175
+ return self._special_token_mask(token_ids_0) + [1]
176
+ else:
177
+ return self._special_token_mask(token_ids_0 + token_ids_1) + [1]
178
+
179
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
180
+ """
181
+ Build model inputs from a sequence by adding eos to the end. No bos token is added to the front.
182
+
183
+ - single sequence: `X </s>`
184
+ - pair of sequences: `A B </s>` (not intended use)
185
+
186
+ Args:
187
+ token_ids_0 (`List[int]`):
188
+ List of IDs to which the special tokens will be added
189
+ token_ids_1 (`List[int]`, *optional*):
190
+ Optional second list of IDs for sequence pairs.
191
+
192
+ Returns:
193
+ `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.
194
+ """
195
+ if token_ids_1 is None:
196
+ return token_ids_0 + [self.eos_token_id]
197
+ # We don't expect to process pairs, but leave the pair logic for API consistency
198
+ return token_ids_0 + token_ids_1 + [self.eos_token_id]
199
+
200
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
201
+ if not self.can_save_slow_tokenizer:
202
+ raise ValueError(
203
+ "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
204
+ "tokenizer."
205
+ )
206
+
207
+ if not os.path.isdir(save_directory):
208
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
209
+ return
210
+ out_vocab_file = os.path.join(
211
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
212
+ )
213
+
214
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
215
+ copyfile(self.vocab_file, out_vocab_file)
216
+
217
+ return (out_vocab_file,)
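
For comparison, a small sketch with the fast tokenizer (same assumed checkpoint): it mirrors the slow tokenizer's behaviour of appending only `</s>`, and for ordinary text `get_special_tokens_mask()` flags exactly that trailing token.

```python
from transformers import PegasusTokenizerFast

fast = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")  # assumed checkpoint

enc = fast("The fast tokenizer mirrors the slow one.")["input_ids"]
print(enc[-1] == fast.eos_token_id)  # True: only </s> is appended, no BOS

# 1 marks special tokens (just the trailing </s> here); everything else is 0
print(fast.get_special_tokens_mask(enc, already_has_special_tokens=True))
```
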
venv/lib/python3.10/site-packages/transformers/models/poolformer/__init__.py ADDED
@@ -0,0 +1,83 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
17
+
18
+
19
+ _import_structure = {
20
+ "configuration_poolformer": [
21
+ "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
22
+ "PoolFormerConfig",
23
+ "PoolFormerOnnxConfig",
24
+ ]
25
+ }
26
+
27
+ try:
28
+ if not is_vision_available():
29
+ raise OptionalDependencyNotAvailable()
30
+ except OptionalDependencyNotAvailable:
31
+ pass
32
+ else:
33
+ _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
34
+ _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]
35
+
36
+ try:
37
+ if not is_torch_available():
38
+ raise OptionalDependencyNotAvailable()
39
+ except OptionalDependencyNotAvailable:
40
+ pass
41
+ else:
42
+ _import_structure["modeling_poolformer"] = [
43
+ "POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
44
+ "PoolFormerForImageClassification",
45
+ "PoolFormerModel",
46
+ "PoolFormerPreTrainedModel",
47
+ ]
48
+
49
+
50
+ if TYPE_CHECKING:
51
+ from .configuration_poolformer import (
52
+ POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
53
+ PoolFormerConfig,
54
+ PoolFormerOnnxConfig,
55
+ )
56
+
57
+ try:
58
+ if not is_vision_available():
59
+ raise OptionalDependencyNotAvailable()
60
+ except OptionalDependencyNotAvailable:
61
+ pass
62
+ else:
63
+ from .feature_extraction_poolformer import PoolFormerFeatureExtractor
64
+ from .image_processing_poolformer import PoolFormerImageProcessor
65
+
66
+ try:
67
+ if not is_torch_available():
68
+ raise OptionalDependencyNotAvailable()
69
+ except OptionalDependencyNotAvailable:
70
+ pass
71
+ else:
72
+ from .modeling_poolformer import (
73
+ POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
74
+ PoolFormerForImageClassification,
75
+ PoolFormerModel,
76
+ PoolFormerPreTrainedModel,
77
+ )
78
+
79
+
80
+ else:
81
+ import sys
82
+
83
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
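
The `__init__.py` above follows the library's lazy-import pattern: `_import_structure` registers the vision and torch submodules only when their backends are available, and `_LazyModule` defers the actual imports until a name is first resolved. A small sketch of the effect, assuming `torch` is installed:

```python
from transformers.models.poolformer import PoolFormerConfig, PoolFormerModel

# Resolving the names above is what triggers the lazy imports: PoolFormerConfig pulls in
# configuration_poolformer, while PoolFormerModel pulls in the torch-backed modeling_poolformer
# (that name is only registered in _import_structure when torch is available).
config = PoolFormerConfig()
print(config.hidden_sizes)  # [64, 128, 320, 512] by default (see the configuration docstring below)

model = PoolFormerModel(config)  # randomly initialised weights
```
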
venv/lib/python3.10/site-packages/transformers/models/poolformer/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.28 kB).
 
venv/lib/python3.10/site-packages/transformers/models/poolformer/__pycache__/configuration_poolformer.cpython-310.pyc ADDED
Binary file (5.37 kB).
 
venv/lib/python3.10/site-packages/transformers/models/poolformer/__pycache__/convert_poolformer_original_to_pytorch.cpython-310.pyc ADDED
Binary file (5.42 kB).
 
venv/lib/python3.10/site-packages/transformers/models/poolformer/__pycache__/feature_extraction_poolformer.cpython-310.pyc ADDED
Binary file (1.05 kB).
 
venv/lib/python3.10/site-packages/transformers/models/poolformer/__pycache__/image_processing_poolformer.cpython-310.pyc ADDED
Binary file (14.8 kB).
 
venv/lib/python3.10/site-packages/transformers/models/poolformer/__pycache__/modeling_poolformer.cpython-310.pyc ADDED
Binary file (14.3 kB).
 
venv/lib/python3.10/site-packages/transformers/models/poolformer/configuration_poolformer.py ADDED
@@ -0,0 +1,147 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Sea AI Labs and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PoolFormer model configuration"""
16
+ from collections import OrderedDict
17
+ from typing import Mapping
18
+
19
+ from packaging import version
20
+
21
+ from ...configuration_utils import PretrainedConfig
22
+ from ...onnx import OnnxConfig
23
+ from ...utils import logging
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+
29
+ from ..deprecated._archive_maps import POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
30
+
31
+
32
+ class PoolFormerConfig(PretrainedConfig):
33
+ r"""
34
+ This is the configuration class to store the configuration of [`PoolFormerModel`]. It is used to instantiate a
35
+ PoolFormer model according to the specified arguments, defining the model architecture. Instantiating a
36
+ configuration with the defaults will yield a similar configuration to that of the PoolFormer
37
+ [sail/poolformer_s12](https://huggingface.co/sail/poolformer_s12) architecture.
38
+
39
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
40
+ documentation from [`PretrainedConfig`] for more information.
41
+
42
+
43
+ Args:
44
+ num_channels (`int`, *optional*, defaults to 3):
45
+ The number of channels in the input image.
46
+ patch_size (`int`, *optional*, defaults to 16):
47
+ The size of the input patch.
48
+ stride (`int`, *optional*, defaults to 16):
49
+ The stride of the input patch.
50
+ pool_size (`int`, *optional*, defaults to 3):
51
+ The size of the pooling window.
52
+ mlp_ratio (`float`, *optional*, defaults to 4.0):
53
+ The ratio of the number of channels in the output of the MLP to the number of channels in the input.
54
+ depths (`list`, *optional*, defaults to `[2, 2, 6, 2]`):
55
+ The depth of each encoder block.
56
+ hidden_sizes (`list`, *optional*, defaults to `[64, 128, 320, 512]`):
57
+ The hidden sizes of each encoder block.
58
+ patch_sizes (`list`, *optional*, defaults to `[7, 3, 3, 3]`):
59
+ The size of the input patch for each encoder block.
60
+ strides (`list`, *optional*, defaults to `[4, 2, 2, 2]`):
61
+ The stride of the input patch for each encoder block.
62
+ padding (`list`, *optional*, defaults to `[2, 1, 1, 1]`):
63
+ The padding of the input patch for each encoder block.
64
+ num_encoder_blocks (`int`, *optional*, defaults to 4):
65
+ The number of encoder blocks.
66
+ drop_path_rate (`float`, *optional*, defaults to 0.0):
67
+ The drop rate for stochastic depth (drop path) applied inside the encoder blocks.
68
+ hidden_act (`str`, *optional*, defaults to `"gelu"`):
69
+ The activation function for the hidden layers.
70
+ use_layer_scale (`bool`, *optional*, defaults to `True`):
71
+ Whether to use layer scale.
72
+ layer_scale_init_value (`float`, *optional*, defaults to 1e-05):
73
+ The initial value for the layer scale.
74
+ initializer_range (`float`, *optional*, defaults to 0.02):
75
+ The initializer range for the weights.
76
+
77
+ Example:
78
+
79
+ ```python
80
+ >>> from transformers import PoolFormerConfig, PoolFormerModel
81
+
82
+ >>> # Initializing a PoolFormer sail/poolformer_s12 style configuration
83
+ >>> configuration = PoolFormerConfig()
84
+
85
+ >>> # Initializing a model (with random weights) from the sail/poolformer_s12 style configuration
86
+ >>> model = PoolFormerModel(configuration)
87
+
88
+ >>> # Accessing the model configuration
89
+ >>> configuration = model.config
90
+ ```
91
+ """
92
+
93
+ model_type = "poolformer"
94
+
95
+ def __init__(
96
+ self,
97
+ num_channels=3,
98
+ patch_size=16,
99
+ stride=16,
100
+ pool_size=3,
101
+ mlp_ratio=4.0,
102
+ depths=[2, 2, 6, 2],
103
+ hidden_sizes=[64, 128, 320, 512],
104
+ patch_sizes=[7, 3, 3, 3],
105
+ strides=[4, 2, 2, 2],
106
+ padding=[2, 1, 1, 1],
107
+ num_encoder_blocks=4,
108
+ drop_path_rate=0.0,
109
+ hidden_act="gelu",
110
+ use_layer_scale=True,
111
+ layer_scale_init_value=1e-5,
112
+ initializer_range=0.02,
113
+ **kwargs,
114
+ ):
115
+ self.num_channels = num_channels
116
+ self.patch_size = patch_size
117
+ self.stride = stride
118
+ self.padding = padding
119
+ self.pool_size = pool_size
120
+ self.hidden_sizes = hidden_sizes
121
+ self.mlp_ratio = mlp_ratio
122
+ self.depths = depths
123
+ self.patch_sizes = patch_sizes
124
+ self.strides = strides
125
+ self.num_encoder_blocks = num_encoder_blocks
126
+ self.drop_path_rate = drop_path_rate
127
+ self.hidden_act = hidden_act
128
+ self.use_layer_scale = use_layer_scale
129
+ self.layer_scale_init_value = layer_scale_init_value
130
+ self.initializer_range = initializer_range
131
+ super().__init__(**kwargs)
132
+
133
+
134
+ class PoolFormerOnnxConfig(OnnxConfig):
135
+ torch_onnx_minimum_version = version.parse("1.11")
136
+
137
+ @property
138
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
139
+ return OrderedDict(
140
+ [
141
+ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
142
+ ]
143
+ )
144
+
145
+ @property
146
+ def atol_for_validation(self) -> float:
147
+ return 2e-3
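For orientation, a short hedged usage sketch of the two classes defined above; the argument values simply restate the s12-style defaults and are not tied to a specific checkpoint:

```python
# Hedged sketch: building a configuration and its ONNX export description.
from transformers.models.poolformer import PoolFormerConfig, PoolFormerOnnxConfig

config = PoolFormerConfig(
    depths=[2, 2, 6, 2],              # stage depths (default s12-style layout)
    hidden_sizes=[64, 128, 320, 512],
    mlp_ratio=4.0,
)

onnx_config = PoolFormerOnnxConfig(config)
print(onnx_config.inputs)               # OrderedDict with a single "pixel_values" entry
print(onnx_config.atol_for_validation)  # 0.002
```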
venv/lib/python3.10/site-packages/transformers/models/poolformer/convert_poolformer_original_to_pytorch.py ADDED
@@ -0,0 +1,214 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert PoolFormer checkpoints from the original repository. URL: https://github.com/sail-sg/poolformer"""
16
+
17
+ import argparse
18
+ import json
19
+ from collections import OrderedDict
20
+ from pathlib import Path
21
+
22
+ import requests
23
+ import torch
24
+ from huggingface_hub import hf_hub_download
25
+ from PIL import Image
26
+
27
+ from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
28
+ from transformers.utils import logging
29
+
30
+
31
+ logging.set_verbosity_info()
32
+ logger = logging.get_logger(__name__)
33
+
34
+
35
+ def replace_key_with_offset(key, offset, original_name, new_name):
36
+ """
37
+ Replaces the key by subtracting the offset from the original layer number
38
+ """
39
+ to_find = original_name.split(".")[0]
40
+ key_list = key.split(".")
41
+ orig_block_num = int(key_list[key_list.index(to_find) - 2])
42
+ layer_num = int(key_list[key_list.index(to_find) - 1])
43
+ new_block_num = orig_block_num - offset
44
+
45
+ key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
46
+ return key
47
+
48
+
49
+ def rename_keys(state_dict):
50
+ new_state_dict = OrderedDict()
51
+ total_embed_found, patch_emb_offset = 0, 0
52
+ for key, value in state_dict.items():
53
+ if key.startswith("network"):
54
+ key = key.replace("network", "poolformer.encoder")
55
+ if "proj" in key:
56
+ # Works for the first embedding as well as the internal embedding layers
57
+ if key.endswith("bias") and "patch_embed" not in key:
58
+ patch_emb_offset += 1
59
+ to_replace = key[: key.find("proj")]
60
+ key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
61
+ key = key.replace("proj", "projection")
62
+ if key.endswith("bias"):
63
+ total_embed_found += 1
64
+ if "patch_embeddings" in key:
65
+ key = "poolformer.encoder." + key
66
+ if "mlp.fc1" in key:
67
+ key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
68
+ if "mlp.fc2" in key:
69
+ key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
70
+ if "norm1" in key:
71
+ key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
72
+ if "norm2" in key:
73
+ key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
74
+ if "layer_scale_1" in key:
75
+ key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
76
+ if "layer_scale_2" in key:
77
+ key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
78
+ if "head" in key:
79
+ key = key.replace("head", "classifier")
80
+ new_state_dict[key] = value
81
+ return new_state_dict
82
+
83
+
84
+ # We will verify our results on a COCO image
85
+ def prepare_img():
86
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
87
+ image = Image.open(requests.get(url, stream=True).raw)
88
+
89
+ return image
90
+
91
+
92
+ @torch.no_grad()
93
+ def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
94
+ """
95
+ Copy/paste/tweak model's weights to our PoolFormer structure.
96
+ """
97
+
98
+ # load default PoolFormer configuration
99
+ config = PoolFormerConfig()
100
+
101
+ # set attributes based on model_name
102
+ repo_id = "huggingface/label-files"
103
+ size = model_name[-3:]
104
+ config.num_labels = 1000
105
+ filename = "imagenet-1k-id2label.json"
106
+ expected_shape = (1, 1000)
107
+
108
+ # set config attributes
109
+ id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
110
+ id2label = {int(k): v for k, v in id2label.items()}
111
+ config.id2label = id2label
112
+ config.label2id = {v: k for k, v in id2label.items()}
113
+ if size == "s12":
114
+ config.depths = [2, 2, 6, 2]
115
+ config.hidden_sizes = [64, 128, 320, 512]
116
+ config.mlp_ratio = 4.0
117
+ crop_pct = 0.9
118
+ elif size == "s24":
119
+ config.depths = [4, 4, 12, 4]
120
+ config.hidden_sizes = [64, 128, 320, 512]
121
+ config.mlp_ratio = 4.0
122
+ crop_pct = 0.9
123
+ elif size == "s36":
124
+ config.depths = [6, 6, 18, 6]
125
+ config.hidden_sizes = [64, 128, 320, 512]
126
+ config.mlp_ratio = 4.0
127
+ config.layer_scale_init_value = 1e-6
128
+ crop_pct = 0.9
129
+ elif size == "m36":
130
+ config.depths = [6, 6, 18, 6]
131
+ config.hidden_sizes = [96, 192, 384, 768]
132
+ config.mlp_ratio = 4.0
133
+ config.layer_scale_init_value = 1e-6
134
+ crop_pct = 0.95
135
+ elif size == "m48":
136
+ config.depths = [8, 8, 24, 8]
137
+ config.hidden_sizes = [96, 192, 384, 768]
138
+ config.mlp_ratio = 4.0
139
+ config.layer_scale_init_value = 1e-6
140
+ crop_pct = 0.95
141
+ else:
142
+ raise ValueError(f"Size {size} not supported")
143
+
144
+ # load image processor
145
+ image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
146
+
147
+ # Prepare image
148
+ image = prepare_img()
149
+ pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
150
+
151
+ logger.info(f"Converting model {model_name}...")
152
+
153
+ # load original state dict
154
+ state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
155
+
156
+ # rename keys
157
+ state_dict = rename_keys(state_dict)
158
+
159
+ # create HuggingFace model and load state dict
160
+ model = PoolFormerForImageClassification(config)
161
+ model.load_state_dict(state_dict)
162
+ model.eval()
163
+
164
+ # Define image processor
165
+ image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
166
+ pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values
167
+
168
+ # forward pass
169
+ outputs = model(pixel_values)
170
+ logits = outputs.logits
171
+
172
+ # define expected logit slices for different models
173
+ if size == "s12":
174
+ expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
175
+ elif size == "s24":
176
+ expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
177
+ elif size == "s36":
178
+ expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
179
+ elif size == "m36":
180
+ expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
181
+ elif size == "m48":
182
+ expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
183
+ else:
184
+ raise ValueError(f"Size {size} not supported")
185
+
186
+ # verify logits
187
+ assert logits.shape == expected_shape
188
+ assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)
189
+
190
+ # finally, save model and image processor
191
+ logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
192
+ Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
193
+ model.save_pretrained(pytorch_dump_folder_path)
194
+ print(f"Saving image processor to {pytorch_dump_folder_path}")
195
+ image_processor.save_pretrained(pytorch_dump_folder_path)
196
+
197
+
198
+ if __name__ == "__main__":
199
+ parser = argparse.ArgumentParser()
200
+
201
+ parser.add_argument(
202
+ "--model_name",
203
+ default="poolformer_s12",
204
+ type=str,
205
+ help="Name of the model you'd like to convert.",
206
+ )
207
+ parser.add_argument(
208
+ "--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
209
+ )
210
+ parser.add_argument(
211
+ "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
212
+ )
213
+ args = parser.parse_args()
214
+ convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
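A hedged example of driving the converter programmatically rather than through `argparse`; the paths are placeholders, and the size variant is inferred from the last three characters of `model_name`:

```python
# Hedged sketch: converting an original PoolFormer checkpoint (paths are placeholders).
from transformers.models.poolformer.convert_poolformer_original_to_pytorch import (
    convert_poolformer_checkpoint,
)

convert_poolformer_checkpoint(
    model_name="poolformer_s12",
    checkpoint_path="/path/to/poolformer_s12.pth.tar",  # original weights (placeholder)
    pytorch_dump_folder_path="./poolformer_s12_hf",     # output directory (placeholder)
)
```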
venv/lib/python3.10/site-packages/transformers/models/poolformer/feature_extraction_poolformer.py ADDED
@@ -0,0 +1,33 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Feature extractor class for PoolFormer."""
16
+
17
+ import warnings
18
+
19
+ from ...utils import logging
20
+ from .image_processing_poolformer import PoolFormerImageProcessor
21
+
22
+
23
+ logger = logging.get_logger(__name__)
24
+
25
+
26
+ class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
27
+ def __init__(self, *args, **kwargs) -> None:
28
+ warnings.warn(
29
+ "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
30
+ " Please use PoolFormerImageProcessor instead.",
31
+ FutureWarning,
32
+ )
33
+ super().__init__(*args, **kwargs)
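The class above is a pure deprecation shim around the image processor. A short hedged sketch of the behaviour it preserves:

```python
# Hedged sketch: the legacy feature extractor still works but emits a FutureWarning.
import warnings

from transformers import PoolFormerFeatureExtractor, PoolFormerImageProcessor

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    legacy = PoolFormerFeatureExtractor()
assert any(issubclass(w.category, FutureWarning) for w in caught)

# Preferred, equivalent replacement going forward.
processor = PoolFormerImageProcessor()
```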
venv/lib/python3.10/site-packages/transformers/models/poolformer/image_processing_poolformer.py ADDED
@@ -0,0 +1,377 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Image processor class for PoolFormer."""
16
+
17
+ from typing import Dict, List, Optional, Union
18
+
19
+ import numpy as np
20
+
21
+ from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
22
+ from ...image_transforms import (
23
+ get_resize_output_image_size,
24
+ resize,
25
+ to_channel_dimension_format,
26
+ )
27
+ from ...image_utils import (
28
+ IMAGENET_DEFAULT_MEAN,
29
+ IMAGENET_DEFAULT_STD,
30
+ ChannelDimension,
31
+ ImageInput,
32
+ PILImageResampling,
33
+ infer_channel_dimension_format,
34
+ is_scaled_image,
35
+ make_list_of_images,
36
+ to_numpy_array,
37
+ valid_images,
38
+ validate_kwargs,
39
+ validate_preprocess_arguments,
40
+ )
41
+ from ...utils import TensorType, is_vision_available, logging
42
+
43
+
44
+ if is_vision_available():
45
+ import PIL
46
+
47
+
48
+ logger = logging.get_logger(__name__)
49
+
50
+
51
+ class PoolFormerImageProcessor(BaseImageProcessor):
52
+ r"""
53
+ Constructs a PoolFormer image processor.
54
+
55
+ Args:
56
+ do_resize (`bool`, *optional*, defaults to `True`):
57
+ Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
58
+ `do_resize` in the `preprocess` method.
59
+ size (`Dict[str, int]`, *optional*, defaults to `{"shortest_edge": 224}`):
60
+ Size of the image after resizing. Can be overridden by `size` in the `preprocess` method. If crop_pct is
61
+ unset:
62
+ - size is `{"height": h, "width": w}`: the image is resized to `(h, w)`.
63
+ - size is `{"shortest_edge": s}`: the shortest edge of the image is resized to s whilst maintaining the
64
+ aspect ratio.
65
+
66
+ If crop_pct is set:
67
+ - size is `{"height": h, "width": w}`: the image is resized to `(int(floor(h/crop_pct)),
68
+ int(floor(w/crop_pct)))`
69
+ - size is `{"height": c, "width": c}`: the shortest edge of the image is resized to `int(floor(c/crop_pct))`
70
+ whilst maintaining the aspect ratio.
71
+ - size is `{"shortest_edge": c}`: the shortest edge of the image is resized to `int(floor(c/crop_pct))`
72
+ whilst maintaining the aspect ratio.
73
+ crop_pct (`float`, *optional*, defaults to 0.9):
74
+ Percentage of the image to crop from the center. Can be overridden by `crop_pct` in the `preprocess`
75
+ method.
76
+ resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
77
+ Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
78
+ do_center_crop (`bool`, *optional*, defaults to `True`):
79
+ Whether to center crop the image. If the input size is smaller than `crop_size` along any edge, the image
80
+ is padded with 0's and then center cropped. Can be overridden by `do_center_crop` in the `preprocess`
81
+ method.
82
+ crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`):
83
+ Size of the image after applying center crop. Only has an effect if `do_center_crop` is set to `True`. Can
84
+ be overridden by the `crop_size` parameter in the `preprocess` method.
85
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
86
+ Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
87
+ `preprocess` method.
88
+ do_rescale (`bool`, *optional*, defaults to `True`):
89
+ Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
90
+ parameter in the `preprocess` method.
91
+ do_normalize (`bool`, *optional*, defaults to `True`):
92
+ Controls whether to normalize the image. Can be overridden by the `do_normalize` parameter in the
93
+ `preprocess` method.
94
+ image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`):
95
+ Mean to use if normalizing the image. This is a float or list of floats the length of the number of
96
+ channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
97
+ image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`):
98
+ Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
99
+ number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
100
+ """
101
+
102
+ model_input_names = ["pixel_values"]
103
+
104
+ def __init__(
105
+ self,
106
+ do_resize: bool = True,
107
+ size: Dict[str, int] = None,
108
+ crop_pct: float = 0.9,
109
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
110
+ do_center_crop: bool = True,
111
+ crop_size: Dict[str, int] = None,
112
+ rescale_factor: Union[int, float] = 1 / 255,
113
+ do_rescale: bool = True,
114
+ do_normalize: bool = True,
115
+ image_mean: Optional[Union[float, List[float]]] = None,
116
+ image_std: Optional[Union[float, List[float]]] = None,
117
+ **kwargs,
118
+ ) -> None:
119
+ super().__init__(**kwargs)
120
+ size = size if size is not None else {"shortest_edge": 224}
121
+ size = get_size_dict(size, default_to_square=False)
122
+ crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
123
+ crop_size = get_size_dict(crop_size, param_name="crop_size")
124
+
125
+ self.do_resize = do_resize
126
+ self.size = size
127
+ self.crop_pct = crop_pct
128
+ self.resample = resample
129
+ self.do_center_crop = do_center_crop
130
+ self.crop_size = crop_size
131
+ self.do_rescale = do_rescale
132
+ self.rescale_factor = rescale_factor
133
+ self.do_normalize = do_normalize
134
+ self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
135
+ self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
136
+ self._valid_processor_keys = [
137
+ "images",
138
+ "do_resize",
139
+ "size",
140
+ "crop_pct",
141
+ "resample",
142
+ "do_center_crop",
143
+ "crop_size",
144
+ "do_rescale",
145
+ "rescale_factor",
146
+ "do_normalize",
147
+ "image_mean",
148
+ "image_std",
149
+ "return_tensors",
150
+ "data_format",
151
+ "input_data_format",
152
+ ]
153
+
154
+ def resize(
155
+ self,
156
+ image: np.ndarray,
157
+ size: Dict[str, int],
158
+ crop_pct: Optional[float] = None,
159
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
160
+ data_format: Optional[Union[str, ChannelDimension]] = None,
161
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
162
+ **kwargs,
163
+ ) -> np.ndarray:
164
+ """
165
+ Resize an image.
166
+
167
+ If crop_pct is unset:
168
+ - size is `{"height": h, "width": w}`: the image is resized to `(h, w)`.
169
+ - size is `{"shortest_edge": s}`: the shortest edge of the image is resized to s whilst maintaining the
170
+ aspect ratio.
171
+
172
+ if crop_pct is set:
173
+ - size is `{"height": h, "width": w}`: the image is resized to `(int(floor(h/crop_pct)),
174
+ int(floor(w/crop_pct)))`
175
+ - size is `{"height": c, "width": c}`: the shortest edge of the image is resized to `int(floor(c/crop_pct))`
176
+ whilst maintaining the aspect ratio.
177
+ - size is `{"shortest_edge": c}`: the shortest edge of the image is resized to `int(floor(c/crop_pct))`
178
+ whilst maintaining the aspect ratio.
179
+
180
+ Args:
181
+ image (`np.ndarray`):
182
+ Image to resize.
183
+ size (`Dict[str, int]`):
184
+ Size of the output image.
185
+ crop_pct (`float`, *optional*):
186
+ Percentage of the image that will be cropped from the center. If set, the image is resized to `size / crop_pct` rather than `size`, so that a subsequent center crop yields the requested resolution.
187
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
188
+ Resampling filter to use when resizing the image.
189
+ data_format (`str` or `ChannelDimension`, *optional*):
190
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
191
+ input_data_format (`str` or `ChannelDimension`, *optional*):
192
+ The channel dimension format of the input image. If not provided, it will be inferred.
193
+ """
194
+ size = get_size_dict(size, default_to_square=False)
195
+ if "shortest_edge" not in size and ("height" not in size or "width" not in size):
196
+ raise ValueError(f"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
197
+ if crop_pct is not None:
198
+ if "shortest_edge" in size:
199
+ scale_size = int(size["shortest_edge"] / crop_pct)
200
+ elif "height" in size and "width" in size:
201
+ if size["height"] == size["width"]:
202
+ scale_size = int(size["height"] / crop_pct)
203
+ else:
204
+ scale_size = (int(size["height"] / crop_pct), int(size["width"] / crop_pct))
205
+ else:
206
+ raise ValueError("Invalid size for resize: {}".format(size))
207
+
208
+ output_size = get_resize_output_image_size(
209
+ image, size=scale_size, default_to_square=False, input_data_format=input_data_format
210
+ )
211
+ else:
212
+ if "shortest_edge" in size:
213
+ output_size = get_resize_output_image_size(
214
+ image, size=size["shortest_edge"], default_to_square=False, input_data_format=input_data_format
215
+ )
216
+ elif "height" in size and "width" in size:
217
+ output_size = (size["height"], size["width"])
218
+ else:
219
+ raise ValueError("Invalid size for resize: {}".format(size))
220
+
221
+ return resize(
222
+ image,
223
+ size=output_size,
224
+ resample=resample,
225
+ data_format=data_format,
226
+ input_data_format=input_data_format,
227
+ **kwargs,
228
+ )
229
+
230
+ def preprocess(
231
+ self,
232
+ images: ImageInput,
233
+ do_resize: bool = None,
234
+ size: Dict[str, int] = None,
235
+ crop_pct: float = None,
236
+ resample: PILImageResampling = None,
237
+ do_center_crop: bool = None,
238
+ crop_size: Dict[str, int] = None,
239
+ do_rescale: bool = None,
240
+ rescale_factor: float = None,
241
+ do_normalize: bool = None,
242
+ image_mean: Optional[Union[float, List[float]]] = None,
243
+ image_std: Optional[Union[float, List[float]]] = None,
244
+ return_tensors: Optional[Union[str, TensorType]] = None,
245
+ data_format: ChannelDimension = ChannelDimension.FIRST,
246
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
247
+ **kwargs,
248
+ ) -> PIL.Image.Image:
249
+ """
250
+ Preprocess an image or batch of images.
251
+
252
+ Args:
253
+ images (`ImageInput`):
254
+ Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
255
+ passing in images with pixel values between 0 and 1, set `do_rescale=False`.
256
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
257
+ Whether to resize the image.
258
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
259
+ Size of the image after applying resize.
260
+ crop_pct (`float`, *optional*, defaults to `self.crop_pct`):
261
+ Percentage of the image to crop. Only has an effect if `do_resize` is set to `True`.
262
+ resample (`int`, *optional*, defaults to `self.resample`):
263
+ Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
264
+ has an effect if `do_resize` is set to `True`.
265
+ do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
266
+ Whether to center crop the image.
267
+ crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
268
+ Size of the image after applying center crop.
269
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
270
+ Whether to rescale the image values between [0 - 1].
271
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
272
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
273
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
274
+ Whether to normalize the image.
275
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
276
+ Image mean.
277
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
278
+ Image standard deviation.
279
+ return_tensors (`str` or `TensorType`, *optional*):
280
+ The type of tensors to return. Can be one of:
281
+ - Unset: Return a list of `np.ndarray`.
282
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
283
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
284
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
285
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
286
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
287
+ The channel dimension format for the output image. Can be one of:
288
+ - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
289
+ - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
290
+ input_data_format (`ChannelDimension` or `str`, *optional*):
291
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
292
+ from the input image. Can be one of:
293
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
294
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
295
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
296
+ """
297
+ do_resize = do_resize if do_resize is not None else self.do_resize
298
+ crop_pct = crop_pct if crop_pct is not None else self.crop_pct
299
+ resample = resample if resample is not None else self.resample
300
+ do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
301
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
302
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
303
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
304
+ image_mean = image_mean if image_mean is not None else self.image_mean
305
+ image_std = image_std if image_std is not None else self.image_std
306
+
307
+ size = size if size is not None else self.size
308
+ size = get_size_dict(size, default_to_square=False)
309
+ crop_size = crop_size if crop_size is not None else self.crop_size
310
+ crop_size = get_size_dict(crop_size, param_name="crop_size")
311
+
312
+ images = make_list_of_images(images)
313
+
314
+ validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
315
+
316
+ if not valid_images(images):
317
+ raise ValueError(
318
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
319
+ "torch.Tensor, tf.Tensor or jax.ndarray."
320
+ )
321
+ validate_preprocess_arguments(
322
+ do_rescale=do_rescale,
323
+ rescale_factor=rescale_factor,
324
+ do_normalize=do_normalize,
325
+ image_mean=image_mean,
326
+ image_std=image_std,
327
+ do_center_crop=do_center_crop,
328
+ crop_size=crop_size,
329
+ do_resize=do_resize,
330
+ size=size,
331
+ resample=resample,
332
+ )
333
+
334
+ # All transformations expect numpy arrays.
335
+ images = [to_numpy_array(image) for image in images]
336
+
337
+ if is_scaled_image(images[0]) and do_rescale:
338
+ logger.warning_once(
339
+ "It looks like you are trying to rescale already rescaled images. If the input"
340
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
341
+ )
342
+
343
+ if input_data_format is None:
344
+ # We assume that all images have the same channel dimension format.
345
+ input_data_format = infer_channel_dimension_format(images[0])
346
+
347
+ if do_resize:
348
+ images = [
349
+ self.resize(
350
+ image=image, size=size, crop_pct=crop_pct, resample=resample, input_data_format=input_data_format
351
+ )
352
+ for image in images
353
+ ]
354
+
355
+ if do_center_crop:
356
+ images = [
357
+ self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) for image in images
358
+ ]
359
+
360
+ if do_rescale:
361
+ images = [
362
+ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
363
+ for image in images
364
+ ]
365
+
366
+ if do_normalize:
367
+ images = [
368
+ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
369
+ for image in images
370
+ ]
371
+
372
+ images = [
373
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
374
+ ]
375
+
376
+ data = {"pixel_values": images}
377
+ return BatchFeature(data=data, tensor_type=return_tensors)
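End to end, the processor is usually invoked directly (the call delegates to `preprocess`). A hedged sketch using the same COCO test image referenced by the conversion script; any local image works the same way:

```python
# Hedged sketch: preprocessing one image into a PyTorch batch.
import requests
from PIL import Image

from transformers import PoolFormerImageProcessor

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = PoolFormerImageProcessor(crop_pct=0.9)
inputs = processor(images=image, return_tensors="pt")
# With the default size / crop_size this should be [1, 3, 224, 224].
print(inputs["pixel_values"].shape)
```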