applied-ai-018 committed
Commit d2d9ffe · verified · 1 Parent(s): dbfc818

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/transformers/models/autoformer/__init__.py +63 -0
  2. env-llmeval/lib/python3.10/site-packages/transformers/models/codegen/__init__.py +73 -0
  3. env-llmeval/lib/python3.10/site-packages/transformers/models/codegen/__pycache__/__init__.cpython-310.pyc +0 -0
  4. env-llmeval/lib/python3.10/site-packages/transformers/models/codegen/__pycache__/configuration_codegen.cpython-310.pyc +0 -0
  5. env-llmeval/lib/python3.10/site-packages/transformers/models/codegen/__pycache__/modeling_codegen.cpython-310.pyc +0 -0
  6. env-llmeval/lib/python3.10/site-packages/transformers/models/codegen/__pycache__/tokenization_codegen.cpython-310.pyc +0 -0
  7. env-llmeval/lib/python3.10/site-packages/transformers/models/codegen/__pycache__/tokenization_codegen_fast.cpython-310.pyc +0 -0
  8. env-llmeval/lib/python3.10/site-packages/transformers/models/codegen/configuration_codegen.py +242 -0
  9. env-llmeval/lib/python3.10/site-packages/transformers/models/codegen/modeling_codegen.py +733 -0
  10. env-llmeval/lib/python3.10/site-packages/transformers/models/codegen/tokenization_codegen.py +396 -0
  11. env-llmeval/lib/python3.10/site-packages/transformers/models/codegen/tokenization_codegen_fast.py +255 -0
  12. env-llmeval/lib/python3.10/site-packages/transformers/models/convnext/__init__.py +102 -0
  13. env-llmeval/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/configuration_convnext.cpython-310.pyc +0 -0
  14. env-llmeval/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/convert_convnext_to_pytorch.cpython-310.pyc +0 -0
  15. env-llmeval/lib/python3.10/site-packages/transformers/models/convnext/configuration_convnext.py +144 -0
  16. env-llmeval/lib/python3.10/site-packages/transformers/models/convnext/convert_convnext_to_pytorch.py +243 -0
  17. env-llmeval/lib/python3.10/site-packages/transformers/models/convnext/feature_extraction_convnext.py +33 -0
  18. env-llmeval/lib/python3.10/site-packages/transformers/models/convnext/image_processing_convnext.py +338 -0
  19. env-llmeval/lib/python3.10/site-packages/transformers/models/convnext/modeling_convnext.py +553 -0
  20. env-llmeval/lib/python3.10/site-packages/transformers/models/convnext/modeling_tf_convnext.py +667 -0
  21. env-llmeval/lib/python3.10/site-packages/transformers/models/dit/__init__.py +0 -0
  22. env-llmeval/lib/python3.10/site-packages/transformers/models/dit/__pycache__/__init__.cpython-310.pyc +0 -0
  23. env-llmeval/lib/python3.10/site-packages/transformers/models/dit/__pycache__/convert_dit_unilm_to_pytorch.cpython-310.pyc +0 -0
  24. env-llmeval/lib/python3.10/site-packages/transformers/models/dit/convert_dit_unilm_to_pytorch.py +231 -0
  25. env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/__init__.py +75 -0
  26. env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/__init__.cpython-310.pyc +0 -0
  27. env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/configuration_glpn.cpython-310.pyc +0 -0
  28. env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/convert_glpn_to_pytorch.cpython-310.pyc +0 -0
  29. env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/feature_extraction_glpn.cpython-310.pyc +0 -0
  30. env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/image_processing_glpn.cpython-310.pyc +0 -0
  31. env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/modeling_glpn.cpython-310.pyc +0 -0
  32. env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/configuration_glpn.py +137 -0
  33. env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/convert_glpn_to_pytorch.py +219 -0
  34. env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/feature_extraction_glpn.py +33 -0
  35. env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/image_processing_glpn.py +233 -0
  36. env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/modeling_glpn.py +780 -0
  37. env-llmeval/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/__init__.cpython-310.pyc +0 -0
  38. env-llmeval/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/configuration_lxmert.cpython-310.pyc +0 -0
  39. env-llmeval/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/convert_lxmert_original_tf_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  40. env-llmeval/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/modeling_lxmert.cpython-310.pyc +0 -0
  41. env-llmeval/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/modeling_tf_lxmert.cpython-310.pyc +0 -0
  42. env-llmeval/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/tokenization_lxmert.cpython-310.pyc +0 -0
  43. env-llmeval/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/tokenization_lxmert_fast.cpython-310.pyc +0 -0
  44. env-llmeval/lib/python3.10/site-packages/transformers/models/lxmert/configuration_lxmert.py +171 -0
  45. env-llmeval/lib/python3.10/site-packages/transformers/models/lxmert/convert_lxmert_original_tf_checkpoint_to_pytorch.py +60 -0
  46. env-llmeval/lib/python3.10/site-packages/transformers/models/lxmert/tokenization_lxmert_fast.py +191 -0
  47. env-llmeval/lib/python3.10/site-packages/transformers/models/megatron_gpt2/__init__.py +13 -0
  48. env-llmeval/lib/python3.10/site-packages/transformers/models/megatron_gpt2/__pycache__/__init__.cpython-310.pyc +0 -0
  49. env-llmeval/lib/python3.10/site-packages/transformers/models/megatron_gpt2/__pycache__/checkpoint_reshaping_and_interoperability.cpython-310.pyc +0 -0
  50. env-llmeval/lib/python3.10/site-packages/transformers/models/megatron_gpt2/__pycache__/convert_megatron_gpt2_checkpoint.cpython-310.pyc +0 -0
env-llmeval/lib/python3.10/site-packages/transformers/models/autoformer/__init__.py ADDED
@@ -0,0 +1,63 @@
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ from typing import TYPE_CHECKING
+
+ # rely on isort to merge the imports
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+
+
+ _import_structure = {
+     "configuration_autoformer": [
+         "AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
+         "AutoformerConfig",
+     ],
+ }
+
+ try:
+     if not is_torch_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_autoformer"] = [
+         "AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
+         "AutoformerForPrediction",
+         "AutoformerModel",
+         "AutoformerPreTrainedModel",
+     ]
+
+
+ if TYPE_CHECKING:
+     from .configuration_autoformer import (
+         AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
+         AutoformerConfig,
+     )
+
+     try:
+         if not is_torch_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_autoformer import (
+             AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
+             AutoformerForPrediction,
+             AutoformerModel,
+             AutoformerPreTrainedModel,
+         )
+
+ else:
+     import sys
+
+     sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
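
Note: the `_LazyModule` registration above defers the actual submodule imports until an attribute is first accessed. A minimal sketch of how this lazy `__init__` is typically consumed (assuming the `transformers` install shipped in this env-llmeval folder is importable; the attribute names come straight from `_import_structure`):

```python
# Minimal sketch, assuming this environment's transformers install is on the path.
import transformers
from transformers.utils import is_torch_available

# "configuration_autoformer" is registered unconditionally, so the config is always importable.
config = transformers.AutoformerConfig()
print(config.model_type)  # -> "autoformer"

# The modeling symbols are only registered when torch is present; without it,
# attribute access raises an import error naming the missing backend.
if is_torch_available():
    print(transformers.AutoformerModel.__name__)  # resolved lazily on first access
```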
env-llmeval/lib/python3.10/site-packages/transformers/models/codegen/__init__.py ADDED
@@ -0,0 +1,73 @@
+ # Copyright 2022 Salesforce authors, The EleutherAI, and HuggingFace Teams. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ from typing import TYPE_CHECKING
+
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
+
+
+ _import_structure = {
+     "configuration_codegen": ["CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP", "CodeGenConfig", "CodeGenOnnxConfig"],
+     "tokenization_codegen": ["CodeGenTokenizer"],
+ }
+
+ try:
+     if not is_tokenizers_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["tokenization_codegen_fast"] = ["CodeGenTokenizerFast"]
+
+ try:
+     if not is_torch_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_codegen"] = [
+         "CODEGEN_PRETRAINED_MODEL_ARCHIVE_LIST",
+         "CodeGenForCausalLM",
+         "CodeGenModel",
+         "CodeGenPreTrainedModel",
+     ]
+
+ if TYPE_CHECKING:
+     from .configuration_codegen import CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP, CodeGenConfig, CodeGenOnnxConfig
+     from .tokenization_codegen import CodeGenTokenizer
+
+     try:
+         if not is_tokenizers_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .tokenization_codegen_fast import CodeGenTokenizerFast
+
+     try:
+         if not is_torch_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_codegen import (
+             CODEGEN_PRETRAINED_MODEL_ARCHIVE_LIST,
+             CodeGenForCausalLM,
+             CodeGenModel,
+             CodeGenPreTrainedModel,
+         )
+
+ else:
+     import sys
+
+     sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
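
The same `OptionalDependencyNotAvailable` guard gates the Rust-backed tokenizer on `tokenizers` and the modeling code on `torch`. A hedged sketch of mirroring that check from the consumer side:

```python
# Sketch only: pick the tokenizer/model symbols this __init__ actually registered,
# depending on which optional backends are installed.
from transformers.utils import is_tokenizers_available, is_torch_available

if is_tokenizers_available():
    from transformers import CodeGenTokenizerFast as TokenizerCls  # fast, Rust-backed
else:
    from transformers import CodeGenTokenizer as TokenizerCls      # pure-Python fallback

if is_torch_available():
    from transformers import CodeGenForCausalLM  # registered only in the torch branch

print(TokenizerCls.__name__)
```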
env-llmeval/lib/python3.10/site-packages/transformers/models/codegen/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.24 kB)
 
env-llmeval/lib/python3.10/site-packages/transformers/models/codegen/__pycache__/configuration_codegen.cpython-310.pyc ADDED
Binary file (9.63 kB)
 
env-llmeval/lib/python3.10/site-packages/transformers/models/codegen/__pycache__/modeling_codegen.cpython-310.pyc ADDED
Binary file (20.9 kB)
 
env-llmeval/lib/python3.10/site-packages/transformers/models/codegen/__pycache__/tokenization_codegen.cpython-310.pyc ADDED
Binary file (14.7 kB)
 
env-llmeval/lib/python3.10/site-packages/transformers/models/codegen/__pycache__/tokenization_codegen_fast.cpython-310.pyc ADDED
Binary file (9.16 kB)
 
env-llmeval/lib/python3.10/site-packages/transformers/models/codegen/configuration_codegen.py ADDED
@@ -0,0 +1,242 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Salesforce authors, The EleutherAI, and HuggingFace Teams. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ CodeGen model configuration"""
16
+ from collections import OrderedDict
17
+ from typing import Any, List, Mapping, Optional
18
+
19
+ from ... import PreTrainedTokenizer, TensorType, is_torch_available
20
+ from ...configuration_utils import PretrainedConfig
21
+ from ...onnx import OnnxConfigWithPast, PatchingSpec
22
+ from ...utils import logging
23
+
24
+
25
+ logger = logging.get_logger(__name__)
26
+
27
+
28
+ CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
29
+ "Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
30
+ "Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
31
+ "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
32
+ "Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
33
+ "Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
34
+ "Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
35
+ "Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
36
+ "Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
37
+ "Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
38
+ "Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
39
+ "Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
40
+ "Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
41
+ }
42
+
43
+
44
+ class CodeGenConfig(PretrainedConfig):
45
+ r"""
46
+ This is the configuration class to store the configuration of a [`CodeGenModel`]. It is used to instantiate a
47
+ CodeGen model according to the specified arguments, defining the model architecture. Instantiating a configuration
48
+ with the defaults will yield a similar configuration to that of the CodeGen
49
+ [Salesforce/codegen-2B-mono](https://huggingface.co/Salesforce/codegen-2B-mono) architecture. Configuration objects
50
+ inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from
51
+ [`PretrainedConfig`] for more information.
52
+
53
+ Args:
54
+ vocab_size (`int`, *optional*, defaults to 50400):
55
+ Vocabulary size of the CodeGen model. Defines the number of different tokens that can be represented by the
56
+ `input_ids` passed when calling [`CodeGenModel`].
57
+ n_positions (`int`, *optional*, defaults to 2048):
58
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
59
+ just in case (e.g., 512 or 1024 or 2048).
60
+ n_ctx (`int`, *optional*, defaults to 2048):
61
+ This attribute is used in `CodeGenModel.__init__` without any real effect.
62
+ n_embd (`int`, *optional*, defaults to 4096):
63
+ Dimensionality of the embeddings and hidden states.
64
+ n_layer (`int`, *optional*, defaults to 28):
65
+ Number of hidden layers in the Transformer encoder.
66
+ n_head (`int`, *optional*, defaults to 16):
67
+ Number of attention heads for each attention layer in the Transformer encoder.
68
+ rotary_dim (`int`, *optional*, defaults to 64):
69
+ Number of dimensions in the embedding that Rotary Position Embedding is applied to.
70
+ n_inner (`int`, *optional*):
71
+ Dimensionality of the inner feed-forward layers. `None` will set it to 4 times n_embd
72
+ activation_function (`str`, *optional*, defaults to `"gelu_new"`):
73
+ Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new"]`.
74
+ resid_pdrop (`float`, *optional*, defaults to 0.0):
75
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
76
+ embd_pdrop (`int`, *optional*, defaults to 0.0):
77
+ The dropout ratio for the embeddings.
78
+ attn_pdrop (`float`, *optional*, defaults to 0.0):
79
+ The dropout ratio for the attention.
80
+ layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
81
+ The epsilon to use in the layer normalization layers.
82
+ initializer_range (`float`, *optional*, defaults to 0.02):
83
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
84
+ use_cache (`bool`, *optional*, defaults to `True`):
85
+ Whether or not the model should return the last key/values attentions (not used by all models).
86
+ bos_token_id (`int`, *optional*, defaults to 50256):
87
+ Beginning of stream token id.
88
+ eos_token_id (`int`, *optional*, defaults to 50256):
89
+ End of stream token id.
90
+ tie_word_embeddings (`bool`, *optional*, defaults to `False`):
91
+ Whether the model's input and output word embeddings should be tied. Note that this is only relevant if the
92
+ model has an output word embedding layer.
93
+
94
+ Example:
95
+
96
+ ```python
97
+ >>> from transformers import CodeGenConfig, CodeGenModel
98
+
99
+ >>> # Initializing a CodeGen 6B configuration
100
+ >>> configuration = CodeGenConfig()
101
+
102
+ >>> # Initializing a model (with random weights) from the configuration
103
+ >>> model = CodeGenModel(configuration)
104
+
105
+ >>> # Accessing the model configuration
106
+ >>> configuration = model.config
107
+ ```"""
108
+
109
+ model_type = "codegen"
110
+ attribute_map = {
111
+ "max_position_embeddings": "n_positions",
112
+ "hidden_size": "n_embd",
113
+ "num_attention_heads": "n_head",
114
+ "num_hidden_layers": "n_layer",
115
+ }
116
+
117
+ def __init__(
118
+ self,
119
+ vocab_size=50400,
120
+ n_positions=2048,
121
+ n_ctx=2048,
122
+ n_embd=4096,
123
+ n_layer=28,
124
+ n_head=16,
125
+ rotary_dim=64,
126
+ n_inner=None,
127
+ activation_function="gelu_new",
128
+ resid_pdrop=0.0,
129
+ embd_pdrop=0.0,
130
+ attn_pdrop=0.0,
131
+ layer_norm_epsilon=1e-5,
132
+ initializer_range=0.02,
133
+ use_cache=True,
134
+ bos_token_id=50256,
135
+ eos_token_id=50256,
136
+ tie_word_embeddings=False,
137
+ **kwargs,
138
+ ):
139
+ self.vocab_size = vocab_size
140
+ self.n_ctx = n_ctx
141
+ self.n_positions = n_positions
142
+ self.n_embd = n_embd
143
+ self.n_layer = n_layer
144
+ self.n_head = n_head
145
+ self.n_inner = n_inner
146
+ self.rotary_dim = rotary_dim
147
+ self.activation_function = activation_function
148
+ self.resid_pdrop = resid_pdrop
149
+ self.embd_pdrop = embd_pdrop
150
+ self.attn_pdrop = attn_pdrop
151
+ self.layer_norm_epsilon = layer_norm_epsilon
152
+ self.initializer_range = initializer_range
153
+ self.use_cache = use_cache
154
+
155
+ self.bos_token_id = bos_token_id
156
+ self.eos_token_id = eos_token_id
157
+
158
+ super().__init__(
159
+ bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
160
+ )
161
+
162
+
163
+ # Copied from transformers.models.gpt2.configuration_gpt2.GPT2OnnxConfig
164
+ class CodeGenOnnxConfig(OnnxConfigWithPast):
165
+ def __init__(
166
+ self,
167
+ config: PretrainedConfig,
168
+ task: str = "default",
169
+ patching_specs: List[PatchingSpec] = None,
170
+ use_past: bool = False,
171
+ ):
172
+ super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
173
+ if not getattr(self._config, "pad_token_id", None):
174
+ # TODO: how to do that better?
175
+ self._config.pad_token_id = 0
176
+
177
+ @property
178
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
179
+ common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
180
+ if self.use_past:
181
+ self.fill_with_past_key_values_(common_inputs, direction="inputs")
182
+ common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
183
+ else:
184
+ common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
185
+
186
+ return common_inputs
187
+
188
+ @property
189
+ def num_layers(self) -> int:
190
+ return self._config.n_layer
191
+
192
+ @property
193
+ def num_attention_heads(self) -> int:
194
+ return self._config.n_head
195
+
196
+ def generate_dummy_inputs(
197
+ self,
198
+ tokenizer: PreTrainedTokenizer,
199
+ batch_size: int = -1,
200
+ seq_length: int = -1,
201
+ is_pair: bool = False,
202
+ framework: Optional[TensorType] = None,
203
+ ) -> Mapping[str, Any]:
204
+ common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
205
+ tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
206
+ )
207
+
208
+ # We need to order the inputs in the way they appear in the forward()
209
+ ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
210
+
211
+ # Need to add the past_keys
212
+ if self.use_past:
213
+ if not is_torch_available():
214
+ raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
215
+ else:
216
+ import torch
217
+
218
+ batch, seqlen = common_inputs["input_ids"].shape
219
+ # Not using the same length for past_key_values
220
+ past_key_values_length = seqlen + 2
221
+ past_shape = (
222
+ batch,
223
+ self.num_attention_heads,
224
+ past_key_values_length,
225
+ self._config.hidden_size // self.num_attention_heads,
226
+ )
227
+ ordered_inputs["past_key_values"] = [
228
+ (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
229
+ ]
230
+
231
+ ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
232
+ if self.use_past:
233
+ mask_dtype = ordered_inputs["attention_mask"].dtype
234
+ ordered_inputs["attention_mask"] = torch.cat(
235
+ [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
236
+ )
237
+
238
+ return ordered_inputs
239
+
240
+ @property
241
+ def default_onnx_opset(self) -> int:
242
+ return 13
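
As a quick illustration of the two classes added in this file, a hedged sketch that builds a deliberately tiny `CodeGenConfig` (the small sizes are arbitrary, not defaults) and inspects the derived ONNX export config:

```python
# Illustrative sketch; the reduced n_layer/n_head/n_embd values are arbitrary,
# the real defaults above correspond to Salesforce/codegen-2B-mono.
from transformers import CodeGenConfig
from transformers.models.codegen import CodeGenOnnxConfig

config = CodeGenConfig(n_layer=2, n_head=4, n_embd=128, n_positions=256)
onnx_config = CodeGenOnnxConfig(config, use_past=True)

print(onnx_config.num_layers)           # 2, read from config.n_layer
print(onnx_config.num_attention_heads)  # 4, read from config.n_head
print(onnx_config.default_onnx_opset)   # 13
print(dict(onnx_config.inputs))         # input_ids, past key/values, attention_mask
```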
env-llmeval/lib/python3.10/site-packages/transformers/models/codegen/modeling_codegen.py ADDED
@@ -0,0 +1,733 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Salesforce authors, The EleutherAI, and HuggingFace Teams. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch CodeGen model."""
16
+
17
+ from typing import Optional, Tuple, Union
18
+
19
+ import torch
20
+ import torch.utils.checkpoint
21
+ from torch import nn
22
+ from torch.nn import CrossEntropyLoss
23
+
24
+ from ...activations import ACT2FN
25
+ from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
26
+ from ...modeling_utils import PreTrainedModel
27
+ from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
28
+ from .configuration_codegen import CodeGenConfig
29
+
30
+
31
+ logger = logging.get_logger(__name__)
32
+
33
+ _CHECKPOINT_FOR_DOC = "Salesforce/codegen-2B-mono"
34
+ _CONFIG_FOR_DOC = "CodeGenConfig"
35
+
36
+
37
+ CODEGEN_PRETRAINED_MODEL_ARCHIVE_LIST = [
38
+ "Salesforce/codegen-350M-nl",
39
+ "Salesforce/codegen-350M-multi",
40
+ "Salesforce/codegen-350M-mono",
41
+ "Salesforce/codegen-2B-nl",
42
+ "Salesforce/codegen-2B-multi",
43
+ "Salesforce/codegen-2B-mono",
44
+ "Salesforce/codegen-6B-nl",
45
+ "Salesforce/codegen-6B-multi",
46
+ "Salesforce/codegen-6B-mono",
47
+ "Salesforce/codegen-16B-nl",
48
+ "Salesforce/codegen-16B-multi",
49
+ "Salesforce/codegen-16B-mono",
50
+ # See all CodeGen models at https://huggingface.co/models?filter=codegen
51
+ ]
52
+
53
+
54
+ # Copied from transformers.models.gptj.modeling_gptj.create_sinusoidal_positions
55
+ def create_sinusoidal_positions(num_pos: int, dim: int) -> torch.Tensor:
56
+ inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2, dtype=torch.int64) / dim))
57
+ sinusoid_inp = torch.einsum("i , j -> i j", torch.arange(num_pos, dtype=torch.int64).float(), inv_freq).float()
58
+ return torch.cat((torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)), dim=1)
59
+
60
+
61
+ # Copied from transformers.models.gptj.modeling_gptj.rotate_every_two
62
+ def rotate_every_two(x: torch.Tensor) -> torch.Tensor:
63
+ x1 = x[:, :, :, ::2]
64
+ x2 = x[:, :, :, 1::2]
65
+ x = torch.stack((-x2, x1), dim=-1)
66
+ return x.flatten(-2) # in einsum notation: rearrange(x, '... d j -> ... (d j)')
67
+
68
+
69
+ # Copied from transformers.models.gptj.modeling_gptj.apply_rotary_pos_emb
70
+ def apply_rotary_pos_emb(tensor: torch.Tensor, sin: torch.Tensor, cos: torch.Tensor) -> torch.Tensor:
71
+ sin = torch.repeat_interleave(sin[:, :, None, :], 2, 3)
72
+ cos = torch.repeat_interleave(cos[:, :, None, :], 2, 3)
73
+ return (tensor * cos) + (rotate_every_two(tensor) * sin)
74
+
75
+
76
+ class CodeGenAttention(nn.Module):
77
+ def __init__(self, config):
78
+ super().__init__()
79
+
80
+ max_positions = config.max_position_embeddings
81
+ self.register_buffer(
82
+ "causal_mask",
83
+ torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)).view(
84
+ 1, 1, max_positions, max_positions
85
+ ),
86
+ persistent=False,
87
+ )
88
+
89
+ self.attn_dropout = nn.Dropout(config.attn_pdrop)
90
+ self.resid_dropout = nn.Dropout(config.resid_pdrop)
91
+
92
+ self.embed_dim = config.hidden_size
93
+ self.num_attention_heads = config.num_attention_heads
94
+ self.head_dim = self.embed_dim // self.num_attention_heads
95
+ if self.head_dim * self.num_attention_heads != self.embed_dim:
96
+ raise ValueError(
97
+ f"embed_dim must be divisible by num_attention_heads (got `embed_dim`: {self.embed_dim} and"
98
+ f" `num_attention_heads`: {self.num_attention_heads})."
99
+ )
100
+ self.scale_attn = torch.sqrt(torch.tensor(self.head_dim, dtype=torch.float32)).to(torch.get_default_dtype())
101
+ self.qkv_proj = nn.Linear(self.embed_dim, self.embed_dim * 3, bias=False)
102
+
103
+ self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
104
+ self.rotary_dim = config.rotary_dim
105
+ pos_embd_dim = self.rotary_dim or self.embed_dim
106
+ self.embed_positions = create_sinusoidal_positions(max_positions, pos_embd_dim)
107
+
108
+ def _split_heads(self, x, n_head, dim_head, mp_num):
109
+ reshaped = x.reshape(x.shape[:-1] + (n_head // mp_num, dim_head))
110
+ reshaped = reshaped.reshape(x.shape[:-2] + (-1,) + reshaped.shape[-1:])
111
+ return reshaped
112
+
113
+ def _merge_heads(self, tensor, num_attention_heads, attn_head_size):
114
+ """
115
+ Merges attn_head_size dim and num_attn_heads dim into n_ctx
116
+ """
117
+ if len(tensor.shape) == 5:
118
+ tensor = tensor.permute(0, 1, 3, 2, 4).contiguous()
119
+ elif len(tensor.shape) == 4:
120
+ tensor = tensor.permute(0, 2, 1, 3).contiguous()
121
+ else:
122
+ raise ValueError(f"Input tensor rank should be one of [4, 5], but is: {len(tensor.shape)}")
123
+ new_shape = tensor.size()[:-2] + (num_attention_heads * attn_head_size,)
124
+ return tensor.view(new_shape)
125
+
126
+ def _attn(
127
+ self,
128
+ query,
129
+ key,
130
+ value,
131
+ attention_mask=None,
132
+ head_mask=None,
133
+ ):
134
+ # compute causal mask from causal mask buffer
135
+ query_length, key_length = query.size(-2), key.size(-2)
136
+ causal_mask = self.causal_mask[:, :, key_length - query_length : key_length, :key_length]
137
+
138
+ # Keep the attention weights computation in fp32 to avoid overflow issues
139
+ query = query.to(torch.float32)
140
+ key = key.to(torch.float32)
141
+
142
+ attn_weights = torch.matmul(query, key.transpose(-1, -2))
143
+
144
+ attn_weights = attn_weights / self.scale_attn
145
+ mask_value = torch.finfo(attn_weights.dtype).min
146
+ # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.
147
+ # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`
148
+ mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(attn_weights.device)
149
+ attn_weights = torch.where(causal_mask, attn_weights, mask_value)
150
+
151
+ if attention_mask is not None:
152
+ # Apply the attention mask
153
+ attn_weights = attn_weights + attention_mask
154
+
155
+ attn_weights = nn.Softmax(dim=-1)(attn_weights)
156
+ attn_weights = attn_weights.to(value.dtype)
157
+ attn_weights = self.attn_dropout(attn_weights)
158
+
159
+ # Mask heads if we want to
160
+ if head_mask is not None:
161
+ attn_weights = attn_weights * head_mask
162
+
163
+ attn_output = torch.matmul(attn_weights, value)
164
+
165
+ return attn_output, attn_weights
166
+
167
+ def forward(
168
+ self,
169
+ hidden_states: Optional[torch.FloatTensor],
170
+ layer_past: Optional[Tuple[torch.Tensor]] = None,
171
+ attention_mask: Optional[torch.FloatTensor] = None,
172
+ position_ids: Optional[torch.LongTensor] = None,
173
+ head_mask: Optional[torch.FloatTensor] = None,
174
+ use_cache: Optional[bool] = False,
175
+ output_attentions: Optional[bool] = False,
176
+ ) -> Union[
177
+ Tuple[torch.Tensor, Tuple[torch.Tensor]],
178
+ Optional[Tuple[torch.Tensor, Tuple[torch.Tensor], Tuple[torch.Tensor, ...]]],
179
+ ]:
180
+ qkv = self.qkv_proj(hidden_states)
181
+ # TODO(enijkamp): factor out number of logical TPU-v4 cores or make forward pass agnostic
182
+ mp_num = 4
183
+ qkv_split = qkv.reshape(qkv.shape[:-1] + (mp_num, -1))
184
+
185
+ local_dim = self.head_dim * self.num_attention_heads // mp_num
186
+ query, value, key = torch.split(qkv_split, local_dim, dim=-1)
187
+ query = self._split_heads(query, self.num_attention_heads, self.head_dim, mp_num=mp_num)
188
+ key = self._split_heads(key, self.num_attention_heads, self.head_dim, mp_num=mp_num)
189
+
190
+ value = self._split_heads(value, self.num_attention_heads, self.head_dim, mp_num=mp_num)
191
+ value = value.permute(0, 2, 1, 3)
192
+
193
+ embed_positions = self.embed_positions
194
+ if embed_positions.device != position_ids.device:
195
+ embed_positions = embed_positions.to(position_ids.device)
196
+ self.embed_positions = embed_positions
197
+
198
+ sincos = embed_positions[position_ids]
199
+ sin, cos = torch.split(sincos, sincos.shape[-1] // 2, dim=-1)
200
+
201
+ if self.rotary_dim is not None:
202
+ k_rot = key[:, :, :, : self.rotary_dim]
203
+ k_pass = key[:, :, :, self.rotary_dim :]
204
+
205
+ q_rot = query[:, :, :, : self.rotary_dim]
206
+ q_pass = query[:, :, :, self.rotary_dim :]
207
+
208
+ k_rot = apply_rotary_pos_emb(k_rot, sin, cos)
209
+ q_rot = apply_rotary_pos_emb(q_rot, sin, cos)
210
+
211
+ key = torch.cat([k_rot, k_pass], dim=-1)
212
+ query = torch.cat([q_rot, q_pass], dim=-1)
213
+ else:
214
+ key = apply_rotary_pos_emb(key, sin, cos)
215
+ query = apply_rotary_pos_emb(query, sin, cos)
216
+
217
+ key = key.permute(0, 2, 1, 3)
218
+ query = query.permute(0, 2, 1, 3)
219
+
220
+ if layer_past is not None:
221
+ past_key = layer_past[0]
222
+ past_value = layer_past[1]
223
+ key = torch.cat((past_key, key), dim=-2)
224
+ value = torch.cat((past_value, value), dim=-2)
225
+
226
+ if use_cache is True:
227
+ # Note that this cast is quite ugly, but is not implemented before ROPE as k_rot in the original codebase is always in fp32.
228
+ # Reference: https://github.com/salesforce/CodeGen/blob/f210c3bb1216c975ad858cd4132c0fdeabf4bfc2/codegen1/jaxformer/hf/codegen/modeling_codegen.py#L38
229
+ present = (key.to(hidden_states.dtype), value)
230
+ else:
231
+ present = None
232
+
233
+ # compute self-attention: V x Softmax(QK^T)
234
+ attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)
235
+
236
+ attn_output = self._merge_heads(attn_output, self.num_attention_heads, self.head_dim)
237
+ attn_output = self.out_proj(attn_output)
238
+ attn_output = self.resid_dropout(attn_output)
239
+
240
+ outputs = (attn_output, present)
241
+ if output_attentions:
242
+ outputs += (attn_weights,)
243
+
244
+ return outputs # a, present, (attentions)
245
+
246
+
247
+ # Copied from transformers.models.gptj.modeling_gptj.GPTJMLP with GPTJ->CodeGen
248
+ class CodeGenMLP(nn.Module):
249
+ def __init__(self, intermediate_size, config): # in MLP: intermediate_size= 4 * embed_dim
250
+ super().__init__()
251
+ embed_dim = config.n_embd
252
+
253
+ self.fc_in = nn.Linear(embed_dim, intermediate_size)
254
+ self.fc_out = nn.Linear(intermediate_size, embed_dim)
255
+
256
+ self.act = ACT2FN[config.activation_function]
257
+ self.dropout = nn.Dropout(config.resid_pdrop)
258
+
259
+ def forward(self, hidden_states: Optional[torch.FloatTensor]) -> torch.FloatTensor:
260
+ hidden_states = self.fc_in(hidden_states)
261
+ hidden_states = self.act(hidden_states)
262
+ hidden_states = self.fc_out(hidden_states)
263
+ hidden_states = self.dropout(hidden_states)
264
+ return hidden_states
265
+
266
+
267
+ # Copied from transformers.models.gptj.modeling_gptj.GPTJBlock with GPTJ->CodeGen
268
+ class CodeGenBlock(nn.Module):
269
+ # Ignore copy
270
+ def __init__(self, config):
271
+ super().__init__()
272
+ inner_dim = config.n_inner if config.n_inner is not None else 4 * config.n_embd
273
+ self.ln_1 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
274
+ self.attn = CodeGenAttention(config)
275
+ self.mlp = CodeGenMLP(inner_dim, config)
276
+
277
+ def forward(
278
+ self,
279
+ hidden_states: Optional[torch.FloatTensor],
280
+ layer_past: Optional[Tuple[torch.Tensor]] = None,
281
+ attention_mask: Optional[torch.FloatTensor] = None,
282
+ position_ids: Optional[torch.LongTensor] = None,
283
+ head_mask: Optional[torch.FloatTensor] = None,
284
+ use_cache: Optional[bool] = False,
285
+ output_attentions: Optional[bool] = False,
286
+ ) -> Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]]]:
287
+ residual = hidden_states
288
+ hidden_states = self.ln_1(hidden_states)
289
+ attn_outputs = self.attn(
290
+ hidden_states=hidden_states,
291
+ layer_past=layer_past,
292
+ attention_mask=attention_mask,
293
+ position_ids=position_ids,
294
+ head_mask=head_mask,
295
+ use_cache=use_cache,
296
+ output_attentions=output_attentions,
297
+ )
298
+ attn_output = attn_outputs[0] # output_attn: a, present, (attentions)
299
+ outputs = attn_outputs[1:]
300
+
301
+ feed_forward_hidden_states = self.mlp(hidden_states)
302
+ hidden_states = attn_output + feed_forward_hidden_states + residual
303
+
304
+ if use_cache:
305
+ outputs = (hidden_states,) + outputs
306
+ else:
307
+ outputs = (hidden_states,) + outputs[1:]
308
+
309
+ return outputs # hidden_states, present, (attentions)
310
+
311
+
312
+ class CodeGenPreTrainedModel(PreTrainedModel):
313
+ """
314
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
315
+ models.
316
+ """
317
+
318
+ config_class = CodeGenConfig
319
+ base_model_prefix = "transformer"
320
+ supports_gradient_checkpointing = True
321
+ _no_split_modules = ["CodeGenBlock"]
322
+ _skip_keys_device_placement = "past_key_values"
323
+
324
+ def __init__(self, *inputs, **kwargs):
325
+ super().__init__(*inputs, **kwargs)
326
+
327
+ def _init_weights(self, module):
328
+ """Initialize the weights."""
329
+ if isinstance(module, (nn.Linear,)):
330
+ # Slightly different from Mesh Transformer JAX which uses truncated_normal for initialization
331
+ # cf https://github.com/pytorch/pytorch/pull/5617
332
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
333
+ if module.bias is not None:
334
+ module.bias.data.zero_()
335
+ elif isinstance(module, nn.Embedding):
336
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
337
+ if module.padding_idx is not None:
338
+ module.weight.data[module.padding_idx].zero_()
339
+ elif isinstance(module, nn.LayerNorm):
340
+ module.bias.data.zero_()
341
+ module.weight.data.fill_(1.0)
342
+
343
+
344
+ CODEGEN_START_DOCSTRING = r"""
345
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
346
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
347
+ behavior.
348
+
349
+ Parameters:
350
+ config ([`CodeGenConfig`]): Model configuration class with all the parameters of the model.
351
+ Initializing with a config file does not load the weights associated with the model, only the
352
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
353
+ """
354
+
355
+ CODEGEN_INPUTS_DOCSTRING = r"""
356
+ Args:
357
+ input_ids (`torch.LongTensor` of shape `({0})`):
358
+ Indices of input sequence tokens in the vocabulary.
359
+
360
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
361
+ [`PreTrainedTokenizer.__call__`] for details.
362
+
363
+ [What are input IDs?](../glossary#input-ids)
364
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
365
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
366
+
367
+ - 1 for tokens that are **not masked**,
368
+ - 0 for tokens that are **masked**.
369
+
370
+ [What are attention masks?](../glossary#attention-mask)
371
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
372
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
373
+ 1]`:
374
+
375
+ - 0 corresponds to a *sentence A* token,
376
+ - 1 corresponds to a *sentence B* token.
377
+
378
+ [What are token type IDs?](../glossary#token-type-ids)
379
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
380
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
381
+ config.n_positions - 1]`.
382
+
383
+ [What are position IDs?](../glossary#position-ids)
384
+ head_mask (`torch.FloatTensor` of shape `(num_attention_heads,)` or `(n_layer, num_attention_heads)`, *optional*):
385
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
386
+
387
+ - 1 indicates the head is **not masked**,
388
+ - 0 indicates the head is **masked**.
389
+
390
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_dim)`, *optional*):
391
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
392
+ is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
393
+ model's internal embedding lookup matrix.
394
+ output_attentions (`bool`, *optional*):
395
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
396
+ tensors for more detail.
397
+ output_hidden_states (`bool`, *optional*):
398
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
399
+ more detail.
400
+ return_dict (`bool`, *optional*):
401
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
402
+ """
403
+
404
+
405
+ @add_start_docstrings(
406
+ "The bare CodeGen Model transformer outputting raw hidden-states without any specific head on top.",
407
+ CODEGEN_START_DOCSTRING,
408
+ )
409
+ class CodeGenModel(CodeGenPreTrainedModel):
410
+ def __init__(self, config):
411
+ super().__init__(config)
412
+
413
+ self.embed_dim = config.n_embd
414
+ self.vocab_size = config.vocab_size
415
+ self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
416
+ self.drop = nn.Dropout(config.embd_pdrop)
417
+ self.h = nn.ModuleList([CodeGenBlock(config) for _ in range(config.n_layer)])
418
+ self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
419
+ self.rotary_dim = min(config.rotary_dim, config.n_ctx // config.num_attention_heads)
420
+
421
+ self.gradient_checkpointing = False
422
+
423
+ # Initialize weights and apply final processing
424
+ self.post_init()
425
+
426
+ def get_input_embeddings(self):
427
+ return self.wte
428
+
429
+ def set_input_embeddings(self, new_embeddings):
430
+ self.wte = new_embeddings
431
+
432
+ @add_start_docstrings_to_model_forward(CODEGEN_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
433
+ @add_code_sample_docstrings(
434
+ checkpoint=_CHECKPOINT_FOR_DOC,
435
+ output_type=BaseModelOutputWithPast,
436
+ config_class=_CONFIG_FOR_DOC,
437
+ )
438
+ def forward(
439
+ self,
440
+ input_ids: Optional[torch.LongTensor] = None,
441
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
442
+ attention_mask: Optional[torch.FloatTensor] = None,
443
+ token_type_ids: Optional[torch.LongTensor] = None,
444
+ position_ids: Optional[torch.LongTensor] = None,
445
+ head_mask: Optional[torch.FloatTensor] = None,
446
+ inputs_embeds: Optional[torch.FloatTensor] = None,
447
+ use_cache: Optional[bool] = None,
448
+ output_attentions: Optional[bool] = None,
449
+ output_hidden_states: Optional[bool] = None,
450
+ return_dict: Optional[bool] = None,
451
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
452
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
453
+ output_hidden_states = (
454
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
455
+ )
456
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
457
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
458
+
459
+ if input_ids is not None and inputs_embeds is not None:
460
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
461
+ elif input_ids is not None:
462
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
463
+ input_shape = input_ids.size()
464
+ input_ids = input_ids.view(-1, input_shape[-1])
465
+ batch_size = input_ids.shape[0]
466
+ elif inputs_embeds is not None:
467
+ input_shape = inputs_embeds.size()[:-1]
468
+ batch_size = inputs_embeds.shape[0]
469
+ else:
470
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
471
+
472
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
473
+
474
+ if token_type_ids is not None:
475
+ token_type_ids = token_type_ids.view(-1, input_shape[-1])
476
+
477
+ if past_key_values is None:
478
+ past_length = 0
479
+ past_key_values = tuple([None] * len(self.h))
480
+ else:
481
+ past_length = past_key_values[0][0].size(-2)
482
+
483
+ if position_ids is None:
484
+ position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
485
+ position_ids = position_ids.unsqueeze(0)
486
+
487
+ # Attention mask.
488
+ if attention_mask is not None:
489
+ if batch_size <= 0:
490
+ raise ValueError("batch_size has to be defined and > 0")
491
+ attention_mask = attention_mask.view(batch_size, -1)
492
+ # We create a 3D attention mask from a 2D tensor mask.
493
+ # Sizes are [batch_size, 1, 1, to_seq_length]
494
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
495
+ # this attention mask is more simple than the triangular masking of causal attention
496
+ # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
497
+ attention_mask = attention_mask[:, None, None, :]
498
+
499
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
500
+ # masked positions, this operation will create a tensor which is 0.0 for
501
+ # positions we want to attend and the dtype's smallest value for masked positions.
502
+ # Since we are adding it to the raw scores before the softmax, this is
503
+ # effectively the same as removing these entirely.
504
+ attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility
505
+ attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min
506
+
507
+ # Prepare head mask if needed
508
+ # 1.0 in head_mask indicate we keep the head
509
+ # attention_probs has shape bsz x num_attention_heads x N x N
510
+ # head_mask has shape n_layer x batch x num_attention_heads x N x N
511
+ head_mask = self.get_head_mask(head_mask, self.config.n_layer)
512
+
513
+ if inputs_embeds is None:
514
+ inputs_embeds = self.wte(input_ids)
515
+
516
+ hidden_states = inputs_embeds
517
+
518
+ if token_type_ids is not None:
519
+ token_type_embeds = self.wte(token_type_ids)
520
+ hidden_states = hidden_states + token_type_embeds
521
+
522
+ hidden_states = self.drop(hidden_states)
523
+
524
+ output_shape = input_shape + (hidden_states.size(-1),)
525
+
526
+ if self.gradient_checkpointing and self.training:
527
+ if use_cache:
528
+ logger.warning_once(
529
+ "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
530
+ "`use_cache=False`..."
531
+ )
532
+ use_cache = False
533
+
534
+ presents = () if use_cache else None
535
+ all_self_attentions = () if output_attentions else None
536
+ all_hidden_states = () if output_hidden_states else None
537
+ for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
538
+ if output_hidden_states:
539
+ all_hidden_states = all_hidden_states + (hidden_states,)
540
+
541
+ if self.gradient_checkpointing and self.training:
542
+ outputs = self._gradient_checkpointing_func(
543
+ block.__call__,
544
+ hidden_states,
545
+ None,
546
+ attention_mask,
547
+ position_ids,
548
+ head_mask[i],
549
+ use_cache,
550
+ output_attentions,
551
+ )
552
+ else:
553
+ outputs = block(
554
+ hidden_states=hidden_states,
555
+ layer_past=layer_past,
556
+ attention_mask=attention_mask,
557
+ position_ids=position_ids,
558
+ head_mask=head_mask[i],
559
+ use_cache=use_cache,
560
+ output_attentions=output_attentions,
561
+ )
562
+
563
+ hidden_states = outputs[0]
564
+ if use_cache is True:
565
+ presents = presents + (outputs[1],)
566
+
567
+ if output_attentions:
568
+ all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
569
+
570
+ hidden_states = self.ln_f(hidden_states)
571
+
572
+ hidden_states = hidden_states.view(output_shape)
573
+ # Add last hidden state
574
+ if output_hidden_states:
575
+ all_hidden_states = all_hidden_states + (hidden_states,)
576
+
577
+ if not return_dict:
578
+ return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)
579
+
580
+ return BaseModelOutputWithPast(
581
+ last_hidden_state=hidden_states,
582
+ past_key_values=presents,
583
+ hidden_states=all_hidden_states,
584
+ attentions=all_self_attentions,
585
+ )
586
+
587
+
588
+ @add_start_docstrings(
589
+ """
590
+ The CodeGen Model transformer with a language modeling head on top.
591
+ """,
592
+ CODEGEN_START_DOCSTRING,
593
+ )
594
+ class CodeGenForCausalLM(CodeGenPreTrainedModel):
595
+ _tied_weights_keys = ["lm_head.weight"]
596
+
597
+ def __init__(self, config):
598
+ super().__init__(config)
599
+ self.transformer = CodeGenModel(config)
600
+ self.lm_head = nn.Linear(config.n_embd, config.vocab_size)
601
+
602
+ # Initialize weights and apply final processing
603
+ self.post_init()
604
+
605
+ def get_output_embeddings(self):
606
+ return self.lm_head
607
+
608
+ def set_output_embeddings(self, new_embeddings):
609
+ self.lm_head = new_embeddings
610
+
611
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None, **kwargs):
612
+ token_type_ids = kwargs.get("token_type_ids", None)
613
+ # Omit tokens covered by past_key_values
614
+ if past_key_values:
615
+ past_length = past_key_values[0][0].shape[2]
616
+
617
+ # Some generation methods already pass only the last input ID
618
+ if input_ids.shape[1] > past_length:
619
+ remove_prefix_length = past_length
620
+ else:
621
+ # Default to old behavior: keep only final ID
622
+ remove_prefix_length = input_ids.shape[1] - 1
623
+
624
+ input_ids = input_ids[:, remove_prefix_length:]
625
+ if token_type_ids is not None:
626
+ token_type_ids = token_type_ids[:, -input_ids.shape[1] :]
627
+
628
+ attention_mask = kwargs.get("attention_mask", None)
629
+ position_ids = kwargs.get("position_ids", None)
630
+
631
+ if attention_mask is not None and position_ids is None:
632
+ # create position_ids on the fly for batch generation
633
+ position_ids = attention_mask.long().cumsum(-1) - 1
634
+ position_ids.masked_fill_(attention_mask == 0, 1)
635
+ if past_key_values:
636
+ position_ids = position_ids[:, -input_ids.shape[1] :]
637
+
638
+ return {
639
+ "input_ids": input_ids,
640
+ "past_key_values": past_key_values,
641
+ "use_cache": kwargs.get("use_cache"),
642
+ "position_ids": position_ids,
643
+ "attention_mask": attention_mask,
644
+ "token_type_ids": token_type_ids,
645
+ }
646
+
647
+ @add_start_docstrings_to_model_forward(CODEGEN_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
648
+ @add_code_sample_docstrings(
649
+ checkpoint=_CHECKPOINT_FOR_DOC,
650
+ output_type=CausalLMOutputWithPast,
651
+ config_class=_CONFIG_FOR_DOC,
652
+ )
653
+ def forward(
654
+ self,
655
+ input_ids: Optional[torch.LongTensor] = None,
656
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
657
+ attention_mask: Optional[torch.FloatTensor] = None,
658
+ token_type_ids: Optional[torch.LongTensor] = None,
659
+ position_ids: Optional[torch.LongTensor] = None,
660
+ head_mask: Optional[torch.FloatTensor] = None,
661
+ inputs_embeds: Optional[torch.FloatTensor] = None,
662
+ labels: Optional[torch.LongTensor] = None,
663
+ use_cache: Optional[bool] = None,
664
+ output_attentions: Optional[bool] = None,
665
+ output_hidden_states: Optional[bool] = None,
666
+ return_dict: Optional[bool] = None,
667
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
668
+ r"""
669
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
670
+ Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
671
+ `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
672
+ are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
673
+ """
674
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
675
+
676
+ transformer_outputs = self.transformer(
677
+ input_ids,
678
+ past_key_values=past_key_values,
679
+ attention_mask=attention_mask,
680
+ token_type_ids=token_type_ids,
681
+ position_ids=position_ids,
682
+ head_mask=head_mask,
683
+ inputs_embeds=inputs_embeds,
684
+ use_cache=use_cache,
685
+ output_attentions=output_attentions,
686
+ output_hidden_states=output_hidden_states,
687
+ return_dict=return_dict,
688
+ )
689
+ hidden_states = transformer_outputs[0]
690
+
691
+ # make sure sampling in fp16 works correctly and
692
+ # compute loss in fp32 to match with mesh-tf version
693
+ # https://github.com/EleutherAI/gpt-neo/blob/89ce74164da2fb16179106f54e2269b5da8db333/models/gpt2/gpt2.py#L179
694
+ lm_logits = self.lm_head(hidden_states).to(torch.float32)
695
+
696
+ loss = None
697
+ if labels is not None:
698
+ # move labels to correct device to enable model parallelism
699
+ labels = labels.to(lm_logits.device)
700
+ # Shift so that tokens < n predict n
701
+ shift_logits = lm_logits[..., :-1, :].contiguous()
702
+ shift_labels = labels[..., 1:].contiguous()
703
+ # Flatten the tokens
704
+ loss_fct = CrossEntropyLoss()
705
+ loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
706
+
707
+ loss = loss.to(hidden_states.dtype)
708
+
709
+ if not return_dict:
710
+ output = (lm_logits,) + transformer_outputs[1:]
711
+ return ((loss,) + output) if loss is not None else output
712
+
713
+ return CausalLMOutputWithPast(
714
+ loss=loss,
715
+ logits=lm_logits,
716
+ past_key_values=transformer_outputs.past_key_values,
717
+ hidden_states=transformer_outputs.hidden_states,
718
+ attentions=transformer_outputs.attentions,
719
+ )
720
+
721
+ @staticmethod
722
+ def _reorder_cache(
723
+ past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor
724
+ ) -> Tuple[Tuple[torch.Tensor]]:
725
+ """
726
+ This function is used to re-order the `past_key_values` cache if [`~PretrainedModel.beam_search`] or
727
+ [`~PretrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
728
+ beam_idx at every generation step.
729
+ """
730
+ return tuple(
731
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
732
+ for layer_past in past_key_values
733
+ )
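
For completeness, a usage sketch of `CodeGenForCausalLM` as defined above (assumes `torch` is installed and the `Salesforce/codegen-350M-mono` checkpoint can be fetched from the Hub):

```python
# Sketch: greedy code completion with the smallest mono checkpoint in the archive list.
import torch
from transformers import AutoTokenizer, CodeGenForCausalLM

tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
model = CodeGenForCausalLM.from_pretrained("Salesforce/codegen-350M-mono")
model.eval()

inputs = tokenizer("def fibonacci(n):", return_tensors="pt")
with torch.no_grad():
    out = model.generate(**inputs, max_new_tokens=32, do_sample=False)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```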
env-llmeval/lib/python3.10/site-packages/transformers/models/codegen/tokenization_codegen.py ADDED
@@ -0,0 +1,396 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The Salesforce authors, The Open AI Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for CodeGen"""
16
+
17
+
18
+ import json
19
+ import os
20
+ from functools import lru_cache
21
+ from typing import TYPE_CHECKING, List, Optional, Tuple, Union
22
+
23
+ import numpy as np
24
+ import regex as re
25
+
26
+ from ...utils import is_tf_available, is_torch_available, logging, to_py_obj
27
+
28
+
29
+ if TYPE_CHECKING:
30
+ if is_torch_available():
31
+ import torch
32
+ if is_tf_available():
33
+ import tensorflow as tf
34
+
35
+ from ...tokenization_utils import AddedToken, PreTrainedTokenizer
36
+
37
+
38
+ logger = logging.get_logger(__name__)
39
+
40
+ VOCAB_FILES_NAMES = {
41
+ "vocab_file": "vocab.json",
42
+ "merges_file": "merges.txt",
43
+ }
44
+
45
+ PRETRAINED_VOCAB_FILES_MAP = {
46
+ "vocab_file": {
47
+ "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
48
+ },
49
+ "merges_file": {
50
+ "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
51
+ },
52
+ }
53
+
54
+ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
55
+ "Salesforce/codegen-350M-mono": 2048,
56
+ }
57
+
58
+
59
+ @lru_cache()
60
+ def bytes_to_unicode():
61
+ """
62
+ Returns a list of utf-8 bytes and a mapping to unicode strings. We specifically avoid mapping to whitespace/control
63
+ characters that the bpe code barfs on.
64
+
65
+ The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab
66
+ if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for
67
+ decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup
68
+ tables between utf-8 bytes and unicode strings.
69
+ """
70
+ bs = (
71
+ list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
72
+ )
73
+ cs = bs[:]
74
+ n = 0
75
+ for b in range(2**8):
76
+ if b not in bs:
77
+ bs.append(b)
78
+ cs.append(2**8 + n)
79
+ n += 1
80
+ cs = [chr(n) for n in cs]
81
+ return dict(zip(bs, cs))
82
+
83
+
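A small illustration of what the mapping above produces, assuming the package layout shown in this diff (so `bytes_to_unicode` can be imported from the module being added):

```python
# Printable bytes map to themselves; bytes the BPE would choke on (e.g. the
# space byte 32) are shifted into otherwise unused unicode code points.
from transformers.models.codegen.tokenization_codegen import bytes_to_unicode

byte_encoder = bytes_to_unicode()
print(byte_encoder[ord("A")])  # 'A'  -> printable bytes are kept as-is
print(byte_encoder[32])        # 'Ġ'  -> the space byte is remapped
print(len(byte_encoder))       # 256 reversible byte-to-character mappings
```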
84
+ def get_pairs(word):
85
+ """
86
+ Return set of symbol pairs in a word.
87
+
88
+ Word is represented as tuple of symbols (symbols being variable-length strings).
89
+ """
90
+ pairs = set()
91
+ prev_char = word[0]
92
+ for char in word[1:]:
93
+ pairs.add((prev_char, char))
94
+ prev_char = char
95
+ return pairs
96
+
97
+
98
+ class CodeGenTokenizer(PreTrainedTokenizer):
99
+ """
100
+ Construct a CodeGen tokenizer. Based on byte-level Byte-Pair-Encoding.
101
+
102
+ This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
103
+ be encoded differently depending on whether it is at the beginning of the sentence (without a space) or not:
104
+
105
+ ```python
106
+ >>> from transformers import CodeGenTokenizer
107
+
108
+ >>> tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
109
+ >>> tokenizer("Hello world")["input_ids"]
110
+ [15496, 995]
111
+
112
+ >>> tokenizer(" Hello world")["input_ids"]
113
+ [18435, 995]
114
+ ```
115
+
116
+ You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
117
+ call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.
118
+
119
+ <Tip>
120
+
121
+ When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one).
122
+
123
+ </Tip>
124
+
125
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
126
+ this superclass for more information regarding those methods.
127
+
128
+ Args:
129
+ vocab_file (`str`):
130
+ Path to the vocabulary file.
131
+ merges_file (`str`):
132
+ Path to the merges file.
133
+ errors (`str`, *optional*, defaults to `"replace"`):
134
+ Paradigm to follow when decoding bytes to UTF-8. See
135
+ [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
136
+ unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
137
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
138
+ token instead.
139
+ bos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
140
+ The beginning of sequence token.
141
+ eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
142
+ The end of sequence token.
143
+ pad_token (`str`, *optional*):
144
+ The token used for padding, for example when batching sequences of different lengths.
145
+ add_prefix_space (`bool`, *optional*, defaults to `False`):
146
+ Whether or not to add an initial space to the input. This allows treating the leading word just like any
147
+ other word. (The CodeGen tokenizer detects the beginning of words by the preceding space.)
148
+ add_bos_token (`bool`, *optional*, defaults to `False`):
149
+ Whether to add a beginning of sequence token at the start of sequences.
150
+ """
151
+
152
+ vocab_files_names = VOCAB_FILES_NAMES
153
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
154
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
155
+ model_input_names = ["input_ids", "attention_mask"]
156
+
157
+ def __init__(
158
+ self,
159
+ vocab_file,
160
+ merges_file,
161
+ errors="replace",
162
+ unk_token="<|endoftext|>",
163
+ bos_token="<|endoftext|>",
164
+ eos_token="<|endoftext|>",
165
+ pad_token=None,
166
+ add_prefix_space=False,
167
+ add_bos_token=False,
168
+ **kwargs,
169
+ ):
170
+ bos_token = AddedToken(bos_token, special=True) if isinstance(bos_token, str) else bos_token
171
+ eos_token = AddedToken(eos_token, special=True) if isinstance(eos_token, str) else eos_token
172
+ unk_token = AddedToken(unk_token, special=True) if isinstance(unk_token, str) else unk_token
173
+ pad_token = AddedToken(pad_token, special=True) if isinstance(pad_token, str) else pad_token
174
+ self.add_bos_token = add_bos_token
175
+
176
+ with open(vocab_file, encoding="utf-8") as vocab_handle:
177
+ self.encoder = json.load(vocab_handle)
178
+ self.decoder = {v: k for k, v in self.encoder.items()}
179
+ self.errors = errors # how to handle errors in decoding
180
+ self.byte_encoder = bytes_to_unicode()
181
+ self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
182
+ with open(merges_file, encoding="utf-8") as merges_handle:
183
+ bpe_merges = merges_handle.read().split("\n")[1:-1]
184
+ bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
185
+ self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
186
+ self.cache = {}
187
+ self.add_prefix_space = add_prefix_space
188
+
189
+ # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
190
+ self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
191
+ super().__init__(
192
+ errors=errors,
193
+ unk_token=unk_token,
194
+ bos_token=bos_token,
195
+ eos_token=eos_token,
196
+ pad_token=pad_token,
197
+ add_prefix_space=add_prefix_space,
198
+ add_bos_token=add_bos_token,
199
+ **kwargs,
200
+ )
201
+
202
+ @property
203
+ def vocab_size(self):
204
+ return len(self.encoder)
205
+
206
+ def get_vocab(self):
207
+ return dict(self.encoder, **self.added_tokens_encoder)
208
+
209
+ def bpe(self, token):
210
+ if token in self.cache:
211
+ return self.cache[token]
212
+ word = tuple(token)
213
+ pairs = get_pairs(word)
214
+
215
+ if not pairs:
216
+ return token
217
+
218
+ while True:
219
+ bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
220
+ if bigram not in self.bpe_ranks:
221
+ break
222
+ first, second = bigram
223
+ new_word = []
224
+ i = 0
225
+ while i < len(word):
226
+ try:
227
+ j = word.index(first, i)
228
+ except ValueError:
229
+ new_word.extend(word[i:])
230
+ break
231
+ else:
232
+ new_word.extend(word[i:j])
233
+ i = j
234
+
235
+ if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
236
+ new_word.append(first + second)
237
+ i += 2
238
+ else:
239
+ new_word.append(word[i])
240
+ i += 1
241
+ new_word = tuple(new_word)
242
+ word = new_word
243
+ if len(word) == 1:
244
+ break
245
+ else:
246
+ pairs = get_pairs(word)
247
+ word = " ".join(word)
248
+ self.cache[token] = word
249
+ return word
250
+
251
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
252
+ if self.add_bos_token:
253
+ bos_token_ids = [self.bos_token_id]
254
+ else:
255
+ bos_token_ids = []
256
+
257
+ output = bos_token_ids + token_ids_0
258
+
259
+ if token_ids_1 is None:
260
+ return output
261
+
262
+ return output + bos_token_ids + token_ids_1
263
+
264
+ def _tokenize(self, text):
265
+ """Tokenize a string."""
266
+ bpe_tokens = []
267
+ for token in re.findall(self.pat, text):
268
+ token = "".join(
269
+ self.byte_encoder[b] for b in token.encode("utf-8")
270
+ ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
271
+ bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
272
+ return bpe_tokens
273
+
274
+ def _convert_token_to_id(self, token):
275
+ """Converts a token (str) in an id using the vocab."""
276
+ return self.encoder.get(token, self.encoder.get(self.unk_token))
277
+
278
+ def _convert_id_to_token(self, index):
279
+ """Converts an index (integer) in a token (str) using the vocab."""
280
+ return self.decoder.get(index)
281
+
282
+ def convert_tokens_to_string(self, tokens):
283
+ """Converts a sequence of tokens (string) in a single string."""
284
+ text = "".join(tokens)
285
+ text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
286
+ return text
287
+
288
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
289
+ if not os.path.isdir(save_directory):
290
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
291
+ return
292
+ vocab_file = os.path.join(
293
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
294
+ )
295
+ merge_file = os.path.join(
296
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
297
+ )
298
+
299
+ with open(vocab_file, "w", encoding="utf-8") as f:
300
+ f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
301
+
302
+ index = 0
303
+ with open(merge_file, "w", encoding="utf-8") as writer:
304
+ writer.write("#version: 0.2\n")
305
+ for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
306
+ if index != token_index:
307
+ logger.warning(
308
+ f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
309
+ " Please check that the tokenizer is not corrupted!"
310
+ )
311
+ index = token_index
312
+ writer.write(" ".join(bpe_tokens) + "\n")
313
+ index += 1
314
+
315
+ return vocab_file, merge_file
316
+
317
+ def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
318
+ add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
319
+ if is_split_into_words or add_prefix_space:
320
+ text = " " + text
321
+ return (text, kwargs)
322
+
323
+ def decode(
324
+ self,
325
+ token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"],
326
+ skip_special_tokens: bool = False,
327
+ clean_up_tokenization_spaces: bool = None,
328
+ truncate_before_pattern: Optional[List[str]] = None,
329
+ **kwargs,
330
+ ) -> str:
331
+ """
332
+ Converts a sequence of ids into a string, using the tokenizer and vocabulary, with options to remove special
333
+ tokens and clean up tokenization spaces.
334
+
335
+ Similar to doing `self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))`.
336
+
337
+ Args:
338
+ token_ids (`Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]`):
339
+ List of tokenized input ids. Can be obtained using the `__call__` method.
340
+ skip_special_tokens (`bool`, *optional*, defaults to `False`):
341
+ Whether or not to remove special tokens in the decoding.
342
+ clean_up_tokenization_spaces (`bool`, *optional*):
343
+ Whether or not to clean up the tokenization spaces. If `None`, will default to
344
+ `self.clean_up_tokenization_spaces` (available in the `tokenizer_config`).
345
+ truncate_before_pattern (`List[str]`, *optional*, defaults to `None`):
346
+ A list of regular expression strings that will be used to truncate the returned string. This can be
347
+ used to remove extra pieces of code (e.g. truncate if observing a comment symbol "#" at the beginning
348
+ of a new line). An example pattern could be `["^#", re.escape("<|endoftext|>"), "^'''", "\n\n\n"]`.
349
+ kwargs (additional keyword arguments, *optional*):
350
+ Will be passed to the underlying model specific decode method.
351
+
352
+ Returns:
353
+ `str`: The decoded sentence.
354
+ """
355
+
356
+ token_ids = to_py_obj(token_ids)
357
+
358
+ decoded_text = super()._decode(
359
+ token_ids=token_ids,
360
+ skip_special_tokens=skip_special_tokens,
361
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
362
+ **kwargs,
363
+ )
364
+
365
+ if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
366
+ decoded_text = self.truncate(decoded_text, truncate_before_pattern)
367
+
368
+ return decoded_text
369
+
370
+ def truncate(self, completion, truncate_before_pattern):
371
+ def find_re(string, pattern, start_pos):
372
+ m = pattern.search(string, start_pos)
373
+ return m.start() if m else -1
374
+
375
+ terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]
376
+
377
+ prints = list(re.finditer("^print", completion, re.MULTILINE))
378
+
379
+ if len(prints) > 1:
380
+ completion = completion[: prints[1].start()]
381
+
382
+ defs = list(re.finditer("^def", completion, re.MULTILINE))
383
+
384
+ if len(defs) > 1:
385
+ completion = completion[: defs[1].start()]
386
+
387
+ start_pos = 0
388
+
389
+ terminals_pos = [
390
+ pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
391
+ ]
392
+
393
+ if len(terminals_pos) > 0:
394
+ return completion[: min(terminals_pos)]
395
+ else:
396
+ return completion
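A short usage sketch of the `truncate_before_pattern` behaviour implemented above; it assumes the Salesforce/codegen-350M-mono vocabulary files referenced in this file can be downloaded:

```python
import re
from transformers import CodeGenTokenizer

tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
completion = 'def hello():\n    print("hello")\n\ndef unwanted():\n    pass\n'
ids = tokenizer(completion)["input_ids"]

# `truncate` also cuts at a second top-level `def`, so the unwanted function is
# dropped even before the regex patterns below are applied.
text = tokenizer.decode(
    ids, truncate_before_pattern=["^#", re.escape("<|endoftext|>"), "^'''", "\n\n\n"]
)
print(text)
```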
env-llmeval/lib/python3.10/site-packages/transformers/models/codegen/tokenization_codegen_fast.py ADDED
@@ -0,0 +1,255 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The Salesforce authors, The Open AI Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for OpenAI GPT."""
16
+
17
+
18
+ import json
19
+ import re
20
+ from typing import TYPE_CHECKING, List, Optional, Tuple, Union
21
+
22
+ import numpy as np
23
+
24
+ from ...utils import is_tf_available, is_torch_available, logging
25
+
26
+
27
+ if TYPE_CHECKING:
28
+ if is_torch_available():
29
+ import torch
30
+ if is_tf_available():
31
+ import tensorflow as tf
32
+
33
+ from tokenizers import pre_tokenizers
34
+
35
+ from ...tokenization_utils_base import BatchEncoding
36
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
37
+ from .tokenization_codegen import CodeGenTokenizer
38
+
39
+
40
+ logger = logging.get_logger(__name__)
41
+
42
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
43
+
44
+ PRETRAINED_VOCAB_FILES_MAP = {
45
+ "vocab_file": {
46
+ "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
47
+ },
48
+ "merges_file": {
49
+ "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
50
+ },
51
+ "tokenizer_file": {
52
+ "Salesforce/codegen-350M-mono": (
53
+ "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
54
+ ),
55
+ },
56
+ }
57
+
58
+ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
59
+ "Salesforce/codegen-350M-mono": 2048,
60
+ }
61
+
62
+
63
+ class CodeGenTokenizerFast(PreTrainedTokenizerFast):
64
+ """
65
+ Construct a "fast" CodeGen tokenizer (backed by HuggingFace's *tokenizers* library). Based on byte-level
66
+ Byte-Pair-Encoding.
67
+
68
+ This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
69
+ be encoded differently depending on whether it is at the beginning of the sentence (without a space) or not:
70
+
71
+ ```python
72
+ >>> from transformers import CodeGenTokenizerFast
73
+
74
+ >>> tokenizer = CodeGenTokenizerFast.from_pretrained("Salesforce/codegen-350M-mono")
75
+ >>> tokenizer("Hello world")["input_ids"]
76
+ [15496, 995]
77
+
78
+ >>> tokenizer(" Hello world")["input_ids"]
79
+ [18435, 995]
80
+ ```
81
+
82
+ You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer, but since
83
+ the model was not pretrained this way, it might yield a decrease in performance.
84
+
85
+ <Tip>
86
+
87
+ When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`.
88
+
89
+ </Tip>
90
+
91
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
92
+ refer to this superclass for more information regarding those methods.
93
+
94
+ Args:
95
+ vocab_file (`str`, *optional*):
96
+ Path to the vocabulary file.
97
+ merges_file (`str`, *optional*):
98
+ Path to the merges file.
99
+ tokenizer_file (`str`, *optional*):
100
+ Path to [tokenizers](https://github.com/huggingface/tokenizers) file (generally has a .json extension) that
101
+ contains everything needed to load the tokenizer.
102
+ unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
103
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
104
+ token instead.
105
+ bos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
106
+ The beginning of sequence token.
107
+ eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
108
+ The end of sequence token.
109
+ add_prefix_space (`bool`, *optional*, defaults to `False`):
110
+ Whether or not to add an initial space to the input. This allows treating the leading word just like any
111
+ other word. (The CodeGen tokenizer detects the beginning of words by the preceding space.)
112
+ """
113
+
114
+ vocab_files_names = VOCAB_FILES_NAMES
115
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
116
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
117
+ model_input_names = ["input_ids", "attention_mask"]
118
+ slow_tokenizer_class = CodeGenTokenizer
119
+
120
+ def __init__(
121
+ self,
122
+ vocab_file=None,
123
+ merges_file=None,
124
+ tokenizer_file=None,
125
+ unk_token="<|endoftext|>",
126
+ bos_token="<|endoftext|>",
127
+ eos_token="<|endoftext|>",
128
+ add_prefix_space=False,
129
+ **kwargs,
130
+ ):
131
+ super().__init__(
132
+ vocab_file,
133
+ merges_file,
134
+ tokenizer_file=tokenizer_file,
135
+ unk_token=unk_token,
136
+ bos_token=bos_token,
137
+ eos_token=eos_token,
138
+ add_prefix_space=add_prefix_space,
139
+ **kwargs,
140
+ )
141
+
142
+ if kwargs.pop("add_bos_token", False):
143
+ model_id = kwargs.pop("name_or_path", "")
144
+ raise ValueError(
145
+ "Currenty GPT2's fast tokenizer does NOT support adding a BOS token. "
146
+ "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
147
+ f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
148
+ f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
149
+ "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
150
+ " so that the fast tokenizer works correctly."
151
+ )
152
+
153
+ pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
154
+ if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
155
+ pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
156
+ pre_tok_state["add_prefix_space"] = add_prefix_space
157
+ self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
158
+
159
+ self.add_prefix_space = add_prefix_space
160
+
161
+ def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
162
+ is_split_into_words = kwargs.get("is_split_into_words", False)
163
+ assert self.add_prefix_space or not is_split_into_words, (
164
+ f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
165
+ "to use it with pretokenized inputs."
166
+ )
167
+
168
+ return super()._batch_encode_plus(*args, **kwargs)
169
+
170
+ def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
171
+ is_split_into_words = kwargs.get("is_split_into_words", False)
172
+
173
+ assert self.add_prefix_space or not is_split_into_words, (
174
+ f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
175
+ "to use it with pretokenized inputs."
176
+ )
177
+
178
+ return super()._encode_plus(*args, **kwargs)
179
+
180
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
181
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
182
+ return tuple(files)
183
+
184
+ def decode(
185
+ self,
186
+ token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"],
187
+ skip_special_tokens: bool = False,
188
+ clean_up_tokenization_spaces: bool = None,
189
+ truncate_before_pattern: Optional[List[str]] = None,
190
+ **kwargs,
191
+ ) -> str:
192
+ """
193
+ Converts a sequence of ids into a string, using the tokenizer and vocabulary, with options to remove special
194
+ tokens and clean up tokenization spaces.
195
+
196
+ Similar to doing `self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))`.
197
+
198
+ Args:
199
+ token_ids (`Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]`):
200
+ List of tokenized input ids. Can be obtained using the `__call__` method.
201
+ skip_special_tokens (`bool`, *optional*, defaults to `False`):
202
+ Whether or not to remove special tokens in the decoding.
203
+ clean_up_tokenization_spaces (`bool`, *optional*):
204
+ Whether or not to clean up the tokenization spaces. If `None`, will default to
205
+ `self.clean_up_tokenization_spaces` (available in the `tokenizer_config`).
206
+ truncate_before_pattern (`List[str]`, *optional*, defaults to `None`):
207
+ A list of regular expression strings that will be used to truncate the returned string. This can be
208
+ used to remove extra pieces of code (e.g. truncate if observing a comment symbol "#" at the beginning
209
+ of a new line). An example pattern could be `["^#", re.escape("<|endoftext|>"), "^'''", "\n\n\n"]`.
210
+ kwargs (additional keyword arguments, *optional*):
211
+ Will be passed to the underlying model specific decode method.
212
+
213
+ Returns:
214
+ `str`: The decoded sentence.
215
+ """
216
+
217
+ decoded_text = super().decode(
218
+ token_ids=token_ids,
219
+ skip_special_tokens=skip_special_tokens,
220
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
221
+ **kwargs,
222
+ )
223
+
224
+ if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
225
+ decoded_text = self.truncate(decoded_text, truncate_before_pattern)
226
+
227
+ return decoded_text
228
+
229
+ def truncate(self, completion, truncate_before_pattern):
230
+ def find_re(string, pattern, start_pos):
231
+ m = pattern.search(string, start_pos)
232
+ return m.start() if m else -1
233
+
234
+ terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]
235
+
236
+ prints = list(re.finditer("^print", completion, re.MULTILINE))
237
+
238
+ if len(prints) > 1:
239
+ completion = completion[: prints[1].start()]
240
+
241
+ defs = list(re.finditer("^def", completion, re.MULTILINE))
242
+
243
+ if len(defs) > 1:
244
+ completion = completion[: defs[1].start()]
245
+
246
+ start_pos = 0
247
+
248
+ terminals_pos = [
249
+ pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
250
+ ]
251
+
252
+ if len(terminals_pos) > 0:
253
+ return completion[: min(terminals_pos)]
254
+ else:
255
+ return completion
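Because the constructor above rejects `add_bos_token=True`, prepending a BOS id currently requires the slow class, as the error message suggests. A minimal sketch of that fallback (assumes the checkpoint files are reachable):

```python
from transformers import CodeGenTokenizer, CodeGenTokenizerFast

fast = CodeGenTokenizerFast.from_pretrained("Salesforce/codegen-350M-mono")
slow = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono", add_bos_token=True)

print(fast("hello")["input_ids"])  # no BOS id prepended
print(slow("hello")["input_ids"])  # starts with the <|endoftext|> BOS id
```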
env-llmeval/lib/python3.10/site-packages/transformers/models/convnext/__init__.py ADDED
@@ -0,0 +1,102 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import (
17
+ OptionalDependencyNotAvailable,
18
+ _LazyModule,
19
+ is_tf_available,
20
+ is_torch_available,
21
+ is_vision_available,
22
+ )
23
+
24
+
25
+ _import_structure = {
26
+ "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
27
+ }
28
+
29
+ try:
30
+ if not is_vision_available():
31
+ raise OptionalDependencyNotAvailable()
32
+ except OptionalDependencyNotAvailable:
33
+ pass
34
+ else:
35
+ _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
36
+ _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]
37
+
38
+ try:
39
+ if not is_torch_available():
40
+ raise OptionalDependencyNotAvailable()
41
+ except OptionalDependencyNotAvailable:
42
+ pass
43
+ else:
44
+ _import_structure["modeling_convnext"] = [
45
+ "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
46
+ "ConvNextForImageClassification",
47
+ "ConvNextModel",
48
+ "ConvNextPreTrainedModel",
49
+ "ConvNextBackbone",
50
+ ]
51
+
52
+ try:
53
+ if not is_tf_available():
54
+ raise OptionalDependencyNotAvailable()
55
+ except OptionalDependencyNotAvailable:
56
+ pass
57
+ else:
58
+ _import_structure["modeling_tf_convnext"] = [
59
+ "TFConvNextForImageClassification",
60
+ "TFConvNextModel",
61
+ "TFConvNextPreTrainedModel",
62
+ ]
63
+
64
+ if TYPE_CHECKING:
65
+ from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
66
+
67
+ try:
68
+ if not is_vision_available():
69
+ raise OptionalDependencyNotAvailable()
70
+ except OptionalDependencyNotAvailable:
71
+ pass
72
+ else:
73
+ from .feature_extraction_convnext import ConvNextFeatureExtractor
74
+ from .image_processing_convnext import ConvNextImageProcessor
75
+
76
+ try:
77
+ if not is_torch_available():
78
+ raise OptionalDependencyNotAvailable()
79
+ except OptionalDependencyNotAvailable:
80
+ pass
81
+ else:
82
+ from .modeling_convnext import (
83
+ CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
84
+ ConvNextBackbone,
85
+ ConvNextForImageClassification,
86
+ ConvNextModel,
87
+ ConvNextPreTrainedModel,
88
+ )
89
+
90
+ try:
91
+ if not is_tf_available():
92
+ raise OptionalDependencyNotAvailable()
93
+ except OptionalDependencyNotAvailable:
94
+ pass
95
+ else:
96
+ from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
97
+
98
+
99
+ else:
100
+ import sys
101
+
102
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
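The `_LazyModule` registration above means the framework-specific submodules are only imported when one of the exported names is first accessed. A quick illustration (assumes `transformers` with PyTorch installed):

```python
import transformers

config_cls = transformers.ConvNextConfig  # resolves configuration_convnext lazily
model_cls = transformers.ConvNextModel    # first access triggers the torch-backed import
print(config_cls.model_type)              # "convnext"
print(model_cls.__module__)               # transformers.models.convnext.modeling_convnext
```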
env-llmeval/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/configuration_convnext.cpython-310.pyc ADDED
Binary file (6.08 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/convert_convnext_to_pytorch.cpython-310.pyc ADDED
Binary file (7.14 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/convnext/configuration_convnext.py ADDED
@@ -0,0 +1,144 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ ConvNeXT model configuration"""
16
+
17
+ from collections import OrderedDict
18
+ from typing import Mapping
19
+
20
+ from packaging import version
21
+
22
+ from ...configuration_utils import PretrainedConfig
23
+ from ...onnx import OnnxConfig
24
+ from ...utils import logging
25
+ from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
26
+
27
+
28
+ logger = logging.get_logger(__name__)
29
+
30
+ CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
31
+ "facebook/convnext-tiny-224": "https://huggingface.co/facebook/convnext-tiny-224/resolve/main/config.json",
32
+ # See all ConvNeXT models at https://huggingface.co/models?filter=convnext
33
+ }
34
+
35
+
36
+ class ConvNextConfig(BackboneConfigMixin, PretrainedConfig):
37
+ r"""
38
+ This is the configuration class to store the configuration of a [`ConvNextModel`]. It is used to instantiate a
39
+ ConvNeXT model according to the specified arguments, defining the model architecture. Instantiating a configuration
40
+ with the defaults will yield a similar configuration to that of the ConvNeXT
41
+ [facebook/convnext-tiny-224](https://huggingface.co/facebook/convnext-tiny-224) architecture.
42
+
43
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
44
+ documentation from [`PretrainedConfig`] for more information.
45
+
46
+ Args:
47
+ num_channels (`int`, *optional*, defaults to 3):
48
+ The number of input channels.
49
+ patch_size (`int`, *optional*, defaults to 4):
51
+ Patch size to use in the patch embedding layer.
52
+ num_stages (`int`, *optional*, defaults to 4):
52
+ The number of stages in the model.
53
+ hidden_sizes (`List[int]`, *optional*, defaults to [96, 192, 384, 768]):
54
+ Dimensionality (hidden size) at each stage.
55
+ depths (`List[int]`, *optional*, defaults to [3, 3, 9, 3]):
56
+ Depth (number of blocks) for each stage.
57
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
58
+ The non-linear activation function (function or string) in each block. If string, `"gelu"`, `"relu"`,
59
+ `"selu"` and `"gelu_new"` are supported.
60
+ initializer_range (`float`, *optional*, defaults to 0.02):
61
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
62
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
63
+ The epsilon used by the layer normalization layers.
64
+ layer_scale_init_value (`float`, *optional*, defaults to 1e-6):
65
+ The initial value for the layer scale.
66
+ drop_path_rate (`float`, *optional*, defaults to 0.0):
67
+ The drop rate for stochastic depth.
68
+ out_features (`List[str]`, *optional*):
69
+ If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
70
+ (depending on how many stages the model has). If unset and `out_indices` is set, will default to the
71
+ corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
72
+ same order as defined in the `stage_names` attribute.
73
+ out_indices (`List[int]`, *optional*):
74
+ If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
75
+ many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
76
+ If unset and `out_features` is unset, will default to the last stage. Must be in the
77
+ same order as defined in the `stage_names` attribute.
78
+
79
+ Example:
80
+ ```python
81
+ >>> from transformers import ConvNextConfig, ConvNextModel
82
+
83
+ >>> # Initializing a ConvNext convnext-tiny-224 style configuration
84
+ >>> configuration = ConvNextConfig()
85
+
86
+ >>> # Initializing a model (with random weights) from the convnext-tiny-224 style configuration
87
+ >>> model = ConvNextModel(configuration)
88
+
89
+ >>> # Accessing the model configuration
90
+ >>> configuration = model.config
91
+ ```"""
92
+
93
+ model_type = "convnext"
94
+
95
+ def __init__(
96
+ self,
97
+ num_channels=3,
98
+ patch_size=4,
99
+ num_stages=4,
100
+ hidden_sizes=None,
101
+ depths=None,
102
+ hidden_act="gelu",
103
+ initializer_range=0.02,
104
+ layer_norm_eps=1e-12,
105
+ layer_scale_init_value=1e-6,
106
+ drop_path_rate=0.0,
107
+ image_size=224,
108
+ out_features=None,
109
+ out_indices=None,
110
+ **kwargs,
111
+ ):
112
+ super().__init__(**kwargs)
113
+
114
+ self.num_channels = num_channels
115
+ self.patch_size = patch_size
116
+ self.num_stages = num_stages
117
+ self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
118
+ self.depths = [3, 3, 9, 3] if depths is None else depths
119
+ self.hidden_act = hidden_act
120
+ self.initializer_range = initializer_range
121
+ self.layer_norm_eps = layer_norm_eps
122
+ self.layer_scale_init_value = layer_scale_init_value
123
+ self.drop_path_rate = drop_path_rate
124
+ self.image_size = image_size
125
+ self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
126
+ self._out_features, self._out_indices = get_aligned_output_features_output_indices(
127
+ out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
128
+ )
129
+
130
+
131
+ class ConvNextOnnxConfig(OnnxConfig):
132
+ torch_onnx_minimum_version = version.parse("1.11")
133
+
134
+ @property
135
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
136
+ return OrderedDict(
137
+ [
138
+ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
139
+ ]
140
+ )
141
+
142
+ @property
143
+ def atol_for_validation(self) -> float:
144
+ return 1e-5
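A short sketch of the backbone-related fields above: `stage_names` is derived from `depths`, and `out_features`/`out_indices` are aligned against it. The argument values below are illustrative only:

```python
from transformers import ConvNextConfig

config = ConvNextConfig(depths=[3, 3, 9, 3], out_features=["stage2", "stage4"])
print(config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
print(config.out_features)  # ['stage2', 'stage4']
print(config.out_indices)   # indices aligned with the requested stage names
```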
env-llmeval/lib/python3.10/site-packages/transformers/models/convnext/convert_convnext_to_pytorch.py ADDED
@@ -0,0 +1,243 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert ConvNext checkpoints from the original repository.
16
+
17
+ URL: https://github.com/facebookresearch/ConvNeXt"""
18
+
19
+
20
+ import argparse
21
+ import json
22
+ from pathlib import Path
23
+
24
+ import requests
25
+ import torch
26
+ from huggingface_hub import hf_hub_download
27
+ from PIL import Image
28
+
29
+ from transformers import ConvNextConfig, ConvNextForImageClassification, ConvNextImageProcessor
30
+ from transformers.utils import logging
31
+
32
+
33
+ logging.set_verbosity_info()
34
+ logger = logging.get_logger(__name__)
35
+
36
+
37
+ def get_convnext_config(checkpoint_url):
38
+ config = ConvNextConfig()
39
+
40
+ if "tiny" in checkpoint_url:
41
+ depths = [3, 3, 9, 3]
42
+ hidden_sizes = [96, 192, 384, 768]
43
+ if "small" in checkpoint_url:
44
+ depths = [3, 3, 27, 3]
45
+ hidden_sizes = [96, 192, 384, 768]
46
+ if "base" in checkpoint_url:
47
+ depths = [3, 3, 27, 3]
48
+ hidden_sizes = [128, 256, 512, 1024]
49
+ if "large" in checkpoint_url:
50
+ depths = [3, 3, 27, 3]
51
+ hidden_sizes = [192, 384, 768, 1536]
52
+ if "xlarge" in checkpoint_url:
53
+ depths = [3, 3, 27, 3]
54
+ hidden_sizes = [256, 512, 1024, 2048]
55
+
56
+ if "1k" in checkpoint_url:
57
+ num_labels = 1000
58
+ filename = "imagenet-1k-id2label.json"
59
+ expected_shape = (1, 1000)
60
+ else:
61
+ num_labels = 21841
62
+ filename = "imagenet-22k-id2label.json"
63
+ expected_shape = (1, 21841)
64
+
65
+ repo_id = "huggingface/label-files"
66
+ config.num_labels = num_labels
67
+ id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
68
+ id2label = {int(k): v for k, v in id2label.items()}
69
+ if "1k" not in checkpoint_url:
70
+ # this dataset contains 21843 labels but the model only has 21841
71
+ # we delete the classes as mentioned in https://github.com/google-research/big_transfer/issues/18
72
+ del id2label[9205]
73
+ del id2label[15027]
74
+ config.id2label = id2label
75
+ config.label2id = {v: k for k, v in id2label.items()}
76
+ config.hidden_sizes = hidden_sizes
77
+ config.depths = depths
78
+
79
+ return config, expected_shape
80
+
81
+
82
+ def rename_key(name):
83
+ if "downsample_layers.0.0" in name:
84
+ name = name.replace("downsample_layers.0.0", "embeddings.patch_embeddings")
85
+ if "downsample_layers.0.1" in name:
86
+ name = name.replace("downsample_layers.0.1", "embeddings.norm") # we rename to layernorm later on
87
+ if "downsample_layers.1.0" in name:
88
+ name = name.replace("downsample_layers.1.0", "stages.1.downsampling_layer.0")
89
+ if "downsample_layers.1.1" in name:
90
+ name = name.replace("downsample_layers.1.1", "stages.1.downsampling_layer.1")
91
+ if "downsample_layers.2.0" in name:
92
+ name = name.replace("downsample_layers.2.0", "stages.2.downsampling_layer.0")
93
+ if "downsample_layers.2.1" in name:
94
+ name = name.replace("downsample_layers.2.1", "stages.2.downsampling_layer.1")
95
+ if "downsample_layers.3.0" in name:
96
+ name = name.replace("downsample_layers.3.0", "stages.3.downsampling_layer.0")
97
+ if "downsample_layers.3.1" in name:
98
+ name = name.replace("downsample_layers.3.1", "stages.3.downsampling_layer.1")
99
+ if "stages" in name and "downsampling_layer" not in name:
100
+ # stages.0.0. for instance should be renamed to stages.0.layers.0.
101
+ name = name[: len("stages.0")] + ".layers" + name[len("stages.0") :]
102
+ if "stages" in name:
103
+ name = name.replace("stages", "encoder.stages")
104
+ if "norm" in name:
105
+ name = name.replace("norm", "layernorm")
106
+ if "gamma" in name:
107
+ name = name.replace("gamma", "layer_scale_parameter")
108
+ if "head" in name:
109
+ name = name.replace("head", "classifier")
110
+
111
+ return name
112
+
113
+
114
+ # We will verify our results on an image of cute cats
115
+ def prepare_img():
116
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
117
+ im = Image.open(requests.get(url, stream=True).raw)
118
+ return im
119
+
120
+
121
+ @torch.no_grad()
122
+ def convert_convnext_checkpoint(checkpoint_url, pytorch_dump_folder_path):
123
+ """
124
+ Copy/paste/tweak model's weights to our ConvNext structure.
125
+ """
126
+
127
+ # define ConvNext configuration based on URL
128
+ config, expected_shape = get_convnext_config(checkpoint_url)
129
+ # load original state_dict from URL
130
+ state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]
131
+ # rename keys
132
+ for key in state_dict.copy().keys():
133
+ val = state_dict.pop(key)
134
+ state_dict[rename_key(key)] = val
135
+ # add prefix to all keys except the classifier head
136
+ for key in state_dict.copy().keys():
137
+ val = state_dict.pop(key)
138
+ if not key.startswith("classifier"):
139
+ key = "convnext." + key
140
+ state_dict[key] = val
141
+
142
+ # load HuggingFace model
143
+ model = ConvNextForImageClassification(config)
144
+ model.load_state_dict(state_dict)
145
+ model.eval()
146
+
147
+ # Check outputs on an image, prepared by ConvNextImageProcessor
148
+ size = 224 if "224" in checkpoint_url else 384
149
+ image_processor = ConvNextImageProcessor(size=size)
150
+ pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values
151
+
152
+ logits = model(pixel_values).logits
153
+
154
+ # note: the logits below were obtained without center cropping
155
+ if checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth":
156
+ expected_logits = torch.tensor([-0.1210, -0.6605, 0.1918])
157
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_small_1k_224_ema.pth":
158
+ expected_logits = torch.tensor([-0.4473, -0.1847, -0.6365])
159
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_base_1k_224_ema.pth":
160
+ expected_logits = torch.tensor([0.4525, 0.7539, 0.0308])
161
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_base_1k_384.pth":
162
+ expected_logits = torch.tensor([0.3561, 0.6350, -0.0384])
163
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_large_1k_224_ema.pth":
164
+ expected_logits = torch.tensor([0.4174, -0.0989, 0.1489])
165
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_large_1k_384.pth":
166
+ expected_logits = torch.tensor([0.2513, -0.1349, -0.1613])
167
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_224.pth":
168
+ expected_logits = torch.tensor([1.2980, 0.3631, -0.1198])
169
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_224.pth":
170
+ expected_logits = torch.tensor([1.2963, 0.1227, 0.1723])
171
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_224.pth":
172
+ expected_logits = torch.tensor([1.7956, 0.8390, 0.2820])
173
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_1k_224.pth":
174
+ expected_logits = torch.tensor([-0.2822, -0.0502, -0.0878])
175
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_1k_384.pth":
176
+ expected_logits = torch.tensor([-0.5672, -0.0730, -0.4348])
177
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_1k_224.pth":
178
+ expected_logits = torch.tensor([0.2681, 0.2365, 0.6246])
179
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_1k_384.pth":
180
+ expected_logits = torch.tensor([-0.2642, 0.3931, 0.5116])
181
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_1k_224_ema.pth":
182
+ expected_logits = torch.tensor([-0.6677, -0.1873, -0.8379])
183
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_1k_384_ema.pth":
184
+ expected_logits = torch.tensor([-0.7749, -0.2967, -0.6444])
185
+ else:
186
+ raise ValueError(f"Unknown URL: {checkpoint_url}")
187
+
188
+ assert torch.allclose(logits[0, :3], expected_logits, atol=1e-3)
189
+ assert logits.shape == expected_shape
190
+
191
+ Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
192
+ print(f"Saving model to {pytorch_dump_folder_path}")
193
+ model.save_pretrained(pytorch_dump_folder_path)
194
+ print(f"Saving image processor to {pytorch_dump_folder_path}")
195
+ image_processor.save_pretrained(pytorch_dump_folder_path)
196
+
197
+ print("Pushing model to the hub...")
198
+ model_name = "convnext"
199
+ if "tiny" in checkpoint_url:
200
+ model_name += "-tiny"
201
+ elif "small" in checkpoint_url:
202
+ model_name += "-small"
203
+ elif "base" in checkpoint_url:
204
+ model_name += "-base"
205
+ elif "xlarge" in checkpoint_url:
206
+ model_name += "-xlarge"
207
+ elif "large" in checkpoint_url:
208
+ model_name += "-large"
209
+ if "224" in checkpoint_url:
210
+ model_name += "-224"
211
+ elif "384" in checkpoint_url:
212
+ model_name += "-384"
213
+ if "22k" in checkpoint_url and "1k" not in checkpoint_url:
214
+ model_name += "-22k"
215
+ if "22k" in checkpoint_url and "1k" in checkpoint_url:
216
+ model_name += "-22k-1k"
217
+
218
+ model.push_to_hub(
219
+ repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
220
+ organization="nielsr",
221
+ commit_message="Add model",
222
+ )
223
+
224
+
225
+ if __name__ == "__main__":
226
+ parser = argparse.ArgumentParser()
227
+ # Required parameters
228
+ parser.add_argument(
229
+ "--checkpoint_url",
230
+ default="https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth",
231
+ type=str,
232
+ help="URL of the original ConvNeXT checkpoint you'd like to convert.",
233
+ )
234
+ parser.add_argument(
235
+ "--pytorch_dump_folder_path",
236
+ default=None,
237
+ type=str,
238
+ required=True,
239
+ help="Path to the output PyTorch model directory.",
240
+ )
241
+
242
+ args = parser.parse_args()
243
+ convert_convnext_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
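The converter can also be called programmatically rather than through `argparse`. The sketch below uses the tiny ImageNet-1k checkpoint URL handled above; note that the function downloads the checkpoint, verifies logits, saves locally, and finally attempts a Hub push, so it needs network access (and Hub credentials for that last step). The output folder name is just an example:

```python
from transformers.models.convnext.convert_convnext_to_pytorch import (
    convert_convnext_checkpoint,
)

convert_convnext_checkpoint(
    checkpoint_url="https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth",
    pytorch_dump_folder_path="./convnext-tiny-224",  # illustrative output path
)
```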
env-llmeval/lib/python3.10/site-packages/transformers/models/convnext/feature_extraction_convnext.py ADDED
@@ -0,0 +1,33 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Feature extractor class for ConvNeXT."""
16
+
17
+ import warnings
18
+
19
+ from ...utils import logging
20
+ from .image_processing_convnext import ConvNextImageProcessor
21
+
22
+
23
+ logger = logging.get_logger(__name__)
24
+
25
+
26
+ class ConvNextFeatureExtractor(ConvNextImageProcessor):
27
+ def __init__(self, *args, **kwargs) -> None:
28
+ warnings.warn(
29
+ "The class ConvNextFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
30
+ " Please use ConvNextImageProcessor instead.",
31
+ FutureWarning,
32
+ )
33
+ super().__init__(*args, **kwargs)
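The class above is purely a deprecation shim: constructing it emits a `FutureWarning` and otherwise behaves like `ConvNextImageProcessor`. A quick check (assumes the vision extras, i.e. Pillow, are installed):

```python
import warnings
from transformers import ConvNextFeatureExtractor, ConvNextImageProcessor

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    extractor = ConvNextFeatureExtractor()

print(isinstance(extractor, ConvNextImageProcessor))     # True
print(any(w.category is FutureWarning for w in caught))  # True
```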
env-llmeval/lib/python3.10/site-packages/transformers/models/convnext/image_processing_convnext.py ADDED
@@ -0,0 +1,338 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Image processor class for ConvNeXT."""
16
+
17
+ from typing import Dict, List, Optional, Union
18
+
19
+ import numpy as np
20
+
21
+ from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
22
+ from ...image_transforms import (
23
+ center_crop,
24
+ get_resize_output_image_size,
25
+ resize,
26
+ to_channel_dimension_format,
27
+ )
28
+ from ...image_utils import (
29
+ IMAGENET_STANDARD_MEAN,
30
+ IMAGENET_STANDARD_STD,
31
+ ChannelDimension,
32
+ ImageInput,
33
+ PILImageResampling,
34
+ infer_channel_dimension_format,
35
+ is_scaled_image,
36
+ make_list_of_images,
37
+ to_numpy_array,
38
+ valid_images,
39
+ validate_kwargs,
40
+ validate_preprocess_arguments,
41
+ )
42
+ from ...utils import TensorType, is_vision_available, logging
43
+
44
+
45
+ if is_vision_available():
46
+ import PIL
47
+
48
+
49
+ logger = logging.get_logger(__name__)
50
+
51
+
52
+ class ConvNextImageProcessor(BaseImageProcessor):
53
+ r"""
54
+ Constructs a ConvNeXT image processor.
55
+
56
+ Args:
57
+ do_resize (`bool`, *optional*, defaults to `True`):
58
+ Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be overriden
59
+ by `do_resize` in the `preprocess` method.
60
+ size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 384}`):
61
+ Resolution of the output image after `resize` is applied. If `size["shortest_edge"]` >= 384, the image is
62
+ resized to `(size["shortest_edge"], size["shortest_edge"])`. Otherwise, the smaller edge of the image will
63
+ be matched to `int(size["shortest_edge"]/crop_pct)`, after which the image is cropped to
64
+ `(size["shortest_edge"], size["shortest_edge"])`. Only has an effect if `do_resize` is set to `True`. Can
65
+ be overriden by `size` in the `preprocess` method.
66
+ crop_pct (`float` *optional*, defaults to 224 / 256):
67
+ Percentage of the image to crop. Only has an effect if `do_resize` is `True` and size < 384. Can be
68
+ overriden by `crop_pct` in the `preprocess` method.
69
+ resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
70
+ Resampling filter to use if resizing the image. Can be overriden by `resample` in the `preprocess` method.
71
+ do_rescale (`bool`, *optional*, defaults to `True`):
72
+ Whether to rescale the image by the specified scale `rescale_factor`. Can be overriden by `do_rescale` in
73
+ the `preprocess` method.
74
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
75
+ Scale factor to use if rescaling the image. Can be overriden by `rescale_factor` in the `preprocess`
76
+ method.
77
+ do_normalize (`bool`, *optional*, defaults to `True`):
78
+ Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
79
+ method.
80
+ image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
81
+ Mean to use if normalizing the image. This is a float or list of floats the length of the number of
82
+ channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
83
+ image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
84
+ Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
85
+ number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
86
+ """
87
+
88
+ model_input_names = ["pixel_values"]
89
+
90
+ def __init__(
91
+ self,
92
+ do_resize: bool = True,
93
+ size: Dict[str, int] = None,
94
+ crop_pct: float = None,
95
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
96
+ do_rescale: bool = True,
97
+ rescale_factor: Union[int, float] = 1 / 255,
98
+ do_normalize: bool = True,
99
+ image_mean: Optional[Union[float, List[float]]] = None,
100
+ image_std: Optional[Union[float, List[float]]] = None,
101
+ **kwargs,
102
+ ) -> None:
103
+ super().__init__(**kwargs)
104
+ size = size if size is not None else {"shortest_edge": 384}
105
+ size = get_size_dict(size, default_to_square=False)
106
+
107
+ self.do_resize = do_resize
108
+ self.size = size
109
+ # Default value set here for backwards compatibility where the value in config is None
110
+ self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
111
+ self.resample = resample
112
+ self.do_rescale = do_rescale
113
+ self.rescale_factor = rescale_factor
114
+ self.do_normalize = do_normalize
115
+ self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
116
+ self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
117
+ self._valid_processor_keys = [
118
+ "images",
119
+ "do_resize",
120
+ "size",
121
+ "crop_pct",
122
+ "resample",
123
+ "do_rescale",
124
+ "rescale_factor",
125
+ "do_normalize",
126
+ "image_mean",
127
+ "image_std",
128
+ "return_tensors",
129
+ "data_format",
130
+ "input_data_format",
131
+ ]
132
+
133
+ def resize(
134
+ self,
135
+ image: np.ndarray,
136
+ size: Dict[str, int],
137
+ crop_pct: float,
138
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
139
+ data_format: Optional[Union[str, ChannelDimension]] = None,
140
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
141
+ **kwargs,
142
+ ) -> np.ndarray:
143
+ """
144
+ Resize an image.
145
+
146
+ Args:
147
+ image (`np.ndarray`):
148
+ Image to resize.
149
+ size (`Dict[str, int]`):
150
+ Dictionary of the form `{"shortest_edge": int}`, specifying the size of the output image. If
151
+ `size["shortest_edge"]` >= 384 image is resized to `(size["shortest_edge"], size["shortest_edge"])`.
152
+ Otherwise, the smaller edge of the image will be matched to `int(size["shortest_edge"] / crop_pct)`,
153
+ after which the image is cropped to `(size["shortest_edge"], size["shortest_edge"])`.
154
+ crop_pct (`float`):
155
+ Percentage of the image to crop. Only has an effect if size < 384.
156
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
157
+ Resampling filter to use when resizing the image.
158
+ data_format (`str` or `ChannelDimension`, *optional*):
159
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
160
+ input_data_format (`ChannelDimension` or `str`, *optional*):
161
+ The channel dimension format of the input image. If not provided, it will be inferred from the input
162
+ image.
163
+ """
164
+ size = get_size_dict(size, default_to_square=False)
165
+ if "shortest_edge" not in size:
166
+ raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
167
+ shortest_edge = size["shortest_edge"]
168
+
169
+ if shortest_edge < 384:
170
+ # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
171
+ resize_shortest_edge = int(shortest_edge / crop_pct)
172
+ resize_size = get_resize_output_image_size(
173
+ image, size=resize_shortest_edge, default_to_square=False, input_data_format=input_data_format
174
+ )
175
+ image = resize(
176
+ image=image,
177
+ size=resize_size,
178
+ resample=resample,
179
+ data_format=data_format,
180
+ input_data_format=input_data_format,
181
+ **kwargs,
182
+ )
183
+ # then crop to (shortest_edge, shortest_edge)
184
+ return center_crop(
185
+ image=image,
186
+ size=(shortest_edge, shortest_edge),
187
+ data_format=data_format,
188
+ input_data_format=input_data_format,
189
+ **kwargs,
190
+ )
191
+ else:
192
+ # warping (no cropping) when evaluated at 384 or larger
193
+ return resize(
194
+ image,
195
+ size=(shortest_edge, shortest_edge),
196
+ resample=resample,
197
+ data_format=data_format,
198
+ input_data_format=input_data_format,
199
+ **kwargs,
200
+ )
201
+
202
+ def preprocess(
203
+ self,
204
+ images: ImageInput,
205
+ do_resize: bool = None,
206
+ size: Dict[str, int] = None,
207
+ crop_pct: float = None,
208
+ resample: PILImageResampling = None,
209
+ do_rescale: bool = None,
210
+ rescale_factor: float = None,
211
+ do_normalize: bool = None,
212
+ image_mean: Optional[Union[float, List[float]]] = None,
213
+ image_std: Optional[Union[float, List[float]]] = None,
214
+ return_tensors: Optional[Union[str, TensorType]] = None,
215
+ data_format: ChannelDimension = ChannelDimension.FIRST,
216
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
217
+ **kwargs,
218
+ ) -> PIL.Image.Image:
219
+ """
220
+ Preprocess an image or batch of images.
221
+
222
+ Args:
223
+ images (`ImageInput`):
224
+ Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
225
+ passing in images with pixel values between 0 and 1, set `do_rescale=False`.
226
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
227
+ Whether to resize the image.
228
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
229
+ Size of the output image after `resize` has been applied. If `size["shortest_edge"]` >= 384, the image
230
+ is resized to `(size["shortest_edge"], size["shortest_edge"])`. Otherwise, the smaller edge of the
231
+ image will be matched to `int(size["shortest_edge"]/ crop_pct)`, after which the image is cropped to
232
+ `(size["shortest_edge"], size["shortest_edge"])`. Only has an effect if `do_resize` is set to `True`.
233
+ crop_pct (`float`, *optional*, defaults to `self.crop_pct`):
234
+ Percentage of the image to crop if size < 384.
235
+ resample (`int`, *optional*, defaults to `self.resample`):
236
+ Resampling filter to use if resizing the image. This can be one of the `PILImageResampling` filters. Only
237
+ has an effect if `do_resize` is set to `True`.
238
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
239
+ Whether to rescale the image pixel values to the range [0, 1].
240
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
241
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
242
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
243
+ Whether to normalize the image.
244
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
245
+ Image mean.
246
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
247
+ Image standard deviation.
248
+ return_tensors (`str` or `TensorType`, *optional*):
249
+ The type of tensors to return. Can be one of:
250
+ - Unset: Return a list of `np.ndarray`.
251
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
252
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
253
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
254
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
255
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
256
+ The channel dimension format for the output image. Can be one of:
257
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
258
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
259
+ - Unset: Use the channel dimension format of the input image.
260
+ input_data_format (`ChannelDimension` or `str`, *optional*):
261
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
262
+ from the input image. Can be one of:
263
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
264
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
265
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
266
+ """
267
+ do_resize = do_resize if do_resize is not None else self.do_resize
268
+ crop_pct = crop_pct if crop_pct is not None else self.crop_pct
269
+ resample = resample if resample is not None else self.resample
270
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
271
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
272
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
273
+ image_mean = image_mean if image_mean is not None else self.image_mean
274
+ image_std = image_std if image_std is not None else self.image_std
275
+
276
+ size = size if size is not None else self.size
277
+ size = get_size_dict(size, default_to_square=False)
278
+
279
+ validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
280
+
281
+ images = make_list_of_images(images)
282
+
283
+ if not valid_images(images):
284
+ raise ValueError(
285
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
286
+ "torch.Tensor, tf.Tensor or jax.ndarray."
287
+ )
288
+
289
+ validate_preprocess_arguments(
290
+ do_rescale=do_rescale,
291
+ rescale_factor=rescale_factor,
292
+ do_normalize=do_normalize,
293
+ image_mean=image_mean,
294
+ image_std=image_std,
295
+ do_resize=do_resize,
296
+ size=size,
297
+ resample=resample,
298
+ )
299
+
300
+ # All transformations expect numpy arrays.
301
+ images = [to_numpy_array(image) for image in images]
302
+
303
+ if is_scaled_image(images[0]) and do_rescale:
304
+ logger.warning_once(
305
+ "It looks like you are trying to rescale already rescaled images. If the input"
306
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
307
+ )
308
+
309
+ if input_data_format is None:
310
+ # We assume that all images have the same channel dimension format.
311
+ input_data_format = infer_channel_dimension_format(images[0])
312
+
313
+ if do_resize:
314
+ images = [
315
+ self.resize(
316
+ image=image, size=size, crop_pct=crop_pct, resample=resample, input_data_format=input_data_format
317
+ )
318
+ for image in images
319
+ ]
320
+
321
+ if do_rescale:
322
+ images = [
323
+ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
324
+ for image in images
325
+ ]
326
+
327
+ if do_normalize:
328
+ images = [
329
+ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
330
+ for image in images
331
+ ]
332
+
333
+ images = [
334
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
335
+ ]
336
+
337
+ data = {"pixel_values": images}
338
+ return BatchFeature(data=data, tensor_type=return_tensors)
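As a quick illustration of the `resize` logic above (the `crop_pct` path only applies when `size["shortest_edge"]` is below 384), here is a minimal, self-contained sketch of the size arithmetic. `convnext_resize_plan` is a hypothetical helper written for this note only; the library itself delegates the actual work to `get_resize_output_image_size`, `resize` and `center_crop`, so rounding details may differ slightly.

```python
# Hypothetical sketch (not library code): reproduces the size arithmetic used by the
# `resize` method above for ConvNeXT-style preprocessing.


def convnext_resize_plan(height: int, width: int, shortest_edge: int = 224, crop_pct: float = 224 / 256):
    """Return (intermediate_resize_hw, final_output_hw) for an input of size (height, width)."""
    if shortest_edge >= 384:
        # Warp directly to a square; no cropping at 384 or larger.
        return (shortest_edge, shortest_edge), (shortest_edge, shortest_edge)
    # Below 384: scale so the shorter side becomes int(shortest_edge / crop_pct),
    # keeping the aspect ratio, then center-crop a (shortest_edge, shortest_edge) square.
    target_short = int(shortest_edge / crop_pct)  # 224 / (224 / 256) == 256
    scale = target_short / min(height, width)
    intermediate = (round(height * scale), round(width * scale))
    return intermediate, (shortest_edge, shortest_edge)


if __name__ == "__main__":
    # A 480x640 image with the defaults is resized to ~256 on its short side,
    # then center-cropped to 224x224.
    print(convnext_resize_plan(480, 640))       # ((256, 341), (224, 224))
    # At shortest_edge=384 the image is simply warped to 384x384.
    print(convnext_resize_plan(480, 640, 384))  # ((384, 384), (384, 384))
```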
env-llmeval/lib/python3.10/site-packages/transformers/models/convnext/modeling_convnext.py ADDED
@@ -0,0 +1,553 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch ConvNext model."""
16
+
17
+
18
+ from typing import Optional, Tuple, Union
19
+
20
+ import torch
21
+ import torch.utils.checkpoint
22
+ from torch import nn
23
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
24
+
25
+ from ...activations import ACT2FN
26
+ from ...modeling_outputs import (
27
+ BackboneOutput,
28
+ BaseModelOutputWithNoAttention,
29
+ BaseModelOutputWithPoolingAndNoAttention,
30
+ ImageClassifierOutputWithNoAttention,
31
+ )
32
+ from ...modeling_utils import PreTrainedModel
33
+ from ...utils import (
34
+ add_code_sample_docstrings,
35
+ add_start_docstrings,
36
+ add_start_docstrings_to_model_forward,
37
+ logging,
38
+ replace_return_docstrings,
39
+ )
40
+ from ...utils.backbone_utils import BackboneMixin
41
+ from .configuration_convnext import ConvNextConfig
42
+
43
+
44
+ logger = logging.get_logger(__name__)
45
+
46
+ # General docstring
47
+ _CONFIG_FOR_DOC = "ConvNextConfig"
48
+
49
+ # Base docstring
50
+ _CHECKPOINT_FOR_DOC = "facebook/convnext-tiny-224"
51
+ _EXPECTED_OUTPUT_SHAPE = [1, 768, 7, 7]
52
+
53
+ # Image classification docstring
54
+ _IMAGE_CLASS_CHECKPOINT = "facebook/convnext-tiny-224"
55
+ _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
56
+
57
+ CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST = [
58
+ "facebook/convnext-tiny-224",
59
+ # See all ConvNext models at https://huggingface.co/models?filter=convnext
60
+ ]
61
+
62
+
63
+ # Copied from transformers.models.beit.modeling_beit.drop_path
64
+ def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
65
+ """
66
+ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
67
+
68
+ Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
69
+ however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
70
+ See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
71
+ layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
72
+ argument.
73
+ """
74
+ if drop_prob == 0.0 or not training:
75
+ return input
76
+ keep_prob = 1 - drop_prob
77
+ shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
78
+ random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
79
+ random_tensor.floor_() # binarize
80
+ output = input.div(keep_prob) * random_tensor
81
+ return output
82
+
83
+
84
+ # Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->ConvNext
85
+ class ConvNextDropPath(nn.Module):
86
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
87
+
88
+ def __init__(self, drop_prob: Optional[float] = None) -> None:
89
+ super().__init__()
90
+ self.drop_prob = drop_prob
91
+
92
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
93
+ return drop_path(hidden_states, self.drop_prob, self.training)
94
+
95
+ def extra_repr(self) -> str:
96
+ return "p={}".format(self.drop_prob)
97
+
98
+
99
+ class ConvNextLayerNorm(nn.Module):
100
+ r"""LayerNorm that supports two data formats: channels_last (default) or channels_first.
101
+ The `data_format` argument gives the ordering of the dimensions in the inputs: channels_last corresponds to inputs with shape (batch_size, height,
102
+ width, channels) while channels_first corresponds to inputs with shape (batch_size, channels, height, width).
103
+ """
104
+
105
+ def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"):
106
+ super().__init__()
107
+ self.weight = nn.Parameter(torch.ones(normalized_shape))
108
+ self.bias = nn.Parameter(torch.zeros(normalized_shape))
109
+ self.eps = eps
110
+ self.data_format = data_format
111
+ if self.data_format not in ["channels_last", "channels_first"]:
112
+ raise NotImplementedError(f"Unsupported data format: {self.data_format}")
113
+ self.normalized_shape = (normalized_shape,)
114
+
115
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
116
+ if self.data_format == "channels_last":
117
+ x = torch.nn.functional.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
118
+ elif self.data_format == "channels_first":
119
+ input_dtype = x.dtype
120
+ x = x.float()
121
+ u = x.mean(1, keepdim=True)
122
+ s = (x - u).pow(2).mean(1, keepdim=True)
123
+ x = (x - u) / torch.sqrt(s + self.eps)
124
+ x = x.to(dtype=input_dtype)
125
+ x = self.weight[:, None, None] * x + self.bias[:, None, None]
126
+ return x
127
+
128
+
129
+ class ConvNextEmbeddings(nn.Module):
130
+ """This class is comparable to (and inspired by) the SwinEmbeddings class
131
+ found in src/transformers/models/swin/modeling_swin.py.
132
+ """
133
+
134
+ def __init__(self, config):
135
+ super().__init__()
136
+ self.patch_embeddings = nn.Conv2d(
137
+ config.num_channels, config.hidden_sizes[0], kernel_size=config.patch_size, stride=config.patch_size
138
+ )
139
+ self.layernorm = ConvNextLayerNorm(config.hidden_sizes[0], eps=1e-6, data_format="channels_first")
140
+ self.num_channels = config.num_channels
141
+
142
+ def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
143
+ num_channels = pixel_values.shape[1]
144
+ if num_channels != self.num_channels:
145
+ raise ValueError(
146
+ "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
147
+ )
148
+ embeddings = self.patch_embeddings(pixel_values)
149
+ embeddings = self.layernorm(embeddings)
150
+ return embeddings
151
+
152
+
153
+ class ConvNextLayer(nn.Module):
154
+ """This corresponds to the `Block` class in the original implementation.
155
+
156
+ There are two equivalent implementations: (1) [DwConv, LayerNorm (channels_first), Conv, GELU, 1x1 Conv]; all in (N, C,
157
+ H, W); (2) [DwConv, Permute to (N, H, W, C), LayerNorm (channels_last), Linear, GELU, Linear]; Permute back
158
+
159
+ The authors used (2) as they find it slightly faster in PyTorch.
160
+
161
+ Args:
162
+ config ([`ConvNextConfig`]): Model configuration class.
163
+ dim (`int`): Number of input channels.
164
+ drop_path (`float`): Stochastic depth rate. Default: 0.0.
165
+ """
166
+
167
+ def __init__(self, config, dim, drop_path=0):
168
+ super().__init__()
169
+ self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim) # depthwise conv
170
+ self.layernorm = ConvNextLayerNorm(dim, eps=1e-6)
171
+ self.pwconv1 = nn.Linear(dim, 4 * dim) # pointwise/1x1 convs, implemented with linear layers
172
+ self.act = ACT2FN[config.hidden_act]
173
+ self.pwconv2 = nn.Linear(4 * dim, dim)
174
+ self.layer_scale_parameter = (
175
+ nn.Parameter(config.layer_scale_init_value * torch.ones((dim)), requires_grad=True)
176
+ if config.layer_scale_init_value > 0
177
+ else None
178
+ )
179
+ self.drop_path = ConvNextDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
180
+
181
+ def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
182
+ input = hidden_states
183
+ x = self.dwconv(hidden_states)
184
+ x = x.permute(0, 2, 3, 1) # (N, C, H, W) -> (N, H, W, C)
185
+ x = self.layernorm(x)
186
+ x = self.pwconv1(x)
187
+ x = self.act(x)
188
+ x = self.pwconv2(x)
189
+ if self.layer_scale_parameter is not None:
190
+ x = self.layer_scale_parameter * x
191
+ x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W)
192
+
193
+ x = input + self.drop_path(x)
194
+ return x
195
+
196
+
197
+ class ConvNextStage(nn.Module):
198
+ """ConvNeXT stage, consisting of an optional downsampling layer + multiple residual blocks.
199
+
200
+ Args:
201
+ config ([`ConvNextConfig`]): Model configuration class.
202
+ in_channels (`int`): Number of input channels.
203
+ out_channels (`int`): Number of output channels.
204
+ depth (`int`): Number of residual blocks.
205
+ drop_path_rates(`List[float]`): Stochastic depth rates for each layer.
206
+ """
207
+
208
+ def __init__(self, config, in_channels, out_channels, kernel_size=2, stride=2, depth=2, drop_path_rates=None):
209
+ super().__init__()
210
+
211
+ if in_channels != out_channels or stride > 1:
212
+ self.downsampling_layer = nn.Sequential(
213
+ ConvNextLayerNorm(in_channels, eps=1e-6, data_format="channels_first"),
214
+ nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride),
215
+ )
216
+ else:
217
+ self.downsampling_layer = nn.Identity()
218
+ drop_path_rates = drop_path_rates or [0.0] * depth
219
+ self.layers = nn.Sequential(
220
+ *[ConvNextLayer(config, dim=out_channels, drop_path=drop_path_rates[j]) for j in range(depth)]
221
+ )
222
+
223
+ def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
224
+ hidden_states = self.downsampling_layer(hidden_states)
225
+ hidden_states = self.layers(hidden_states)
226
+ return hidden_states
227
+
228
+
229
+ class ConvNextEncoder(nn.Module):
230
+ def __init__(self, config):
231
+ super().__init__()
232
+ self.stages = nn.ModuleList()
233
+ drop_path_rates = [
234
+ x.tolist() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths)).split(config.depths)
235
+ ]
236
+ prev_chs = config.hidden_sizes[0]
237
+ for i in range(config.num_stages):
238
+ out_chs = config.hidden_sizes[i]
239
+ stage = ConvNextStage(
240
+ config,
241
+ in_channels=prev_chs,
242
+ out_channels=out_chs,
243
+ stride=2 if i > 0 else 1,
244
+ depth=config.depths[i],
245
+ drop_path_rates=drop_path_rates[i],
246
+ )
247
+ self.stages.append(stage)
248
+ prev_chs = out_chs
249
+
250
+ def forward(
251
+ self,
252
+ hidden_states: torch.FloatTensor,
253
+ output_hidden_states: Optional[bool] = False,
254
+ return_dict: Optional[bool] = True,
255
+ ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
256
+ all_hidden_states = () if output_hidden_states else None
257
+
258
+ for i, layer_module in enumerate(self.stages):
259
+ if output_hidden_states:
260
+ all_hidden_states = all_hidden_states + (hidden_states,)
261
+
262
+ hidden_states = layer_module(hidden_states)
263
+
264
+ if output_hidden_states:
265
+ all_hidden_states = all_hidden_states + (hidden_states,)
266
+
267
+ if not return_dict:
268
+ return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
269
+
270
+ return BaseModelOutputWithNoAttention(
271
+ last_hidden_state=hidden_states,
272
+ hidden_states=all_hidden_states,
273
+ )
274
+
275
+
276
+ class ConvNextPreTrainedModel(PreTrainedModel):
277
+ """
278
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
279
+ models.
280
+ """
281
+
282
+ config_class = ConvNextConfig
283
+ base_model_prefix = "convnext"
284
+ main_input_name = "pixel_values"
285
+
286
+ def _init_weights(self, module):
287
+ """Initialize the weights"""
288
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
289
+ # Slightly different from the TF version which uses truncated_normal for initialization
290
+ # cf https://github.com/pytorch/pytorch/pull/5617
291
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
292
+ if module.bias is not None:
293
+ module.bias.data.zero_()
294
+ elif isinstance(module, nn.LayerNorm):
295
+ module.bias.data.zero_()
296
+ module.weight.data.fill_(1.0)
297
+
298
+
299
+ CONVNEXT_START_DOCSTRING = r"""
300
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
301
+ as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
302
+ behavior.
303
+
304
+ Parameters:
305
+ config ([`ConvNextConfig`]): Model configuration class with all the parameters of the model.
306
+ Initializing with a config file does not load the weights associated with the model, only the
307
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
308
+ """
309
+
310
+ CONVNEXT_INPUTS_DOCSTRING = r"""
311
+ Args:
312
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
313
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
314
+ [`ConvNextImageProcessor.__call__`] for details.
315
+
316
+ output_hidden_states (`bool`, *optional*):
317
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
318
+ more detail.
319
+ return_dict (`bool`, *optional*):
320
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
321
+ """
322
+
323
+
324
+ @add_start_docstrings(
325
+ "The bare ConvNext model outputting raw features without any specific head on top.",
326
+ CONVNEXT_START_DOCSTRING,
327
+ )
328
+ class ConvNextModel(ConvNextPreTrainedModel):
329
+ def __init__(self, config):
330
+ super().__init__(config)
331
+ self.config = config
332
+
333
+ self.embeddings = ConvNextEmbeddings(config)
334
+ self.encoder = ConvNextEncoder(config)
335
+
336
+ # final layernorm layer
337
+ self.layernorm = nn.LayerNorm(config.hidden_sizes[-1], eps=config.layer_norm_eps)
338
+
339
+ # Initialize weights and apply final processing
340
+ self.post_init()
341
+
342
+ @add_start_docstrings_to_model_forward(CONVNEXT_INPUTS_DOCSTRING)
343
+ @add_code_sample_docstrings(
344
+ checkpoint=_CHECKPOINT_FOR_DOC,
345
+ output_type=BaseModelOutputWithPoolingAndNoAttention,
346
+ config_class=_CONFIG_FOR_DOC,
347
+ modality="vision",
348
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
349
+ )
350
+ def forward(
351
+ self,
352
+ pixel_values: torch.FloatTensor = None,
353
+ output_hidden_states: Optional[bool] = None,
354
+ return_dict: Optional[bool] = None,
355
+ ) -> Union[Tuple, BaseModelOutputWithPoolingAndNoAttention]:
356
+ output_hidden_states = (
357
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
358
+ )
359
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
360
+
361
+ if pixel_values is None:
362
+ raise ValueError("You have to specify pixel_values")
363
+
364
+ embedding_output = self.embeddings(pixel_values)
365
+
366
+ encoder_outputs = self.encoder(
367
+ embedding_output,
368
+ output_hidden_states=output_hidden_states,
369
+ return_dict=return_dict,
370
+ )
371
+
372
+ last_hidden_state = encoder_outputs[0]
373
+
374
+ # global average pooling, (N, C, H, W) -> (N, C)
375
+ pooled_output = self.layernorm(last_hidden_state.mean([-2, -1]))
376
+
377
+ if not return_dict:
378
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
379
+
380
+ return BaseModelOutputWithPoolingAndNoAttention(
381
+ last_hidden_state=last_hidden_state,
382
+ pooler_output=pooled_output,
383
+ hidden_states=encoder_outputs.hidden_states,
384
+ )
385
+
386
+
387
+ @add_start_docstrings(
388
+ """
389
+ ConvNext Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
390
+ ImageNet.
391
+ """,
392
+ CONVNEXT_START_DOCSTRING,
393
+ )
394
+ class ConvNextForImageClassification(ConvNextPreTrainedModel):
395
+ def __init__(self, config):
396
+ super().__init__(config)
397
+
398
+ self.num_labels = config.num_labels
399
+ self.convnext = ConvNextModel(config)
400
+
401
+ # Classifier head
402
+ self.classifier = (
403
+ nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
404
+ )
405
+
406
+ # Initialize weights and apply final processing
407
+ self.post_init()
408
+
409
+ @add_start_docstrings_to_model_forward(CONVNEXT_INPUTS_DOCSTRING)
410
+ @add_code_sample_docstrings(
411
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
412
+ output_type=ImageClassifierOutputWithNoAttention,
413
+ config_class=_CONFIG_FOR_DOC,
414
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
415
+ )
416
+ def forward(
417
+ self,
418
+ pixel_values: torch.FloatTensor = None,
419
+ labels: Optional[torch.LongTensor] = None,
420
+ output_hidden_states: Optional[bool] = None,
421
+ return_dict: Optional[bool] = None,
422
+ ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
423
+ r"""
424
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
425
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
426
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
427
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
428
+ """
429
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
430
+
431
+ outputs = self.convnext(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
432
+
433
+ pooled_output = outputs.pooler_output if return_dict else outputs[1]
434
+
435
+ logits = self.classifier(pooled_output)
436
+
437
+ loss = None
438
+ if labels is not None:
439
+ if self.config.problem_type is None:
440
+ if self.num_labels == 1:
441
+ self.config.problem_type = "regression"
442
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
443
+ self.config.problem_type = "single_label_classification"
444
+ else:
445
+ self.config.problem_type = "multi_label_classification"
446
+
447
+ if self.config.problem_type == "regression":
448
+ loss_fct = MSELoss()
449
+ if self.num_labels == 1:
450
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
451
+ else:
452
+ loss = loss_fct(logits, labels)
453
+ elif self.config.problem_type == "single_label_classification":
454
+ loss_fct = CrossEntropyLoss()
455
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
456
+ elif self.config.problem_type == "multi_label_classification":
457
+ loss_fct = BCEWithLogitsLoss()
458
+ loss = loss_fct(logits, labels)
459
+ if not return_dict:
460
+ output = (logits,) + outputs[2:]
461
+ return ((loss,) + output) if loss is not None else output
462
+
463
+ return ImageClassifierOutputWithNoAttention(
464
+ loss=loss,
465
+ logits=logits,
466
+ hidden_states=outputs.hidden_states,
467
+ )
468
+
469
+
470
+ @add_start_docstrings(
471
+ """
472
+ ConvNeXt backbone, to be used with frameworks like DETR and MaskFormer.
473
+ """,
474
+ CONVNEXT_START_DOCSTRING,
475
+ )
476
+ class ConvNextBackbone(ConvNextPreTrainedModel, BackboneMixin):
477
+ def __init__(self, config):
478
+ super().__init__(config)
479
+ super()._init_backbone(config)
480
+
481
+ self.embeddings = ConvNextEmbeddings(config)
482
+ self.encoder = ConvNextEncoder(config)
483
+ self.num_features = [config.hidden_sizes[0]] + config.hidden_sizes
484
+
485
+ # Add layer norms to hidden states of out_features
486
+ hidden_states_norms = {}
487
+ for stage, num_channels in zip(self._out_features, self.channels):
488
+ hidden_states_norms[stage] = ConvNextLayerNorm(num_channels, data_format="channels_first")
489
+ self.hidden_states_norms = nn.ModuleDict(hidden_states_norms)
490
+
491
+ # initialize weights and apply final processing
492
+ self.post_init()
493
+
494
+ @add_start_docstrings_to_model_forward(CONVNEXT_INPUTS_DOCSTRING)
495
+ @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
496
+ def forward(
497
+ self,
498
+ pixel_values: torch.Tensor,
499
+ output_hidden_states: Optional[bool] = None,
500
+ return_dict: Optional[bool] = None,
501
+ ) -> BackboneOutput:
502
+ """
503
+ Returns:
504
+
505
+ Examples:
506
+
507
+ ```python
508
+ >>> from transformers import AutoImageProcessor, AutoBackbone
509
+ >>> import torch
510
+ >>> from PIL import Image
511
+ >>> import requests
512
+
513
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
514
+ >>> image = Image.open(requests.get(url, stream=True).raw)
515
+
516
+ >>> processor = AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224")
517
+ >>> model = AutoBackbone.from_pretrained("facebook/convnext-tiny-224")
518
+
519
+ >>> inputs = processor(image, return_tensors="pt")
520
+ >>> outputs = model(**inputs)
521
+ ```"""
522
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
523
+ output_hidden_states = (
524
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
525
+ )
526
+
527
+ embedding_output = self.embeddings(pixel_values)
528
+
529
+ outputs = self.encoder(
530
+ embedding_output,
531
+ output_hidden_states=True,
532
+ return_dict=return_dict,
533
+ )
534
+
535
+ hidden_states = outputs.hidden_states if return_dict else outputs[1]
536
+
537
+ feature_maps = ()
538
+ for stage, hidden_state in zip(self.stage_names, hidden_states):
539
+ if stage in self.out_features:
540
+ hidden_state = self.hidden_states_norms[stage](hidden_state)
541
+ feature_maps += (hidden_state,)
542
+
543
+ if not return_dict:
544
+ output = (feature_maps,)
545
+ if output_hidden_states:
546
+ output += (hidden_states,)
547
+ return output
548
+
549
+ return BackboneOutput(
550
+ feature_maps=feature_maps,
551
+ hidden_states=hidden_states if output_hidden_states else None,
552
+ attentions=None,
553
+ )
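`ConvNextLayerNorm` above handles the `channels_first` case by normalizing over the channel dimension by hand instead of permuting. A small hedged sketch (assuming `torch` and this `transformers` module are importable) of how one might check that the hand-rolled branch agrees with a standard `nn.LayerNorm` applied on a permuted tensor:

```python
# Illustrative check only, not part of the library.
import torch
from torch import nn

from transformers.models.convnext.modeling_convnext import ConvNextLayerNorm

torch.manual_seed(0)
x = torch.randn(2, 96, 14, 14)  # (batch, channels, height, width)

cf_norm = ConvNextLayerNorm(96, eps=1e-6, data_format="channels_first")
reference = nn.LayerNorm(96, eps=1e-6)  # both start with weight=1, bias=0

out_cf = cf_norm(x)
# Permute to (batch, height, width, channels), normalize over the last dim, permute back.
out_ref = reference(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)

print(torch.allclose(out_cf, out_ref, atol=1e-5))  # expected: True
```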
env-llmeval/lib/python3.10/site-packages/transformers/models/convnext/modeling_tf_convnext.py ADDED
@@ -0,0 +1,667 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Meta Platforms Inc. and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ TF 2.0 ConvNext model."""
16
+
17
+
18
+ from __future__ import annotations
19
+
20
+ from typing import List, Optional, Tuple, Union
21
+
22
+ import numpy as np
23
+ import tensorflow as tf
24
+
25
+ from ...activations_tf import get_tf_activation
26
+ from ...modeling_tf_outputs import TFBaseModelOutput, TFBaseModelOutputWithPooling, TFSequenceClassifierOutput
27
+ from ...modeling_tf_utils import (
28
+ TFModelInputType,
29
+ TFPreTrainedModel,
30
+ TFSequenceClassificationLoss,
31
+ get_initializer,
32
+ keras,
33
+ keras_serializable,
34
+ unpack_inputs,
35
+ )
36
+ from ...tf_utils import shape_list
37
+ from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
38
+ from .configuration_convnext import ConvNextConfig
39
+
40
+
41
+ logger = logging.get_logger(__name__)
42
+
43
+
44
+ _CONFIG_FOR_DOC = "ConvNextConfig"
45
+ _CHECKPOINT_FOR_DOC = "facebook/convnext-tiny-224"
46
+
47
+
48
+ class TFConvNextDropPath(keras.layers.Layer):
49
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
50
+ References:
51
+ (1) github.com:rwightman/pytorch-image-models
52
+ """
53
+
54
+ def __init__(self, drop_path: float, **kwargs):
55
+ super().__init__(**kwargs)
56
+ self.drop_path = drop_path
57
+
58
+ def call(self, x: tf.Tensor, training=None):
59
+ if training:
60
+ keep_prob = 1 - self.drop_path
61
+ shape = (tf.shape(x)[0],) + (1,) * (len(tf.shape(x)) - 1)
62
+ random_tensor = keep_prob + tf.random.uniform(shape, 0, 1)
63
+ random_tensor = tf.floor(random_tensor)
64
+ return (x / keep_prob) * random_tensor
65
+ return x
66
+
67
+
68
+ class TFConvNextEmbeddings(keras.layers.Layer):
69
+ """This class is comparable to (and inspired by) the SwinEmbeddings class
70
+ found in src/transformers/models/swin/modeling_swin.py.
71
+ """
72
+
73
+ def __init__(self, config: ConvNextConfig, **kwargs):
74
+ super().__init__(**kwargs)
75
+ self.patch_embeddings = keras.layers.Conv2D(
76
+ filters=config.hidden_sizes[0],
77
+ kernel_size=config.patch_size,
78
+ strides=config.patch_size,
79
+ name="patch_embeddings",
80
+ kernel_initializer=get_initializer(config.initializer_range),
81
+ bias_initializer=keras.initializers.Zeros(),
82
+ )
83
+ self.layernorm = keras.layers.LayerNormalization(epsilon=1e-6, name="layernorm")
84
+ self.num_channels = config.num_channels
85
+ self.config = config
86
+
87
+ def call(self, pixel_values):
88
+ if isinstance(pixel_values, dict):
89
+ pixel_values = pixel_values["pixel_values"]
90
+
91
+ tf.debugging.assert_equal(
92
+ shape_list(pixel_values)[1],
93
+ self.num_channels,
94
+ message="Make sure that the channel dimension of the pixel values match with the one set in the configuration.",
95
+ )
96
+
97
+ # When running on CPU, `keras.layers.Conv2D` doesn't support `NCHW` format.
98
+ # So change the input format from `NCHW` to `NHWC`.
99
+ # shape = (batch_size, in_height, in_width, in_channels)
100
+ pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
101
+
102
+ embeddings = self.patch_embeddings(pixel_values)
103
+ embeddings = self.layernorm(embeddings)
104
+ return embeddings
105
+
106
+ def build(self, input_shape=None):
107
+ if self.built:
108
+ return
109
+ self.built = True
110
+ if getattr(self, "patch_embeddings", None) is not None:
111
+ with tf.name_scope(self.patch_embeddings.name):
112
+ self.patch_embeddings.build([None, None, None, self.config.num_channels])
113
+ if getattr(self, "layernorm", None) is not None:
114
+ with tf.name_scope(self.layernorm.name):
115
+ self.layernorm.build([None, None, None, self.config.hidden_sizes[0]])
116
+
117
+
118
+ class TFConvNextLayer(keras.layers.Layer):
119
+ """This corresponds to the `Block` class in the original implementation.
120
+
121
+ There are two equivalent implementations: (1) [DwConv, LayerNorm (channels_first), Conv, GELU, 1x1 Conv]; all in (N, C,
122
+ H, W); (2) [DwConv, Permute to (N, H, W, C), LayerNorm (channels_last), Linear, GELU, Linear]; Permute back
123
+
124
+ The authors used (2) as they find it slightly faster in PyTorch. Since we already permuted the inputs to follow
125
+ NHWC ordering, we can just apply the operations straight-away without the permutation.
126
+
127
+ Args:
128
+ config ([`ConvNextConfig`]): Model configuration class.
129
+ dim (`int`): Number of input channels.
130
+ drop_path (`float`): Stochastic depth rate. Default: 0.0.
131
+ """
132
+
133
+ def __init__(self, config, dim, drop_path=0.0, **kwargs):
134
+ super().__init__(**kwargs)
135
+ self.dim = dim
136
+ self.config = config
137
+ self.dwconv = keras.layers.Conv2D(
138
+ filters=dim,
139
+ kernel_size=7,
140
+ padding="same",
141
+ groups=dim,
142
+ kernel_initializer=get_initializer(config.initializer_range),
143
+ bias_initializer="zeros",
144
+ name="dwconv",
145
+ ) # depthwise conv
146
+ self.layernorm = keras.layers.LayerNormalization(
147
+ epsilon=1e-6,
148
+ name="layernorm",
149
+ )
150
+ self.pwconv1 = keras.layers.Dense(
151
+ units=4 * dim,
152
+ kernel_initializer=get_initializer(config.initializer_range),
153
+ bias_initializer="zeros",
154
+ name="pwconv1",
155
+ ) # pointwise/1x1 convs, implemented with linear layers
156
+ self.act = get_tf_activation(config.hidden_act)
157
+ self.pwconv2 = keras.layers.Dense(
158
+ units=dim,
159
+ kernel_initializer=get_initializer(config.initializer_range),
160
+ bias_initializer="zeros",
161
+ name="pwconv2",
162
+ )
163
+ # Using `layers.Activation` instead of `tf.identity` to better control `training`
164
+ # behaviour.
165
+ self.drop_path = (
166
+ TFConvNextDropPath(drop_path, name="drop_path")
167
+ if drop_path > 0.0
168
+ else keras.layers.Activation("linear", name="drop_path")
169
+ )
170
+
171
+ def build(self, input_shape: tf.TensorShape = None):
172
+ # PT's `nn.Parameters` must be mapped to a TF layer weight to inherit the same name hierarchy (and vice-versa)
173
+ self.layer_scale_parameter = (
174
+ self.add_weight(
175
+ shape=(self.dim,),
176
+ initializer=keras.initializers.Constant(value=self.config.layer_scale_init_value),
177
+ trainable=True,
178
+ name="layer_scale_parameter",
179
+ )
180
+ if self.config.layer_scale_init_value > 0
181
+ else None
182
+ )
183
+
184
+ if self.built:
185
+ return
186
+ self.built = True
187
+ if getattr(self, "dwconv", None) is not None:
188
+ with tf.name_scope(self.dwconv.name):
189
+ self.dwconv.build([None, None, None, self.dim])
190
+ if getattr(self, "layernorm", None) is not None:
191
+ with tf.name_scope(self.layernorm.name):
192
+ self.layernorm.build([None, None, None, self.dim])
193
+ if getattr(self, "pwconv1", None) is not None:
194
+ with tf.name_scope(self.pwconv1.name):
195
+ self.pwconv1.build([None, None, self.dim])
196
+ if getattr(self, "pwconv2", None) is not None:
197
+ with tf.name_scope(self.pwconv2.name):
198
+ self.pwconv2.build([None, None, 4 * self.dim])
199
+ if getattr(self, "drop_path", None) is not None:
200
+ with tf.name_scope(self.drop_path.name):
201
+ self.drop_path.build(None)
202
+
203
+ def call(self, hidden_states, training=False):
204
+ input = hidden_states
205
+ x = self.dwconv(hidden_states)
206
+ x = self.layernorm(x)
207
+ x = self.pwconv1(x)
208
+ x = self.act(x)
209
+ x = self.pwconv2(x)
210
+
211
+ if self.layer_scale_parameter is not None:
212
+ x = self.layer_scale_parameter * x
213
+
214
+ x = input + self.drop_path(x, training=training)
215
+ return x
216
+
217
+
218
+ class TFConvNextStage(keras.layers.Layer):
219
+ """ConvNext stage, consisting of an optional downsampling layer + multiple residual blocks.
220
+
221
+ Args:
222
+ config (`ConvNextConfig`):
223
+ Model configuration class.
224
+ in_channels (`int`):
225
+ Number of input channels.
226
+ out_channels (`int`):
227
+ Number of output channels.
228
+ depth (`int`):
229
+ Number of residual blocks.
230
+ drop_path_rates(`List[float]`):
231
+ Stochastic depth rates for each layer.
232
+ """
233
+
234
+ def __init__(
235
+ self,
236
+ config: ConvNextConfig,
237
+ in_channels: int,
238
+ out_channels: int,
239
+ kernel_size: int = 2,
240
+ stride: int = 2,
241
+ depth: int = 2,
242
+ drop_path_rates: Optional[List[float]] = None,
243
+ **kwargs,
244
+ ):
245
+ super().__init__(**kwargs)
246
+ if in_channels != out_channels or stride > 1:
247
+ self.downsampling_layer = [
248
+ keras.layers.LayerNormalization(
249
+ epsilon=1e-6,
250
+ name="downsampling_layer.0",
251
+ ),
252
+ # Inputs to this layer will follow NHWC format since we
253
+ # transposed the inputs from NCHW to NHWC in the `TFConvNextEmbeddings`
254
+ # layer. All the outputs throughout the model will be in NHWC
255
+ # from this point on until the output where we again change to
256
+ # NCHW.
257
+ keras.layers.Conv2D(
258
+ filters=out_channels,
259
+ kernel_size=kernel_size,
260
+ strides=stride,
261
+ kernel_initializer=get_initializer(config.initializer_range),
262
+ bias_initializer=keras.initializers.Zeros(),
263
+ name="downsampling_layer.1",
264
+ ),
265
+ ]
266
+ else:
267
+ self.downsampling_layer = [tf.identity]
268
+
269
+ drop_path_rates = drop_path_rates or [0.0] * depth
270
+ self.layers = [
271
+ TFConvNextLayer(
272
+ config,
273
+ dim=out_channels,
274
+ drop_path=drop_path_rates[j],
275
+ name=f"layers.{j}",
276
+ )
277
+ for j in range(depth)
278
+ ]
279
+ self.in_channels = in_channels
280
+ self.out_channels = out_channels
281
+ self.stride = stride
282
+
283
+ def call(self, hidden_states):
284
+ for layer in self.downsampling_layer:
285
+ hidden_states = layer(hidden_states)
286
+ for layer in self.layers:
287
+ hidden_states = layer(hidden_states)
288
+ return hidden_states
289
+
290
+ def build(self, input_shape=None):
291
+ if self.built:
292
+ return
293
+ self.built = True
294
+ if getattr(self, "layers", None) is not None:
295
+ for layer in self.layers:
296
+ with tf.name_scope(layer.name):
297
+ layer.build(None)
298
+ if self.in_channels != self.out_channels or self.stride > 1:
299
+ with tf.name_scope(self.downsampling_layer[0].name):
300
+ self.downsampling_layer[0].build([None, None, None, self.in_channels])
301
+ with tf.name_scope(self.downsampling_layer[1].name):
302
+ self.downsampling_layer[1].build([None, None, None, self.in_channels])
303
+
304
+
305
+ class TFConvNextEncoder(keras.layers.Layer):
306
+ def __init__(self, config, **kwargs):
307
+ super().__init__(**kwargs)
308
+ self.stages = []
309
+ drop_path_rates = tf.linspace(0.0, config.drop_path_rate, sum(config.depths))
310
+ drop_path_rates = tf.split(drop_path_rates, config.depths)
311
+ drop_path_rates = [x.numpy().tolist() for x in drop_path_rates]
312
+ prev_chs = config.hidden_sizes[0]
313
+ for i in range(config.num_stages):
314
+ out_chs = config.hidden_sizes[i]
315
+ stage = TFConvNextStage(
316
+ config,
317
+ in_channels=prev_chs,
318
+ out_channels=out_chs,
319
+ stride=2 if i > 0 else 1,
320
+ depth=config.depths[i],
321
+ drop_path_rates=drop_path_rates[i],
322
+ name=f"stages.{i}",
323
+ )
324
+ self.stages.append(stage)
325
+ prev_chs = out_chs
326
+
327
+ def call(self, hidden_states, output_hidden_states=False, return_dict=True):
328
+ all_hidden_states = () if output_hidden_states else None
329
+
330
+ for i, layer_module in enumerate(self.stages):
331
+ if output_hidden_states:
332
+ all_hidden_states = all_hidden_states + (hidden_states,)
333
+
334
+ hidden_states = layer_module(hidden_states)
335
+
336
+ if output_hidden_states:
337
+ all_hidden_states = all_hidden_states + (hidden_states,)
338
+
339
+ if not return_dict:
340
+ return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
341
+
342
+ return TFBaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
343
+
344
+ def build(self, input_shape=None):
345
+ for stage in self.stages:
346
+ with tf.name_scope(stage.name):
347
+ stage.build(None)
348
+
349
+
350
+ @keras_serializable
351
+ class TFConvNextMainLayer(keras.layers.Layer):
352
+ config_class = ConvNextConfig
353
+
354
+ def __init__(self, config: ConvNextConfig, add_pooling_layer: bool = True, **kwargs):
355
+ super().__init__(**kwargs)
356
+
357
+ self.config = config
358
+ self.embeddings = TFConvNextEmbeddings(config, name="embeddings")
359
+ self.encoder = TFConvNextEncoder(config, name="encoder")
360
+ self.layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm")
361
+ # We are setting the `data_format` like so because from here on we will revert to the
362
+ # NCHW output format
363
+ self.pooler = keras.layers.GlobalAvgPool2D(data_format="channels_first") if add_pooling_layer else None
364
+
365
+ @unpack_inputs
366
+ def call(
367
+ self,
368
+ pixel_values: TFModelInputType | None = None,
369
+ output_hidden_states: Optional[bool] = None,
370
+ return_dict: Optional[bool] = None,
371
+ training: bool = False,
372
+ ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
373
+ output_hidden_states = (
374
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
375
+ )
376
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
377
+
378
+ if pixel_values is None:
379
+ raise ValueError("You have to specify pixel_values")
380
+
381
+ embedding_output = self.embeddings(pixel_values, training=training)
382
+
383
+ encoder_outputs = self.encoder(
384
+ embedding_output,
385
+ output_hidden_states=output_hidden_states,
386
+ return_dict=return_dict,
387
+ training=training,
388
+ )
389
+
390
+ last_hidden_state = encoder_outputs[0]
391
+ # Change to NCHW output format to have uniformity in the modules
392
+ last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))
393
+ pooled_output = self.layernorm(self.pooler(last_hidden_state))
394
+
395
+ # Change the other hidden state outputs to NCHW as well
396
+ if output_hidden_states:
397
+ hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])
398
+
399
+ if not return_dict:
400
+ hidden_states = hidden_states if output_hidden_states else ()
401
+ return (last_hidden_state, pooled_output) + hidden_states
402
+
403
+ return TFBaseModelOutputWithPooling(
404
+ last_hidden_state=last_hidden_state,
405
+ pooler_output=pooled_output,
406
+ hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
407
+ )
408
+
409
+ def build(self, input_shape=None):
410
+ if self.built:
411
+ return
412
+ self.built = True
413
+ if getattr(self, "embeddings", None) is not None:
414
+ with tf.name_scope(self.embeddings.name):
415
+ self.embeddings.build(None)
416
+ if getattr(self, "encoder", None) is not None:
417
+ with tf.name_scope(self.encoder.name):
418
+ self.encoder.build(None)
419
+ if getattr(self, "layernorm", None) is not None:
420
+ with tf.name_scope(self.layernorm.name):
421
+ self.layernorm.build([None, self.config.hidden_sizes[-1]])
422
+
423
+
424
+ class TFConvNextPreTrainedModel(TFPreTrainedModel):
425
+ """
426
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
427
+ models.
428
+ """
429
+
430
+ config_class = ConvNextConfig
431
+ base_model_prefix = "convnext"
432
+ main_input_name = "pixel_values"
433
+
434
+
435
+ CONVNEXT_START_DOCSTRING = r"""
436
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
437
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
438
+ etc.)
439
+
440
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
441
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
442
+ behavior.
443
+
444
+ <Tip>
445
+
446
+ TensorFlow models and layers in `transformers` accept two formats as input:
447
+
448
+ - having all inputs as keyword arguments (like PyTorch models), or
449
+ - having all inputs as a list, tuple or dict in the first positional argument.
450
+
451
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
452
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
453
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
454
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
455
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
456
+ positional argument:
457
+
458
+ - a single Tensor with `pixel_values` only and nothing else: `model(pixel_values)`
459
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
460
+ `model([pixel_values, attention_mask])` or `model([pixel_values, attention_mask, token_type_ids])`
461
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
462
+ `model({"pixel_values": pixel_values, "token_type_ids": token_type_ids})`
463
+
464
+ Note that when creating models and layers with
465
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
466
+ about any of this, as you can just pass inputs like you would to any other Python function!
467
+
468
+ </Tip>
469
+
470
+ Parameters:
471
+ config ([`ConvNextConfig`]): Model configuration class with all the parameters of the model.
472
+ Initializing with a config file does not load the weights associated with the model, only the
473
+ configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
474
+ """
475
+
476
+ CONVNEXT_INPUTS_DOCSTRING = r"""
477
+ Args:
478
+ pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]`, where each example must have the shape `(batch_size, num_channels, height, width)`):
479
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
480
+ [`ConvNextImageProcessor.__call__`] for details.
481
+
482
+ output_hidden_states (`bool`, *optional*):
483
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
484
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
485
+ used instead.
486
+ return_dict (`bool`, *optional*):
487
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
488
+ eager mode, in graph mode the value will always be set to True.
489
+ """
490
+
491
+
492
+ @add_start_docstrings(
493
+ "The bare ConvNext model outputting raw features without any specific head on top.",
494
+ CONVNEXT_START_DOCSTRING,
495
+ )
496
+ class TFConvNextModel(TFConvNextPreTrainedModel):
497
+ def __init__(self, config, *inputs, add_pooling_layer=True, **kwargs):
498
+ super().__init__(config, *inputs, **kwargs)
499
+ self.convnext = TFConvNextMainLayer(config, add_pooling_layer=add_pooling_layer, name="convnext")
500
+
501
+ @unpack_inputs
502
+ @add_start_docstrings_to_model_forward(CONVNEXT_INPUTS_DOCSTRING)
503
+ @replace_return_docstrings(output_type=TFBaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC)
504
+ def call(
505
+ self,
506
+ pixel_values: TFModelInputType | None = None,
507
+ output_hidden_states: Optional[bool] = None,
508
+ return_dict: Optional[bool] = None,
509
+ training: bool = False,
510
+ ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
511
+ r"""
512
+ Returns:
513
+
514
+ Examples:
515
+
516
+ ```python
517
+ >>> from transformers import AutoImageProcessor, TFConvNextModel
518
+ >>> from PIL import Image
519
+ >>> import requests
520
+
521
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
522
+ >>> image = Image.open(requests.get(url, stream=True).raw)
523
+
524
+ >>> image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224")
525
+ >>> model = TFConvNextModel.from_pretrained("facebook/convnext-tiny-224")
526
+
527
+ >>> inputs = image_processor(images=image, return_tensors="tf")
528
+ >>> outputs = model(**inputs)
529
+ >>> last_hidden_states = outputs.last_hidden_state
530
+ ```"""
531
+ output_hidden_states = (
532
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
533
+ )
534
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
535
+
536
+ if pixel_values is None:
537
+ raise ValueError("You have to specify pixel_values")
538
+
539
+ outputs = self.convnext(
540
+ pixel_values=pixel_values,
541
+ output_hidden_states=output_hidden_states,
542
+ return_dict=return_dict,
543
+ training=training,
544
+ )
545
+
546
+ if not return_dict:
547
+ return (outputs[0],) + outputs[1:]
548
+
549
+ return TFBaseModelOutputWithPooling(
550
+ last_hidden_state=outputs.last_hidden_state,
551
+ pooler_output=outputs.pooler_output,
552
+ hidden_states=outputs.hidden_states,
553
+ )
554
+
555
+ def build(self, input_shape=None):
556
+ if self.built:
557
+ return
558
+ self.built = True
559
+ if getattr(self, "convnext", None) is not None:
560
+ with tf.name_scope(self.convnext.name):
561
+ self.convnext.build(None)
562
+
563
+
564
+ @add_start_docstrings(
565
+ """
566
+ ConvNext Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
567
+ ImageNet.
568
+ """,
569
+ CONVNEXT_START_DOCSTRING,
570
+ )
571
+ class TFConvNextForImageClassification(TFConvNextPreTrainedModel, TFSequenceClassificationLoss):
572
+ def __init__(self, config: ConvNextConfig, *inputs, **kwargs):
573
+ super().__init__(config, *inputs, **kwargs)
574
+
575
+ self.num_labels = config.num_labels
576
+ self.convnext = TFConvNextMainLayer(config, name="convnext")
577
+
578
+ # Classifier head
579
+ self.classifier = keras.layers.Dense(
580
+ units=config.num_labels,
581
+ kernel_initializer=get_initializer(config.initializer_range),
582
+ bias_initializer="zeros",
583
+ name="classifier",
584
+ )
585
+ self.config = config
586
+
587
+ @unpack_inputs
588
+ @add_start_docstrings_to_model_forward(CONVNEXT_INPUTS_DOCSTRING)
589
+ @replace_return_docstrings(output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
590
+ def call(
591
+ self,
592
+ pixel_values: TFModelInputType | None = None,
593
+ output_hidden_states: Optional[bool] = None,
594
+ return_dict: Optional[bool] = None,
595
+ labels: np.ndarray | tf.Tensor | None = None,
596
+ training: Optional[bool] = False,
597
+ ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
598
+ r"""
599
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
600
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
601
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
602
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
603
+
604
+ Returns:
605
+
606
+ Examples:
607
+
608
+ ```python
609
+ >>> from transformers import AutoImageProcessor, TFConvNextForImageClassification
610
+ >>> import tensorflow as tf
611
+ >>> from PIL import Image
612
+ >>> import requests
613
+
614
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
615
+ >>> image = Image.open(requests.get(url, stream=True).raw)
616
+
617
+ >>> image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224")
618
+ >>> model = TFConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224")
619
+
620
+ >>> inputs = image_processor(images=image, return_tensors="tf")
621
+ >>> outputs = model(**inputs)
622
+ >>> logits = outputs.logits
623
+ >>> # model predicts one of the 1000 ImageNet classes
624
+ >>> predicted_class_idx = tf.math.argmax(logits, axis=-1)[0]
625
+ >>> print("Predicted class:", model.config.id2label[int(predicted_class_idx)])
626
+ ```"""
627
+ output_hidden_states = (
628
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
629
+ )
630
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
631
+
632
+ if pixel_values is None:
633
+ raise ValueError("You have to specify pixel_values")
634
+
635
+ outputs = self.convnext(
636
+ pixel_values,
637
+ output_hidden_states=output_hidden_states,
638
+ return_dict=return_dict,
639
+ training=training,
640
+ )
641
+
642
+ pooled_output = outputs.pooler_output if return_dict else outputs[1]
643
+
644
+ logits = self.classifier(pooled_output)
645
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
646
+
647
+ if not return_dict:
648
+ output = (logits,) + outputs[2:]
649
+ return ((loss,) + output) if loss is not None else output
650
+
651
+ return TFSequenceClassifierOutput(
652
+ loss=loss,
653
+ logits=logits,
654
+ hidden_states=outputs.hidden_states,
655
+ )
656
+
657
+ def build(self, input_shape=None):
658
+ if self.built:
659
+ return
660
+ self.built = True
661
+ if getattr(self, "convnext", None) is not None:
662
+ with tf.name_scope(self.convnext.name):
663
+ self.convnext.build(None)
664
+ if getattr(self, "classifier", None) is not None:
665
+ if hasattr(self.classifier, "name"):
666
+ with tf.name_scope(self.classifier.name):
667
+ self.classifier.build([None, None, self.config.hidden_sizes[-1]])
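The docstring above explains that passing `labels` makes the classification head also return a loss, but the example only shows inference. Below is a minimal, hedged sketch of that path; the checkpoint and image URL are the ones already used in the docstring example, while the label index `281` is an arbitrary illustrative ImageNet class id.

```python
# Minimal sketch (not from the source): cross-entropy loss is computed when `labels` is passed
# and config.num_labels > 1, as described in the docstring above.
import requests
import tensorflow as tf
from PIL import Image

from transformers import AutoImageProcessor, TFConvNextForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224")
model = TFConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224")

inputs = image_processor(images=image, return_tensors="tf")
outputs = model(**inputs, labels=tf.constant([281]))  # 281 is an arbitrary example label

print(float(outputs.loss))
print(model.config.id2label[int(tf.math.argmax(outputs.logits, axis=-1)[0])])
```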
env-llmeval/lib/python3.10/site-packages/transformers/models/dit/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/transformers/models/dit/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (188 Bytes).
env-llmeval/lib/python3.10/site-packages/transformers/models/dit/__pycache__/convert_dit_unilm_to_pytorch.cpython-310.pyc ADDED
Binary file (6.44 kB).
env-llmeval/lib/python3.10/site-packages/transformers/models/dit/convert_dit_unilm_to_pytorch.py ADDED
@@ -0,0 +1,231 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert DiT checkpoints from the unilm repository."""
16
+
17
+
18
+ import argparse
19
+ import json
20
+ from pathlib import Path
21
+
22
+ import requests
23
+ import torch
24
+ from huggingface_hub import hf_hub_download
25
+ from PIL import Image
26
+
27
+ from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
28
+ from transformers.image_utils import PILImageResampling
29
+ from transformers.utils import logging
30
+
31
+
32
+ logging.set_verbosity_info()
33
+ logger = logging.get_logger(__name__)
34
+
35
+
36
+ # here we list all keys to be renamed (original name on the left, our name on the right)
37
+ def create_rename_keys(config, has_lm_head=False, is_semantic=False):
38
+ prefix = "backbone." if is_semantic else ""
39
+
40
+ rename_keys = []
41
+ for i in range(config.num_hidden_layers):
42
+ # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
43
+ rename_keys.append((f"{prefix}blocks.{i}.norm1.weight", f"beit.encoder.layer.{i}.layernorm_before.weight"))
44
+ rename_keys.append((f"{prefix}blocks.{i}.norm1.bias", f"beit.encoder.layer.{i}.layernorm_before.bias"))
45
+ rename_keys.append(
46
+ (f"{prefix}blocks.{i}.attn.proj.weight", f"beit.encoder.layer.{i}.attention.output.dense.weight")
47
+ )
48
+ rename_keys.append(
49
+ (f"{prefix}blocks.{i}.attn.proj.bias", f"beit.encoder.layer.{i}.attention.output.dense.bias")
50
+ )
51
+ rename_keys.append((f"{prefix}blocks.{i}.norm2.weight", f"beit.encoder.layer.{i}.layernorm_after.weight"))
52
+ rename_keys.append((f"{prefix}blocks.{i}.norm2.bias", f"beit.encoder.layer.{i}.layernorm_after.bias"))
53
+ rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.weight", f"beit.encoder.layer.{i}.intermediate.dense.weight"))
54
+ rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.bias", f"beit.encoder.layer.{i}.intermediate.dense.bias"))
55
+ rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.weight", f"beit.encoder.layer.{i}.output.dense.weight"))
56
+ rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.bias", f"beit.encoder.layer.{i}.output.dense.bias"))
57
+
58
+ # projection layer + position embeddings
59
+ rename_keys.extend(
60
+ [
61
+ (f"{prefix}cls_token", "beit.embeddings.cls_token"),
62
+ (f"{prefix}patch_embed.proj.weight", "beit.embeddings.patch_embeddings.projection.weight"),
63
+ (f"{prefix}patch_embed.proj.bias", "beit.embeddings.patch_embeddings.projection.bias"),
64
+ (f"{prefix}pos_embed", "beit.embeddings.position_embeddings"),
65
+ ]
66
+ )
67
+
68
+ if has_lm_head:
69
+ # mask token + layernorm
70
+ rename_keys.extend(
71
+ [
72
+ ("mask_token", "beit.embeddings.mask_token"),
73
+ ("norm.weight", "layernorm.weight"),
74
+ ("norm.bias", "layernorm.bias"),
75
+ ]
76
+ )
77
+ else:
78
+ # layernorm + classification head
79
+ rename_keys.extend(
80
+ [
81
+ ("fc_norm.weight", "beit.pooler.layernorm.weight"),
82
+ ("fc_norm.bias", "beit.pooler.layernorm.bias"),
83
+ ("head.weight", "classifier.weight"),
84
+ ("head.bias", "classifier.bias"),
85
+ ]
86
+ )
87
+
88
+ return rename_keys
89
+
90
+
91
+ # we split up the matrix of each encoder layer into queries, keys and values
92
+ def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
93
+ for i in range(config.num_hidden_layers):
94
+ prefix = "backbone." if is_semantic else ""
95
+ # queries, keys and values
96
+ in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
97
+ q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
98
+ v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")
99
+
100
+ state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
101
+ : config.hidden_size, :
102
+ ]
103
+ state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
104
+ state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
105
+ config.hidden_size : config.hidden_size * 2, :
106
+ ]
107
+ state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
108
+ -config.hidden_size :, :
109
+ ]
110
+ state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias
111
+
112
+ # gamma_1 and gamma_2
113
+ # we call them lambda because otherwise they are renamed when using .from_pretrained
114
+ gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
115
+ gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
116
+
117
+ state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
118
+ state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2
119
+
120
+
121
+ def rename_key(dct, old, new):
122
+ val = dct.pop(old)
123
+ dct[new] = val
124
+
125
+
126
+ # We will verify our results on an image of cute cats
127
+ def prepare_img():
128
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
129
+ im = Image.open(requests.get(url, stream=True).raw)
130
+ return im
131
+
132
+
133
+ @torch.no_grad()
134
+ def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
135
+ """
136
+ Copy/paste/tweak model's weights to our BEiT structure.
137
+ """
138
+
139
+ # define default BEiT configuration
140
+ has_lm_head = False if "rvlcdip" in checkpoint_url else True
141
+ config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)
142
+
143
+ # size of the architecture
144
+ if "large" in checkpoint_url or "dit-l" in checkpoint_url:
145
+ config.hidden_size = 1024
146
+ config.intermediate_size = 4096
147
+ config.num_hidden_layers = 24
148
+ config.num_attention_heads = 16
149
+
150
+ # labels
151
+ if "rvlcdip" in checkpoint_url:
152
+ config.num_labels = 16
153
+ repo_id = "huggingface/label-files"
154
+ filename = "rvlcdip-id2label.json"
155
+ id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
156
+ id2label = {int(k): v for k, v in id2label.items()}
157
+ config.id2label = id2label
158
+ config.label2id = {v: k for k, v in id2label.items()}
159
+
160
+ # load state_dict of original model, remove and rename some keys
161
+ state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
162
+
163
+ rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
164
+ for src, dest in rename_keys:
165
+ rename_key(state_dict, src, dest)
166
+ read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)
167
+
168
+ # load HuggingFace model
169
+ model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
170
+ model.eval()
171
+ model.load_state_dict(state_dict)
172
+
173
+ # Check outputs on an image
174
+ image_processor = BeitImageProcessor(
175
+ size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
176
+ )
177
+ image = prepare_img()
178
+
179
+ encoding = image_processor(images=image, return_tensors="pt")
180
+ pixel_values = encoding["pixel_values"]
181
+
182
+ outputs = model(pixel_values)
183
+ logits = outputs.logits
184
+
185
+ # verify logits
186
+ expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
187
+ assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"
188
+
189
+ Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
190
+ print(f"Saving model to {pytorch_dump_folder_path}")
191
+ model.save_pretrained(pytorch_dump_folder_path)
192
+ print(f"Saving image processor to {pytorch_dump_folder_path}")
193
+ image_processor.save_pretrained(pytorch_dump_folder_path)
194
+
195
+ if push_to_hub:
196
+ if has_lm_head:
197
+ model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
198
+ else:
199
+ model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
200
+ image_processor.push_to_hub(
201
+ repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
202
+ organization="nielsr",
203
+ commit_message="Add image processor",
204
+ use_temp_dir=True,
205
+ )
206
+ model.push_to_hub(
207
+ repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
208
+ organization="nielsr",
209
+ commit_message="Add model",
210
+ use_temp_dir=True,
211
+ )
212
+
213
+
214
+ if __name__ == "__main__":
215
+ parser = argparse.ArgumentParser()
216
+
217
+ parser.add_argument(
218
+ "--checkpoint_url",
219
+ default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
220
+ type=str,
221
+ help="URL to the original PyTorch checkpoint (.pth file).",
222
+ )
223
+ parser.add_argument(
224
+ "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
225
+ )
226
+ parser.add_argument(
227
+ "--push_to_hub",
228
+ action="store_true",
229
+ )
230
+ args = parser.parse_args()
231
+ convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
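Besides the CLI entry point above, the conversion can be driven from Python. A hedged sketch follows: the checkpoint URL is the script's own default, the dump folder is an illustrative placeholder, and the import assumes the script file is on the Python path.

```python
# Sketch of calling the DiT converter above directly from Python (paths are placeholders).
from convert_dit_unilm_to_pytorch import convert_dit_checkpoint

convert_dit_checkpoint(
    checkpoint_url="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
    pytorch_dump_folder_path="./dit-base",  # hypothetical output directory
    push_to_hub=False,
)
```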
env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/__init__.py ADDED
@@ -0,0 +1,75 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
17
+
18
+
19
+ _import_structure = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}
20
+
21
+ try:
22
+ if not is_vision_available():
23
+ raise OptionalDependencyNotAvailable()
24
+ except OptionalDependencyNotAvailable:
25
+ pass
26
+ else:
27
+ _import_structure["feature_extraction_glpn"] = ["GLPNFeatureExtractor"]
28
+ _import_structure["image_processing_glpn"] = ["GLPNImageProcessor"]
29
+
30
+ try:
31
+ if not is_torch_available():
32
+ raise OptionalDependencyNotAvailable()
33
+ except OptionalDependencyNotAvailable:
34
+ pass
35
+ else:
36
+ _import_structure["modeling_glpn"] = [
37
+ "GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
38
+ "GLPNForDepthEstimation",
39
+ "GLPNLayer",
40
+ "GLPNModel",
41
+ "GLPNPreTrainedModel",
42
+ ]
43
+
44
+
45
+ if TYPE_CHECKING:
46
+ from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
47
+
48
+ try:
49
+ if not is_vision_available():
50
+ raise OptionalDependencyNotAvailable()
51
+ except OptionalDependencyNotAvailable:
52
+ pass
53
+ else:
54
+ from .feature_extraction_glpn import GLPNFeatureExtractor
55
+ from .image_processing_glpn import GLPNImageProcessor
56
+
57
+ try:
58
+ if not is_torch_available():
59
+ raise OptionalDependencyNotAvailable()
60
+ except OptionalDependencyNotAvailable:
61
+ pass
62
+ else:
63
+ from .modeling_glpn import (
64
+ GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
65
+ GLPNForDepthEstimation,
66
+ GLPNLayer,
67
+ GLPNModel,
68
+ GLPNPreTrainedModel,
69
+ )
70
+
71
+
72
+ else:
73
+ import sys
74
+
75
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.22 kB).
env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/configuration_glpn.cpython-310.pyc ADDED
Binary file (5.39 kB).
env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/convert_glpn_to_pytorch.cpython-310.pyc ADDED
Binary file (5.39 kB).
env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/feature_extraction_glpn.cpython-310.pyc ADDED
Binary file (994 Bytes).
env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/image_processing_glpn.cpython-310.pyc ADDED
Binary file (9.32 kB).
env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/modeling_glpn.cpython-310.pyc ADDED
Binary file (23.5 kB).
env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/configuration_glpn.py ADDED
@@ -0,0 +1,137 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 KAIST and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ GLPN model configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...utils import logging
19
+
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+ GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
24
+ "vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
25
+ # See all GLPN models at https://huggingface.co/models?filter=glpn
26
+ }
27
+
28
+
29
+ class GLPNConfig(PretrainedConfig):
30
+ r"""
31
+ This is the configuration class to store the configuration of a [`GLPNModel`]. It is used to instantiate a GLPN
32
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
33
+ defaults will yield a similar configuration to that of the GLPN
34
+ [vinvino02/glpn-kitti](https://huggingface.co/vinvino02/glpn-kitti) architecture.
35
+
36
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
37
+ documentation from [`PretrainedConfig`] for more information.
38
+
39
+ Args:
40
+ num_channels (`int`, *optional*, defaults to 3):
41
+ The number of input channels.
42
+ num_encoder_blocks (`int`, *optional*, defaults to 4):
43
+ The number of encoder blocks (i.e. stages in the Mix Transformer encoder).
44
+ depths (`List[int]`, *optional*, defaults to `[2, 2, 2, 2]`):
45
+ The number of layers in each encoder block.
46
+ sr_ratios (`List[int]`, *optional*, defaults to `[8, 4, 2, 1]`):
47
+ Sequence reduction ratios in each encoder block.
48
+ hidden_sizes (`List[int]`, *optional*, defaults to `[32, 64, 160, 256]`):
49
+ Dimension of each of the encoder blocks.
50
+ patch_sizes (`List[int]`, *optional*, defaults to `[7, 3, 3, 3]`):
51
+ Patch size before each encoder block.
52
+ strides (`List[int]`, *optional*, defaults to `[4, 2, 2, 2]`):
53
+ Stride before each encoder block.
54
+ num_attention_heads (`List[int]`, *optional*, defaults to `[1, 2, 5, 8]`):
55
+ Number of attention heads for each attention layer in each block of the Transformer encoder.
56
+ mlp_ratios (`List[int]`, *optional*, defaults to `[4, 4, 4, 4]`):
57
+ Ratio of the size of the hidden layer compared to the size of the input layer of the Mix FFNs in the
58
+ encoder blocks.
59
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
60
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
61
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
62
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
63
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
64
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
65
+ The dropout ratio for the attention probabilities.
66
+ initializer_range (`float`, *optional*, defaults to 0.02):
67
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
68
+ drop_path_rate (`float`, *optional*, defaults to 0.1):
69
+ The dropout probability for stochastic depth, used in the blocks of the Transformer encoder.
70
+ layer_norm_eps (`float`, *optional*, defaults to 1e-06):
71
+ The epsilon used by the layer normalization layers.
72
+ decoder_hidden_size (`int`, *optional*, defaults to 64):
73
+ The dimension of the decoder.
74
+ max_depth (`int`, *optional*, defaults to 10):
75
+ The maximum depth of the decoder.
76
+ head_in_index (`int`, *optional*, defaults to -1):
77
+ The index of the features to use in the head.
78
+
79
+ Example:
80
+
81
+ ```python
82
+ >>> from transformers import GLPNModel, GLPNConfig
83
+
84
+ >>> # Initializing a GLPN vinvino02/glpn-kitti style configuration
85
+ >>> configuration = GLPNConfig()
86
+
87
+ >>> # Initializing a model from the vinvino02/glpn-kitti style configuration
88
+ >>> model = GLPNModel(configuration)
89
+
90
+ >>> # Accessing the model configuration
91
+ >>> configuration = model.config
92
+ ```"""
93
+
94
+ model_type = "glpn"
95
+
96
+ def __init__(
97
+ self,
98
+ num_channels=3,
99
+ num_encoder_blocks=4,
100
+ depths=[2, 2, 2, 2],
101
+ sr_ratios=[8, 4, 2, 1],
102
+ hidden_sizes=[32, 64, 160, 256],
103
+ patch_sizes=[7, 3, 3, 3],
104
+ strides=[4, 2, 2, 2],
105
+ num_attention_heads=[1, 2, 5, 8],
106
+ mlp_ratios=[4, 4, 4, 4],
107
+ hidden_act="gelu",
108
+ hidden_dropout_prob=0.0,
109
+ attention_probs_dropout_prob=0.0,
110
+ initializer_range=0.02,
111
+ drop_path_rate=0.1,
112
+ layer_norm_eps=1e-6,
113
+ decoder_hidden_size=64,
114
+ max_depth=10,
115
+ head_in_index=-1,
116
+ **kwargs,
117
+ ):
118
+ super().__init__(**kwargs)
119
+
120
+ self.num_channels = num_channels
121
+ self.num_encoder_blocks = num_encoder_blocks
122
+ self.depths = depths
123
+ self.sr_ratios = sr_ratios
124
+ self.hidden_sizes = hidden_sizes
125
+ self.patch_sizes = patch_sizes
126
+ self.strides = strides
127
+ self.mlp_ratios = mlp_ratios
128
+ self.num_attention_heads = num_attention_heads
129
+ self.hidden_act = hidden_act
130
+ self.hidden_dropout_prob = hidden_dropout_prob
131
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
132
+ self.initializer_range = initializer_range
133
+ self.drop_path_rate = drop_path_rate
134
+ self.layer_norm_eps = layer_norm_eps
135
+ self.decoder_hidden_size = decoder_hidden_size
136
+ self.max_depth = max_depth
137
+ self.head_in_index = head_in_index
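As a complement to the docstring example above, here is a small hedged sketch of overriding the defaults; the particular values mirror the larger (SegFormer-B4 sized) encoder configured in the GLPN conversion script later in this commit, and the resulting model is randomly initialised.

```python
# Sketch of building a non-default GLPN configuration (values taken from the conversion
# script further below in this commit; the model here is untrained).
from transformers import GLPNConfig, GLPNForDepthEstimation

config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])
model = GLPNForDepthEstimation(config)

print(config.num_encoder_blocks, config.hidden_sizes)
print(sum(p.numel() for p in model.parameters()))  # rough parameter count of this variant
```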
env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/convert_glpn_to_pytorch.py ADDED
@@ -0,0 +1,219 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert GLPN checkpoints."""
16
+
17
+
18
+ import argparse
19
+ from collections import OrderedDict
20
+ from pathlib import Path
21
+
22
+ import requests
23
+ import torch
24
+ from PIL import Image
25
+
26
+ from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
27
+ from transformers.utils import logging
28
+
29
+
30
+ logging.set_verbosity_info()
31
+ logger = logging.get_logger(__name__)
32
+
33
+
34
+ def rename_keys(state_dict):
35
+ new_state_dict = OrderedDict()
36
+ for key, value in state_dict.items():
37
+ if key.startswith("module.encoder"):
38
+ key = key.replace("module.encoder", "glpn.encoder")
39
+ if key.startswith("module.decoder"):
40
+ key = key.replace("module.decoder", "decoder.stages")
41
+ if "patch_embed" in key:
42
+ # replace for example patch_embed1 by patch_embeddings.0
43
+ idx = key[key.find("patch_embed") + len("patch_embed")]
44
+ key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
45
+ if "norm" in key:
46
+ key = key.replace("norm", "layer_norm")
47
+ if "glpn.encoder.layer_norm" in key:
48
+ # replace for example layer_norm1 by layer_norm.0
49
+ idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
50
+ key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
51
+ if "layer_norm1" in key:
52
+ key = key.replace("layer_norm1", "layer_norm_1")
53
+ if "layer_norm2" in key:
54
+ key = key.replace("layer_norm2", "layer_norm_2")
55
+ if "block" in key:
56
+ # replace for example block1 by block.0
57
+ idx = key[key.find("block") + len("block")]
58
+ key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
59
+ if "attn.q" in key:
60
+ key = key.replace("attn.q", "attention.self.query")
61
+ if "attn.proj" in key:
62
+ key = key.replace("attn.proj", "attention.output.dense")
63
+ if "attn" in key:
64
+ key = key.replace("attn", "attention.self")
65
+ if "fc1" in key:
66
+ key = key.replace("fc1", "dense1")
67
+ if "fc2" in key:
68
+ key = key.replace("fc2", "dense2")
69
+ if "linear_pred" in key:
70
+ key = key.replace("linear_pred", "classifier")
71
+ if "linear_fuse" in key:
72
+ key = key.replace("linear_fuse.conv", "linear_fuse")
73
+ key = key.replace("linear_fuse.bn", "batch_norm")
74
+ if "linear_c" in key:
75
+ # replace for example linear_c4 by linear_c.3
76
+ idx = key[key.find("linear_c") + len("linear_c")]
77
+ key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
78
+ if "bot_conv" in key:
79
+ key = key.replace("bot_conv", "0.convolution")
80
+ if "skip_conv1" in key:
81
+ key = key.replace("skip_conv1", "1.convolution")
82
+ if "skip_conv2" in key:
83
+ key = key.replace("skip_conv2", "2.convolution")
84
+ if "fusion1" in key:
85
+ key = key.replace("fusion1", "1.fusion")
86
+ if "fusion2" in key:
87
+ key = key.replace("fusion2", "2.fusion")
88
+ if "fusion3" in key:
89
+ key = key.replace("fusion3", "3.fusion")
90
+ if "fusion" in key and "conv" in key:
91
+ key = key.replace("conv", "convolutional_layer")
92
+ if key.startswith("module.last_layer_depth"):
93
+ key = key.replace("module.last_layer_depth", "head.head")
94
+ new_state_dict[key] = value
95
+
96
+ return new_state_dict
97
+
98
+
99
+ def read_in_k_v(state_dict, config):
100
+ # for each of the encoder blocks:
101
+ for i in range(config.num_encoder_blocks):
102
+ for j in range(config.depths[i]):
103
+ # read in weights + bias of keys and values (which is a single matrix in the original implementation)
104
+ kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
105
+ kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
106
+ # next, add keys and values (in that order) to the state dict
107
+ state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
108
+ : config.hidden_sizes[i], :
109
+ ]
110
+ state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
111
+ state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
112
+ config.hidden_sizes[i] :, :
113
+ ]
114
+ state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
115
+
116
+
117
+ # We will verify our results on a COCO image
118
+ def prepare_img():
119
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
120
+ image = Image.open(requests.get(url, stream=True).raw)
121
+
122
+ return image
123
+
124
+
125
+ @torch.no_grad()
126
+ def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
127
+ """
128
+ Copy/paste/tweak model's weights to our GLPN structure.
129
+ """
130
+
131
+ # load GLPN configuration (Segformer-B4 size)
132
+ config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])
133
+
134
+ # load image processor (only resize + rescale)
135
+ image_processor = GLPNImageProcessor()
136
+
137
+ # prepare image
138
+ image = prepare_img()
139
+ pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
140
+
141
+ logger.info("Converting model...")
142
+
143
+ # load original state dict
144
+ state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
145
+
146
+ # rename keys
147
+ state_dict = rename_keys(state_dict)
148
+
149
+ # key and value matrices need special treatment
150
+ read_in_k_v(state_dict, config)
151
+
152
+ # create HuggingFace model and load state dict
153
+ model = GLPNForDepthEstimation(config)
154
+ model.load_state_dict(state_dict)
155
+ model.eval()
156
+
157
+ # forward pass
158
+ outputs = model(pixel_values)
159
+ predicted_depth = outputs.predicted_depth
160
+
161
+ # verify output
162
+ if model_name is not None:
163
+ if "nyu" in model_name:
164
+ expected_slice = torch.tensor(
165
+ [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
166
+ )
167
+ elif "kitti" in model_name:
168
+ expected_slice = torch.tensor(
169
+ [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
170
+ )
171
+ else:
172
+ raise ValueError(f"Unknown model name: {model_name}")
173
+
174
+ expected_shape = torch.Size([1, 480, 640])
175
+
176
+ assert predicted_depth.shape == expected_shape
177
+ assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
178
+ print("Looks ok!")
179
+
180
+ # finally, push to hub if required
181
+ if push_to_hub:
182
+ logger.info("Pushing model and image processor to the hub...")
183
+ model.push_to_hub(
184
+ repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
185
+ organization="nielsr",
186
+ commit_message="Add model",
187
+ use_temp_dir=True,
188
+ )
189
+ image_processor.push_to_hub(
190
+ repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
191
+ organization="nielsr",
192
+ commit_message="Add image processor",
193
+ use_temp_dir=True,
194
+ )
195
+
196
+
197
+ if __name__ == "__main__":
198
+ parser = argparse.ArgumentParser()
199
+
200
+ parser.add_argument(
201
+ "--checkpoint_path",
202
+ default=None,
203
+ type=str,
204
+ help="Path to the original PyTorch checkpoint (.pth file).",
205
+ )
206
+ parser.add_argument(
207
+ "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
208
+ )
209
+ parser.add_argument(
210
+ "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
211
+ )
212
+ parser.add_argument(
213
+ "--model_name",
214
+ default="glpn-kitti",
215
+ type=str,
216
+ help="Name of the model in case you're pushing to the hub.",
217
+ )
218
+ args = parser.parse_args()
219
+ convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
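The converter above can also be called directly from Python. A hedged sketch: the checkpoint path is a hypothetical local file, and `model_name` matches the script's default so the KITTI logits check shown above is the one that runs.

```python
# Sketch of invoking the GLPN converter directly (checkpoint path is a placeholder; the import
# assumes the script file is on the Python path).
from convert_glpn_to_pytorch import convert_glpn_checkpoint

convert_glpn_checkpoint(
    checkpoint_path="./glpn_kitti_checkpoint.ckpt",  # placeholder path to the original weights
    pytorch_dump_folder_path="./glpn-kitti",
    push_to_hub=False,
    model_name="glpn-kitti",
)
```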
env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/feature_extraction_glpn.py ADDED
@@ -0,0 +1,33 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Feature extractor class for GLPN."""
16
+
17
+ import warnings
18
+
19
+ from ...utils import logging
20
+ from .image_processing_glpn import GLPNImageProcessor
21
+
22
+
23
+ logger = logging.get_logger(__name__)
24
+
25
+
26
+ class GLPNFeatureExtractor(GLPNImageProcessor):
27
+ def __init__(self, *args, **kwargs) -> None:
28
+ warnings.warn(
29
+ "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
30
+ " use GLPNImageProcessor instead.",
31
+ FutureWarning,
32
+ )
33
+ super().__init__(*args, **kwargs)
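A short sketch of the deprecation above: constructing the old class still works but emits a `FutureWarning`, so new code should instantiate `GLPNImageProcessor` directly.

```python
# Sketch: the legacy alias warns on construction; prefer GLPNImageProcessor going forward.
import warnings

from transformers import GLPNFeatureExtractor, GLPNImageProcessor

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    legacy = GLPNFeatureExtractor()  # still a fully functional GLPNImageProcessor subclass

print(any(issubclass(w.category, FutureWarning) for w in caught))  # True

image_processor = GLPNImageProcessor()  # preferred
```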
env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/image_processing_glpn.py ADDED
@@ -0,0 +1,233 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Image processor class for GLPN."""
16
+
17
+ from typing import List, Optional, Union
18
+
19
+ import numpy as np
20
+ import PIL.Image
21
+
22
+ from ...image_processing_utils import BaseImageProcessor, BatchFeature
23
+ from ...image_transforms import resize, to_channel_dimension_format
24
+ from ...image_utils import (
25
+ ChannelDimension,
26
+ PILImageResampling,
27
+ get_image_size,
28
+ infer_channel_dimension_format,
29
+ is_scaled_image,
30
+ make_list_of_images,
31
+ to_numpy_array,
32
+ valid_images,
33
+ validate_kwargs,
34
+ validate_preprocess_arguments,
35
+ )
36
+ from ...utils import TensorType, logging
37
+
38
+
39
+ logger = logging.get_logger(__name__)
40
+
41
+
42
+ class GLPNImageProcessor(BaseImageProcessor):
43
+ r"""
44
+ Constructs a GLPN image processor.
45
+
46
+ Args:
47
+ do_resize (`bool`, *optional*, defaults to `True`):
48
+ Whether to resize the image's (height, width) dimensions, rounding them down to the closest multiple of
49
+ `size_divisor`. Can be overridden by `do_resize` in `preprocess`.
50
+ size_divisor (`int`, *optional*, defaults to 32):
51
+ When `do_resize` is `True`, images are resized so their height and width are rounded down to the closest
52
+ multiple of `size_divisor`. Can be overridden by `size_divisor` in `preprocess`.
53
+ resample (`PIL.Image` resampling filter, *optional*, defaults to `Resampling.BILINEAR`):
54
+ Resampling filter to use if resizing the image. Can be overridden by `resample` in `preprocess`.
55
+ do_rescale (`bool`, *optional*, defaults to `True`):
56
+ Whether or not to apply the scaling factor (to make pixel values floats between 0. and 1.). Can be
57
+ overridden by `do_rescale` in `preprocess`.
58
+ """
59
+
60
+ model_input_names = ["pixel_values"]
61
+
62
+ def __init__(
63
+ self,
64
+ do_resize: bool = True,
65
+ size_divisor: int = 32,
66
+ resample=PILImageResampling.BILINEAR,
67
+ do_rescale: bool = True,
68
+ **kwargs,
69
+ ) -> None:
70
+ self.do_resize = do_resize
71
+ self.do_rescale = do_rescale
72
+ self.size_divisor = size_divisor
73
+ self.resample = resample
74
+ super().__init__(**kwargs)
75
+ self._valid_processor_keys = [
76
+ "images",
77
+ "do_resize",
78
+ "size_divisor",
79
+ "resample",
80
+ "do_rescale",
81
+ "return_tensors",
82
+ "data_format",
83
+ "input_data_format",
84
+ ]
85
+
86
+ def resize(
87
+ self,
88
+ image: np.ndarray,
89
+ size_divisor: int,
90
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
91
+ data_format: Optional[ChannelDimension] = None,
92
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
93
+ **kwargs,
94
+ ) -> np.ndarray:
95
+ """
96
+ Resize the image, rounding the (height, width) dimensions down to the closest multiple of size_divisor.
97
+
98
+ If the image is of dimension (3, 260, 170) and size_divisor is 32, the image will be resized to (3, 256, 160).
99
+
100
+ Args:
101
+ image (`np.ndarray`):
102
+ The image to resize.
103
+ size_divisor (`int`):
104
+ The image is resized so its height and width are rounded down to the closest multiple of
105
+ `size_divisor`.
106
+ resample:
107
+ `PIL.Image` resampling filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`.
108
+ data_format (`ChannelDimension` or `str`, *optional*):
109
+ The channel dimension format for the output image. If `None`, the channel dimension format of the input
110
+ image is used. Can be one of:
111
+ - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
112
+ - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
113
+ input_data_format (`ChannelDimension` or `str`, *optional*):
114
+ The channel dimension format of the input image. If not set, the channel dimension format is inferred
115
+ from the input image. Can be one of:
116
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
117
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
118
+
119
+ Returns:
120
+ `np.ndarray`: The resized image.
121
+ """
122
+ height, width = get_image_size(image, channel_dim=input_data_format)
123
+ # Rounds the height and width down to the closest multiple of size_divisor
124
+ new_h = height // size_divisor * size_divisor
125
+ new_w = width // size_divisor * size_divisor
126
+ image = resize(
127
+ image,
128
+ (new_h, new_w),
129
+ resample=resample,
130
+ data_format=data_format,
131
+ input_data_format=input_data_format,
132
+ **kwargs,
133
+ )
134
+ return image
135
+
136
+ def preprocess(
137
+ self,
138
+ images: Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]],
139
+ do_resize: Optional[bool] = None,
140
+ size_divisor: Optional[int] = None,
141
+ resample=None,
142
+ do_rescale: Optional[bool] = None,
143
+ return_tensors: Optional[Union[TensorType, str]] = None,
144
+ data_format: ChannelDimension = ChannelDimension.FIRST,
145
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
146
+ **kwargs,
147
+ ) -> BatchFeature:
148
+ """
149
+ Preprocess the given images.
150
+
151
+ Args:
152
+ images (`PIL.Image.Image` or `TensorType` or `List[np.ndarray]` or `List[TensorType]`):
153
+ Images to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
154
+ passing in images with pixel values between 0 and 1, set `do_rescale=False`.
155
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
156
+ Whether to resize the input such that the (height, width) dimensions are a multiple of `size_divisor`.
157
+ size_divisor (`int`, *optional*, defaults to `self.size_divisor`):
158
+ When `do_resize` is `True`, images are resized so their height and width are rounded down to the
159
+ closest multiple of `size_divisor`.
160
+ resample (`PIL.Image` resampling filter, *optional*, defaults to `self.resample`):
161
+ `PIL.Image` resampling filter to use if resizing the image e.g. `PILImageResampling.BILINEAR`. Only has
162
+ an effect if `do_resize` is set to `True`.
163
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
164
+ Whether or not to apply the scaling factor (to make pixel values floats between 0. and 1.).
165
+ return_tensors (`str` or `TensorType`, *optional*):
166
+ The type of tensors to return. Can be one of:
167
+ - `None`: Return a list of `np.ndarray`.
168
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
169
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
170
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
171
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
172
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
173
+ The channel dimension format for the output image. Can be one of:
174
+ - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
175
+ - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
176
+ input_data_format (`ChannelDimension` or `str`, *optional*):
177
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
178
+ from the input image. Can be one of:
179
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
180
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
181
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
182
+ """
183
+ do_resize = do_resize if do_resize is not None else self.do_resize
184
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
185
+ size_divisor = size_divisor if size_divisor is not None else self.size_divisor
186
+ resample = resample if resample is not None else self.resample
187
+
188
+ images = make_list_of_images(images)
189
+
190
+ validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
191
+
192
+ if not valid_images(images):
193
+ raise ValueError(
194
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
195
+ "torch.Tensor, tf.Tensor or jax.ndarray."
196
+ )
197
+
198
+ # Here, the rescale() method uses a constant rescale_factor. It does not need to be validated
199
+ # with a rescale_factor.
200
+ validate_preprocess_arguments(
201
+ do_resize=do_resize,
202
+ size=size_divisor, # Here, size_divisor is used as a parameter for optimal resizing instead of size.
203
+ resample=resample,
204
+ )
205
+
206
+ # All transformations expect numpy arrays.
207
+ images = [to_numpy_array(img) for img in images]
208
+
209
+ if is_scaled_image(images[0]) and do_rescale:
210
+ logger.warning_once(
211
+ "It looks like you are trying to rescale already rescaled images. If the input"
212
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
213
+ )
214
+
215
+ if input_data_format is None:
216
+ # We assume that all images have the same channel dimension format.
217
+ input_data_format = infer_channel_dimension_format(images[0])
218
+
219
+ if do_resize:
220
+ images = [
221
+ self.resize(image, size_divisor=size_divisor, resample=resample, input_data_format=input_data_format)
222
+ for image in images
223
+ ]
224
+
225
+ if do_rescale:
226
+ images = [self.rescale(image, scale=1 / 255, input_data_format=input_data_format) for image in images]
227
+
228
+ images = [
229
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
230
+ ]
231
+
232
+ data = {"pixel_values": images}
233
+ return BatchFeature(data=data, tensor_type=return_tensors)
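A minimal sketch of the preprocessing above under the default settings: each image's (height, width) is rounded down to the nearest multiple of `size_divisor` and pixel values are rescaled to [0, 1]. The random input below is purely illustrative.

```python
# Sketch: 500 -> 480 and 650 -> 640 with size_divisor=32 (floor division, then multiply back).
import numpy as np

from transformers import GLPNImageProcessor

image_processor = GLPNImageProcessor(size_divisor=32)
image = np.random.randint(0, 256, (500, 650, 3), dtype=np.uint8)  # illustrative HWC uint8 image

batch = image_processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 480, 640), channels-first by default
```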
env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/modeling_glpn.py ADDED
@@ -0,0 +1,780 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 KAIST and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch GLPN model."""
16
+
17
+
18
+ import math
19
+ from typing import List, Optional, Tuple, Union
20
+
21
+ import torch
22
+ import torch.utils.checkpoint
23
+ from torch import nn
24
+
25
+ from ...activations import ACT2FN
26
+ from ...modeling_outputs import BaseModelOutput, DepthEstimatorOutput
27
+ from ...modeling_utils import PreTrainedModel
28
+ from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
29
+ from ...utils import (
30
+ add_code_sample_docstrings,
31
+ add_start_docstrings,
32
+ add_start_docstrings_to_model_forward,
33
+ logging,
34
+ replace_return_docstrings,
35
+ )
36
+ from .configuration_glpn import GLPNConfig
37
+
38
+
39
+ logger = logging.get_logger(__name__)
40
+
41
+
42
+ # General docstring
43
+ _CONFIG_FOR_DOC = "GLPNConfig"
44
+
45
+ # Base docstring
46
+ _CHECKPOINT_FOR_DOC = "vinvino02/glpn-kitti"
47
+ _EXPECTED_OUTPUT_SHAPE = [1, 512, 15, 20]
48
+
49
+ GLPN_PRETRAINED_MODEL_ARCHIVE_LIST = [
50
+ "vinvino02/glpn-kitti",
51
+ # See all GLPN models at https://huggingface.co/models?filter=glpn
52
+ ]
53
+
54
+
55
+ # Copied from transformers.models.beit.modeling_beit.drop_path
56
+ def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
57
+ """
58
+ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
59
+
60
+ Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
61
+ however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
62
+ See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
63
+ layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
64
+ argument.
65
+ """
66
+ if drop_prob == 0.0 or not training:
67
+ return input
68
+ keep_prob = 1 - drop_prob
69
+ shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
70
+ random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
71
+ random_tensor.floor_() # binarize
72
+ output = input.div(keep_prob) * random_tensor
73
+ return output
74
+
75
+
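A short numerical sketch of the stochastic-depth behaviour described in the docstring above (values are illustrative): during training each sample's residual branch is either zeroed or rescaled by `1 / keep_prob`, so the expected output equals the input, and at evaluation time the input passes through unchanged.

```python
# Sketch using the module-level drop_path defined above (illustrative drop_prob).
import torch

from transformers.models.glpn.modeling_glpn import drop_path

torch.manual_seed(0)
x = torch.ones(4, 3)  # a batch of 4 residual-branch activations
print(drop_path(x, drop_prob=0.25, training=True))   # rows are all 0 or all 1 / 0.75
print(drop_path(x, drop_prob=0.25, training=False))  # unchanged in eval mode
```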
76
+ # Copied from transformers.models.segformer.modeling_segformer.SegformerDropPath
77
+ class GLPNDropPath(nn.Module):
78
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
79
+
80
+ def __init__(self, drop_prob: Optional[float] = None) -> None:
81
+ super().__init__()
82
+ self.drop_prob = drop_prob
83
+
84
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
85
+ return drop_path(hidden_states, self.drop_prob, self.training)
86
+
87
+ def extra_repr(self) -> str:
88
+ return "p={}".format(self.drop_prob)
89
+
90
+
91
+ # Copied from transformers.models.segformer.modeling_segformer.SegformerOverlapPatchEmbeddings
92
+ class GLPNOverlapPatchEmbeddings(nn.Module):
93
+ """Construct the overlapping patch embeddings."""
94
+
95
+ def __init__(self, patch_size, stride, num_channels, hidden_size):
96
+ super().__init__()
97
+ self.proj = nn.Conv2d(
98
+ num_channels,
99
+ hidden_size,
100
+ kernel_size=patch_size,
101
+ stride=stride,
102
+ padding=patch_size // 2,
103
+ )
104
+
105
+ self.layer_norm = nn.LayerNorm(hidden_size)
106
+
107
+ def forward(self, pixel_values):
108
+ embeddings = self.proj(pixel_values)
109
+ _, _, height, width = embeddings.shape
110
+ # (batch_size, num_channels, height, width) -> (batch_size, num_channels, height*width) -> (batch_size, height*width, num_channels)
111
+ # this can be fed to a Transformer layer
112
+ embeddings = embeddings.flatten(2).transpose(1, 2)
113
+ embeddings = self.layer_norm(embeddings)
114
+ return embeddings, height, width
115
+
116
+
117
+ # Copied from transformers.models.segformer.modeling_segformer.SegformerEfficientSelfAttention
118
+ class GLPNEfficientSelfAttention(nn.Module):
119
+ """SegFormer's efficient self-attention mechanism. Employs the sequence reduction process introduced in the [PvT
120
+ paper](https://arxiv.org/abs/2102.12122)."""
121
+
122
+ def __init__(self, config, hidden_size, num_attention_heads, sequence_reduction_ratio):
123
+ super().__init__()
124
+ self.hidden_size = hidden_size
125
+ self.num_attention_heads = num_attention_heads
126
+
127
+ if self.hidden_size % self.num_attention_heads != 0:
128
+ raise ValueError(
129
+ f"The hidden size ({self.hidden_size}) is not a multiple of the number of attention "
130
+ f"heads ({self.num_attention_heads})"
131
+ )
132
+
133
+ self.attention_head_size = int(self.hidden_size / self.num_attention_heads)
134
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
135
+
136
+ self.query = nn.Linear(self.hidden_size, self.all_head_size)
137
+ self.key = nn.Linear(self.hidden_size, self.all_head_size)
138
+ self.value = nn.Linear(self.hidden_size, self.all_head_size)
139
+
140
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
141
+
142
+ self.sr_ratio = sequence_reduction_ratio
143
+ if sequence_reduction_ratio > 1:
144
+ self.sr = nn.Conv2d(
145
+ hidden_size, hidden_size, kernel_size=sequence_reduction_ratio, stride=sequence_reduction_ratio
146
+ )
147
+ self.layer_norm = nn.LayerNorm(hidden_size)
148
+
149
+ def transpose_for_scores(self, hidden_states):
150
+ new_shape = hidden_states.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
151
+ hidden_states = hidden_states.view(new_shape)
152
+ return hidden_states.permute(0, 2, 1, 3)
153
+
154
+ def forward(
155
+ self,
156
+ hidden_states,
157
+ height,
158
+ width,
159
+ output_attentions=False,
160
+ ):
161
+ query_layer = self.transpose_for_scores(self.query(hidden_states))
162
+
163
+ if self.sr_ratio > 1:
164
+ batch_size, seq_len, num_channels = hidden_states.shape
165
+ # Reshape to (batch_size, num_channels, height, width)
166
+ hidden_states = hidden_states.permute(0, 2, 1).reshape(batch_size, num_channels, height, width)
167
+ # Apply sequence reduction
168
+ hidden_states = self.sr(hidden_states)
169
+ # Reshape back to (batch_size, seq_len, num_channels)
170
+ hidden_states = hidden_states.reshape(batch_size, num_channels, -1).permute(0, 2, 1)
171
+ hidden_states = self.layer_norm(hidden_states)
172
+
173
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
174
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
175
+
176
+ # Take the dot product between "query" and "key" to get the raw attention scores.
177
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
178
+
179
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
180
+
181
+ # Normalize the attention scores to probabilities.
182
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
183
+
184
+ # This is actually dropping out entire tokens to attend to, which might
185
+ # seem a bit unusual, but is taken from the original Transformer paper.
186
+ attention_probs = self.dropout(attention_probs)
187
+
188
+ context_layer = torch.matmul(attention_probs, value_layer)
189
+
190
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
191
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
192
+ context_layer = context_layer.view(new_context_layer_shape)
193
+
194
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
195
+
196
+ return outputs
197
+
198
+
199
+ # Copied from transformers.models.segformer.modeling_segformer.SegformerSelfOutput
200
+ class GLPNSelfOutput(nn.Module):
201
+ def __init__(self, config, hidden_size):
202
+ super().__init__()
203
+ self.dense = nn.Linear(hidden_size, hidden_size)
204
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
205
+
206
+ def forward(self, hidden_states, input_tensor):
207
+ hidden_states = self.dense(hidden_states)
208
+ hidden_states = self.dropout(hidden_states)
209
+ return hidden_states
210
+
211
+
212
+ # Copied from transformers.models.segformer.modeling_segformer.SegformerAttention with Segformer->GLPN
213
+ class GLPNAttention(nn.Module):
214
+ def __init__(self, config, hidden_size, num_attention_heads, sequence_reduction_ratio):
215
+ super().__init__()
216
+ self.self = GLPNEfficientSelfAttention(
217
+ config=config,
218
+ hidden_size=hidden_size,
219
+ num_attention_heads=num_attention_heads,
220
+ sequence_reduction_ratio=sequence_reduction_ratio,
221
+ )
222
+ self.output = GLPNSelfOutput(config, hidden_size=hidden_size)
223
+ self.pruned_heads = set()
224
+
225
+ def prune_heads(self, heads):
226
+ if len(heads) == 0:
227
+ return
228
+ heads, index = find_pruneable_heads_and_indices(
229
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
230
+ )
231
+
232
+ # Prune linear layers
233
+ self.self.query = prune_linear_layer(self.self.query, index)
234
+ self.self.key = prune_linear_layer(self.self.key, index)
235
+ self.self.value = prune_linear_layer(self.self.value, index)
236
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
237
+
238
+ # Update hyper params and store pruned heads
239
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
240
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
241
+ self.pruned_heads = self.pruned_heads.union(heads)
242
+
243
+ def forward(self, hidden_states, height, width, output_attentions=False):
244
+ self_outputs = self.self(hidden_states, height, width, output_attentions)
245
+
246
+ attention_output = self.output(self_outputs[0], hidden_states)
247
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
248
+ return outputs
249
+
250
+
251
+ # Copied from transformers.models.segformer.modeling_segformer.SegformerDWConv
252
+ class GLPNDWConv(nn.Module):
253
+ def __init__(self, dim=768):
254
+ super().__init__()
255
+ self.dwconv = nn.Conv2d(dim, dim, 3, 1, 1, bias=True, groups=dim)
256
+
257
+ def forward(self, hidden_states, height, width):
258
+ batch_size, seq_len, num_channels = hidden_states.shape
259
+ hidden_states = hidden_states.transpose(1, 2).view(batch_size, num_channels, height, width)
260
+ hidden_states = self.dwconv(hidden_states)
261
+ hidden_states = hidden_states.flatten(2).transpose(1, 2)
262
+
263
+ return hidden_states
264
+
265
+
266
+ # Copied from transformers.models.segformer.modeling_segformer.SegformerMixFFN with Segformer->GLPN
267
+ class GLPNMixFFN(nn.Module):
268
+ def __init__(self, config, in_features, hidden_features=None, out_features=None):
269
+ super().__init__()
270
+ out_features = out_features or in_features
271
+ self.dense1 = nn.Linear(in_features, hidden_features)
272
+ self.dwconv = GLPNDWConv(hidden_features)
273
+ if isinstance(config.hidden_act, str):
274
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
275
+ else:
276
+ self.intermediate_act_fn = config.hidden_act
277
+ self.dense2 = nn.Linear(hidden_features, out_features)
278
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
279
+
280
+ def forward(self, hidden_states, height, width):
281
+ hidden_states = self.dense1(hidden_states)
282
+ hidden_states = self.dwconv(hidden_states, height, width)
283
+ hidden_states = self.intermediate_act_fn(hidden_states)
284
+ hidden_states = self.dropout(hidden_states)
285
+ hidden_states = self.dense2(hidden_states)
286
+ hidden_states = self.dropout(hidden_states)
287
+ return hidden_states
288
+
289
+
290
+ # Copied from transformers.models.segformer.modeling_segformer.SegformerLayer with Segformer->GLPN
291
+ class GLPNLayer(nn.Module):
292
+ """This corresponds to the Block class in the original implementation."""
293
+
294
+ def __init__(self, config, hidden_size, num_attention_heads, drop_path, sequence_reduction_ratio, mlp_ratio):
295
+ super().__init__()
296
+ self.layer_norm_1 = nn.LayerNorm(hidden_size)
297
+ self.attention = GLPNAttention(
298
+ config,
299
+ hidden_size=hidden_size,
300
+ num_attention_heads=num_attention_heads,
301
+ sequence_reduction_ratio=sequence_reduction_ratio,
302
+ )
303
+ self.drop_path = GLPNDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
304
+ self.layer_norm_2 = nn.LayerNorm(hidden_size)
305
+ mlp_hidden_size = int(hidden_size * mlp_ratio)
306
+ self.mlp = GLPNMixFFN(config, in_features=hidden_size, hidden_features=mlp_hidden_size)
307
+
308
+ def forward(self, hidden_states, height, width, output_attentions=False):
309
+ self_attention_outputs = self.attention(
310
+ self.layer_norm_1(hidden_states), # in GLPN, layernorm is applied before self-attention
311
+ height,
312
+ width,
313
+ output_attentions=output_attentions,
314
+ )
315
+
316
+ attention_output = self_attention_outputs[0]
317
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
318
+
319
+ # first residual connection (with stochastic depth)
320
+ attention_output = self.drop_path(attention_output)
321
+ hidden_states = attention_output + hidden_states
322
+
323
+ mlp_output = self.mlp(self.layer_norm_2(hidden_states), height, width)
324
+
325
+ # second residual connection (with stochastic depth)
326
+ mlp_output = self.drop_path(mlp_output)
327
+ layer_output = mlp_output + hidden_states
328
+
329
+ outputs = (layer_output,) + outputs
330
+
331
+ return outputs
332
+
333
+
334
+ class GLPNEncoder(nn.Module):
335
+ def __init__(self, config):
336
+ super().__init__()
337
+ self.config = config
338
+
339
+ # stochastic depth decay rule
340
+ dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]
341
+
342
+ # patch embeddings
343
+ embeddings = []
344
+ for i in range(config.num_encoder_blocks):
345
+ embeddings.append(
346
+ GLPNOverlapPatchEmbeddings(
347
+ patch_size=config.patch_sizes[i],
348
+ stride=config.strides[i],
349
+ num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
350
+ hidden_size=config.hidden_sizes[i],
351
+ )
352
+ )
353
+ self.patch_embeddings = nn.ModuleList(embeddings)
354
+
355
+ # Transformer blocks
356
+ blocks = []
357
+ cur = 0
358
+ for i in range(config.num_encoder_blocks):
359
+ # each block consists of layers
360
+ layers = []
361
+ if i != 0:
362
+ cur += config.depths[i - 1]
363
+ for j in range(config.depths[i]):
364
+ layers.append(
365
+ GLPNLayer(
366
+ config,
367
+ hidden_size=config.hidden_sizes[i],
368
+ num_attention_heads=config.num_attention_heads[i],
369
+ drop_path=dpr[cur + j],
370
+ sequence_reduction_ratio=config.sr_ratios[i],
371
+ mlp_ratio=config.mlp_ratios[i],
372
+ )
373
+ )
374
+ blocks.append(nn.ModuleList(layers))
375
+
376
+ self.block = nn.ModuleList(blocks)
377
+
378
+ # Layer norms
379
+ self.layer_norm = nn.ModuleList(
380
+ [nn.LayerNorm(config.hidden_sizes[i]) for i in range(config.num_encoder_blocks)]
381
+ )
382
+
383
+ def forward(
384
+ self,
385
+ pixel_values,
386
+ output_attentions=False,
387
+ output_hidden_states=False,
388
+ return_dict=True,
389
+ ):
390
+ all_hidden_states = () if output_hidden_states else None
391
+ all_self_attentions = () if output_attentions else None
392
+
393
+ batch_size = pixel_values.shape[0]
394
+
395
+ hidden_states = pixel_values
396
+ for idx, x in enumerate(zip(self.patch_embeddings, self.block, self.layer_norm)):
397
+ embedding_layer, block_layer, norm_layer = x
398
+ # first, obtain patch embeddings
399
+ hidden_states, height, width = embedding_layer(hidden_states)
400
+ # second, send embeddings through blocks
401
+ for i, blk in enumerate(block_layer):
402
+ layer_outputs = blk(hidden_states, height, width, output_attentions)
403
+ hidden_states = layer_outputs[0]
404
+ if output_attentions:
405
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
406
+ # third, apply layer norm
407
+ hidden_states = norm_layer(hidden_states)
408
+ # fourth, optionally reshape back to (batch_size, num_channels, height, width)
409
+ hidden_states = hidden_states.reshape(batch_size, height, width, -1).permute(0, 3, 1, 2).contiguous()
410
+ if output_hidden_states:
411
+ all_hidden_states = all_hidden_states + (hidden_states,)
412
+
413
+ if not return_dict:
414
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
415
+ return BaseModelOutput(
416
+ last_hidden_state=hidden_states,
417
+ hidden_states=all_hidden_states,
418
+ attentions=all_self_attentions,
419
+ )
420
+
421
+
422
+ class GLPNPreTrainedModel(PreTrainedModel):
423
+ """
424
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
425
+ models.
426
+ """
427
+
428
+ config_class = GLPNConfig
429
+ base_model_prefix = "glpn"
430
+ main_input_name = "pixel_values"
431
+
432
+ # Copied from transformers.models.segformer.modeling_segformer.SegformerPreTrainedModel._init_weights
433
+ def _init_weights(self, module):
434
+ """Initialize the weights"""
435
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
436
+ # Slightly different from the TF version which uses truncated_normal for initialization
437
+ # cf https://github.com/pytorch/pytorch/pull/5617
438
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
439
+ if module.bias is not None:
440
+ module.bias.data.zero_()
441
+ elif isinstance(module, nn.Embedding):
442
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
443
+ if module.padding_idx is not None:
444
+ module.weight.data[module.padding_idx].zero_()
445
+ elif isinstance(module, nn.LayerNorm):
446
+ module.bias.data.zero_()
447
+ module.weight.data.fill_(1.0)
448
+
449
+
450
+ GLPN_START_DOCSTRING = r"""
451
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
452
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
453
+ behavior.
454
+
455
+ Parameters:
456
+ config ([`GLPNConfig`]): Model configuration class with all the parameters of the model.
457
+ Initializing with a config file does not load the weights associated with the model, only the
458
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
459
+ """
460
+
461
+ GLPN_INPUTS_DOCSTRING = r"""
462
+
463
+ Args:
464
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
465
+ Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
466
+ [`AutoImageProcessor`]. See [`GLPNImageProcessor.__call__`] for details.
467
+
468
+ output_attentions (`bool`, *optional*):
469
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
470
+ tensors for more detail.
471
+ output_hidden_states (`bool`, *optional*):
472
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
473
+ more detail.
474
+ return_dict (`bool`, *optional*):
475
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
476
+ """
477
+
478
+
479
+ @add_start_docstrings(
480
+ "The bare GLPN encoder (Mix-Transformer) outputting raw hidden-states without any specific head on top.",
481
+ GLPN_START_DOCSTRING,
482
+ )
483
+ class GLPNModel(GLPNPreTrainedModel):
484
+ # Copied from transformers.models.segformer.modeling_segformer.SegformerModel.__init__ with Segformer->GLPN
485
+ def __init__(self, config):
486
+ super().__init__(config)
487
+ self.config = config
488
+
489
+ # hierarchical Transformer encoder
490
+ self.encoder = GLPNEncoder(config)
491
+
492
+ # Initialize weights and apply final processing
493
+ self.post_init()
494
+
495
+ def _prune_heads(self, heads_to_prune):
496
+ """
497
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
498
+ class PreTrainedModel
499
+ """
500
+ for layer, heads in heads_to_prune.items():
501
+ self.encoder.layer[layer].attention.prune_heads(heads)
502
+
503
+ @add_start_docstrings_to_model_forward(GLPN_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
504
+ @add_code_sample_docstrings(
505
+ checkpoint=_CHECKPOINT_FOR_DOC,
506
+ output_type=BaseModelOutput,
507
+ config_class=_CONFIG_FOR_DOC,
508
+ modality="vision",
509
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
510
+ )
511
+ # Copied from transformers.models.segformer.modeling_segformer.SegformerModel.forward
512
+ def forward(
513
+ self,
514
+ pixel_values: torch.FloatTensor,
515
+ output_attentions: Optional[bool] = None,
516
+ output_hidden_states: Optional[bool] = None,
517
+ return_dict: Optional[bool] = None,
518
+ ) -> Union[Tuple, BaseModelOutput]:
519
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
520
+ output_hidden_states = (
521
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
522
+ )
523
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
524
+
525
+ encoder_outputs = self.encoder(
526
+ pixel_values,
527
+ output_attentions=output_attentions,
528
+ output_hidden_states=output_hidden_states,
529
+ return_dict=return_dict,
530
+ )
531
+ sequence_output = encoder_outputs[0]
532
+
533
+ if not return_dict:
534
+ return (sequence_output,) + encoder_outputs[1:]
535
+
536
+ return BaseModelOutput(
537
+ last_hidden_state=sequence_output,
538
+ hidden_states=encoder_outputs.hidden_states,
539
+ attentions=encoder_outputs.attentions,
540
+ )
541
+
542
+
543
+ class GLPNSelectiveFeatureFusion(nn.Module):
544
+ """
545
+ Selective Feature Fusion module, as explained in the [paper](https://arxiv.org/abs/2201.07436) (section 3.4). This
546
+ module adaptively selects and integrates local and global features by obtaining an attention map for each feature.
547
+ """
548
+
549
+ def __init__(self, in_channel=64):
550
+ super().__init__()
551
+
552
+ self.convolutional_layer1 = nn.Sequential(
553
+ nn.Conv2d(in_channels=int(in_channel * 2), out_channels=in_channel, kernel_size=3, stride=1, padding=1),
554
+ nn.BatchNorm2d(in_channel),
555
+ nn.ReLU(),
556
+ )
557
+
558
+ self.convolutional_layer2 = nn.Sequential(
559
+ nn.Conv2d(in_channels=in_channel, out_channels=int(in_channel / 2), kernel_size=3, stride=1, padding=1),
560
+ nn.BatchNorm2d(int(in_channel / 2)),
561
+ nn.ReLU(),
562
+ )
563
+
564
+ self.convolutional_layer3 = nn.Conv2d(
565
+ in_channels=int(in_channel / 2), out_channels=2, kernel_size=3, stride=1, padding=1
566
+ )
567
+
568
+ self.sigmoid = nn.Sigmoid()
569
+
570
+ def forward(self, local_features, global_features):
571
+ # concatenate features along the channel dimension
572
+ features = torch.cat((local_features, global_features), dim=1)
573
+ # pass through convolutional layers
574
+ features = self.convolutional_layer1(features)
575
+ features = self.convolutional_layer2(features)
576
+ features = self.convolutional_layer3(features)
577
+ # apply sigmoid to get two-channel attention map
578
+ attn = self.sigmoid(features)
579
+ # construct hybrid features by adding element-wise
580
+ hybrid_features = local_features * attn[:, 0, :, :].unsqueeze(1) + global_features * attn[
581
+ :, 1, :, :
582
+ ].unsqueeze(1)
583
+
584
+ return hybrid_features
585
+
586
+
587
+ class GLPNDecoderStage(nn.Module):
588
+ def __init__(self, in_channels, out_channels):
589
+ super().__init__()
590
+ should_skip = in_channels == out_channels
591
+ self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1) if not should_skip else nn.Identity()
592
+ self.fusion = GLPNSelectiveFeatureFusion(out_channels)
593
+ self.upsample = nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False)
594
+
595
+ def forward(self, hidden_state, residual=None):
596
+ hidden_state = self.convolution(hidden_state)
597
+ if residual is not None:
598
+ hidden_state = self.fusion(hidden_state, residual)
599
+ hidden_state = self.upsample(hidden_state)
600
+
601
+ return hidden_state
605
+
606
+
607
+ class GLPNDecoder(nn.Module):
608
+ def __init__(self, config):
609
+ super().__init__()
610
+ # we use features from end -> start
611
+ reversed_hidden_sizes = config.hidden_sizes[::-1]
612
+ out_channels = config.decoder_hidden_size
613
+
614
+ self.stages = nn.ModuleList(
615
+ [GLPNDecoderStage(hidden_size, out_channels) for hidden_size in reversed_hidden_sizes]
616
+ )
617
+ # don't fuse in first stage
618
+ self.stages[0].fusion = None
619
+
620
+ self.final_upsample = nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False)
621
+
622
+ def forward(self, hidden_states: List[torch.Tensor]) -> List[torch.Tensor]:
623
+ stage_hidden_states = []
624
+ stage_hidden_state = None
625
+ for hidden_state, stage in zip(hidden_states[::-1], self.stages):
626
+ stage_hidden_state = stage(hidden_state, stage_hidden_state)
627
+ stage_hidden_states.append(stage_hidden_state)
628
+
629
+ stage_hidden_states[-1] = self.final_upsample(stage_hidden_state)
630
+
631
+ return stage_hidden_states
632
+
633
+
634
+ class SiLogLoss(nn.Module):
635
+ r"""
636
+ Implements the Scale-invariant log scale loss [Eigen et al., 2014](https://arxiv.org/abs/1406.2283).
637
+
638
+ $$L=\frac{1}{n} \sum_{i} d_{i}^{2}-\frac{1}{2 n^{2}}\left(\sum_{i} d_{i}\right)^{2}$$ where $d_{i}=\log y_{i}-\log
639
+ y_{i}^{*}$.
640
+
641
+ """
642
+
643
+ def __init__(self, lambd=0.5):
644
+ super().__init__()
645
+ self.lambd = lambd
646
+
647
+ def forward(self, pred, target):
648
+ valid_mask = (target > 0).detach()
649
+ diff_log = torch.log(target[valid_mask]) - torch.log(pred[valid_mask])
650
+ loss = torch.sqrt(torch.pow(diff_log, 2).mean() - self.lambd * torch.pow(diff_log.mean(), 2))
651
+
652
+ return loss
653
+
654
+
655
+ class GLPNDepthEstimationHead(nn.Module):
656
+ def __init__(self, config):
657
+ super().__init__()
658
+
659
+ self.config = config
660
+
661
+ channels = config.decoder_hidden_size
662
+ self.head = nn.Sequential(
663
+ nn.Conv2d(channels, channels, kernel_size=3, stride=1, padding=1),
664
+ nn.ReLU(inplace=False),
665
+ nn.Conv2d(channels, 1, kernel_size=3, stride=1, padding=1),
666
+ )
667
+
668
+ def forward(self, hidden_states: List[torch.Tensor]) -> torch.Tensor:
669
+ # use last features of the decoder
670
+ hidden_states = hidden_states[self.config.head_in_index]
671
+
672
+ hidden_states = self.head(hidden_states)
673
+
674
+ predicted_depth = torch.sigmoid(hidden_states) * self.config.max_depth
675
+ predicted_depth = predicted_depth.squeeze(dim=1)
676
+
677
+ return predicted_depth
678
+
679
+
680
+ @add_start_docstrings(
681
+ """GLPN Model transformer with a lightweight depth estimation head on top e.g. for KITTI, NYUv2.""",
682
+ GLPN_START_DOCSTRING,
683
+ )
684
+ class GLPNForDepthEstimation(GLPNPreTrainedModel):
685
+ def __init__(self, config):
686
+ super().__init__(config)
687
+
688
+ self.glpn = GLPNModel(config)
689
+ self.decoder = GLPNDecoder(config)
690
+ self.head = GLPNDepthEstimationHead(config)
691
+
692
+ # Initialize weights and apply final processing
693
+ self.post_init()
694
+
695
+ @add_start_docstrings_to_model_forward(GLPN_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
696
+ @replace_return_docstrings(output_type=DepthEstimatorOutput, config_class=_CONFIG_FOR_DOC)
697
+ def forward(
698
+ self,
699
+ pixel_values: torch.FloatTensor,
700
+ labels: Optional[torch.FloatTensor] = None,
701
+ output_attentions: Optional[bool] = None,
702
+ output_hidden_states: Optional[bool] = None,
703
+ return_dict: Optional[bool] = None,
704
+ ) -> Union[Tuple[torch.Tensor], DepthEstimatorOutput]:
705
+ r"""
706
+ labels (`torch.FloatTensor` of shape `(batch_size, height, width)`, *optional*):
707
+ Ground truth depth estimation maps for computing the loss.
708
+
709
+ Returns:
710
+
711
+ Examples:
712
+
713
+ ```python
714
+ >>> from transformers import AutoImageProcessor, GLPNForDepthEstimation
715
+ >>> import torch
716
+ >>> import numpy as np
717
+ >>> from PIL import Image
718
+ >>> import requests
719
+
720
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
721
+ >>> image = Image.open(requests.get(url, stream=True).raw)
722
+
723
+ >>> image_processor = AutoImageProcessor.from_pretrained("vinvino02/glpn-kitti")
724
+ >>> model = GLPNForDepthEstimation.from_pretrained("vinvino02/glpn-kitti")
725
+
726
+ >>> # prepare image for the model
727
+ >>> inputs = image_processor(images=image, return_tensors="pt")
728
+
729
+ >>> with torch.no_grad():
730
+ ... outputs = model(**inputs)
731
+ ... predicted_depth = outputs.predicted_depth
732
+
733
+ >>> # interpolate to original size
734
+ >>> prediction = torch.nn.functional.interpolate(
735
+ ... predicted_depth.unsqueeze(1),
736
+ ... size=image.size[::-1],
737
+ ... mode="bicubic",
738
+ ... align_corners=False,
739
+ ... )
740
+
741
+ >>> # visualize the prediction
742
+ >>> output = prediction.squeeze().cpu().numpy()
743
+ >>> formatted = (output * 255 / np.max(output)).astype("uint8")
744
+ >>> depth = Image.fromarray(formatted)
745
+ ```"""
746
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
747
+ output_hidden_states = (
748
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
749
+ )
750
+
751
+ outputs = self.glpn(
752
+ pixel_values,
753
+ output_attentions=output_attentions,
754
+ output_hidden_states=True, # we need the intermediate hidden states
755
+ return_dict=return_dict,
756
+ )
757
+
758
+ hidden_states = outputs.hidden_states if return_dict else outputs[1]
759
+
760
+ out = self.decoder(hidden_states)
761
+ predicted_depth = self.head(out)
762
+
763
+ loss = None
764
+ if labels is not None:
765
+ loss_fct = SiLogLoss()
766
+ loss = loss_fct(predicted_depth, labels)
767
+
768
+ if not return_dict:
769
+ if output_hidden_states:
770
+ output = (predicted_depth,) + outputs[1:]
771
+ else:
772
+ output = (predicted_depth,) + outputs[2:]
773
+ return ((loss,) + output) if loss is not None else output
774
+
775
+ return DepthEstimatorOutput(
776
+ loss=loss,
777
+ predicted_depth=predicted_depth,
778
+ hidden_states=outputs.hidden_states if output_hidden_states else None,
779
+ attentions=outputs.attentions,
780
+ )
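The `SiLogLoss` above masks out non-positive target pixels and compares predictions to targets in log space. A minimal sketch (editorial, not part of the file above) that recomputes the same quantity on made-up toy tensors, assuming the default `lambd=0.5`:

```python
import torch

# Toy depth maps; values are illustrative only. The zero entry in `target`
# is excluded by the valid mask, mirroring SiLogLoss.forward.
pred = torch.tensor([[1.0, 2.0], [3.0, 4.0]])
target = torch.tensor([[1.2, 2.0], [2.5, 0.0]])

valid_mask = target > 0
diff_log = torch.log(target[valid_mask]) - torch.log(pred[valid_mask])

# sqrt( mean(d_i^2) - lambd * mean(d_i)^2 ) with lambd = 0.5
loss = torch.sqrt(torch.pow(diff_log, 2).mean() - 0.5 * torch.pow(diff_log.mean(), 2))
print(loss)
```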
env-llmeval/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.67 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/configuration_lxmert.cpython-310.pyc ADDED
Binary file (7.97 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/convert_lxmert_original_tf_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (1.42 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/modeling_lxmert.cpython-310.pyc ADDED
Binary file (45.9 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/modeling_tf_lxmert.cpython-310.pyc ADDED
Binary file (51.6 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/tokenization_lxmert.cpython-310.pyc ADDED
Binary file (17.3 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/tokenization_lxmert_fast.cpython-310.pyc ADDED
Binary file (7.13 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/lxmert/configuration_lxmert.py ADDED
@@ -0,0 +1,171 @@
1
+ # coding=utf-8
2
+ # Copyright 2018, Hao Tan, Mohit Bansal
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ LXMERT model configuration"""
16
+
17
+
18
+ from ...configuration_utils import PretrainedConfig
19
+ from ...utils import logging
20
+
21
+
22
+ logger = logging.get_logger(__name__)
23
+
24
+ LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
25
+ "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
26
+ }
27
+
28
+
29
+ class LxmertConfig(PretrainedConfig):
30
+ r"""
31
+ This is the configuration class to store the configuration of a [`LxmertModel`] or a [`TFLxmertModel`]. It is used
32
+ to instantiate a LXMERT model according to the specified arguments, defining the model architecture. Instantiating
33
+ a configuration with the defaults will yield a similar configuration to that of the Lxmert
34
+ [unc-nlp/lxmert-base-uncased](https://huggingface.co/unc-nlp/lxmert-base-uncased) architecture.
35
+
36
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
37
+ documentation from [`PretrainedConfig`] for more information.
38
+
39
+
40
+ Args:
41
+ vocab_size (`int`, *optional*, defaults to 30522):
42
+ Vocabulary size of the LXMERT model. Defines the number of different tokens that can be represented by the
43
+ `inputs_ids` passed when calling [`LxmertModel`] or [`TFLxmertModel`].
44
+ hidden_size (`int`, *optional*, defaults to 768):
45
+ Dimensionality of the encoder layers and the pooler layer.
46
+ num_attention_heads (`int`, *optional*, defaults to 12):
47
+ Number of attention heads for each attention layer in the Transformer encoder.
48
+ num_qa_labels (`int`, *optional*, defaults to 9500):
49
+ This represents the total number of different question answering (QA) labels there are. If using more than
50
+ one dataset with QA, the user will need to account for the total number of labels across all of the
51
+ datasets.
52
+ num_object_labels (`int`, *optional*, defaults to 1600):
53
+ This represents the total number of semantically unique objects that lxmert will be able to classify a
54
+ pooled-object feature as belonging to.
55
+ num_attr_labels (`int`, *optional*, defaults to 400):
56
+ This represents the total number of semantically unique attributes that lxmert will be able to classify a
57
+ pooled-object feature as possessing.
58
+ intermediate_size (`int`, *optional*, defaults to 3072):
59
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
60
+ hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
61
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
62
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
63
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
64
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
65
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
66
+ The dropout ratio for the attention probabilities.
67
+ max_position_embeddings (`int`, *optional*, defaults to 512):
68
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
69
+ just in case (e.g., 512 or 1024 or 2048).
70
+ type_vocab_size (`int`, *optional*, defaults to 2):
71
+ The vocabulary size of the *token_type_ids* passed into [`BertModel`].
72
+ initializer_range (`float`, *optional*, defaults to 0.02):
73
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
74
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
75
+ The epsilon used by the layer normalization layers.
76
+ l_layers (`int`, *optional*, defaults to 9):
77
+ Number of hidden layers in the Transformer language encoder.
78
+ x_layers (`int`, *optional*, defaults to 5):
79
+ Number of hidden layers in the Transformer cross modality encoder.
80
+ r_layers (`int`, *optional*, defaults to 5):
81
+ Number of hidden layers in the Transformer visual encoder.
82
+ visual_feat_dim (`int`, *optional*, defaults to 2048):
83
+ This represents the last dimension of the pooled-object features used as input for the model, representing
84
+ the size of each object feature itself.
85
+ visual_pos_dim (`int`, *optional*, defaults to 4):
86
+ This represents the number of spatial features that are mixed into the visual features. The default is set
87
+ to 4 because most commonly this will represent the location of a bounding box, i.e., (x, y, width, height).
88
+ visual_loss_normalizer (`float`, *optional*, defaults to 6.67):
89
+ The scaling factor by which each visual loss is multiplied if, during pretraining, one decides to
90
+ train with multiple vision-based loss objectives.
91
+ task_matched (`bool`, *optional*, defaults to `True`):
92
+ This task is used for sentence-image matching. If the sentence correctly describes the image the label will
93
+ be 1. If the sentence does not correctly describe the image, the label will be 0.
94
+ task_mask_lm (`bool`, *optional*, defaults to `True`):
95
+ Whether or not to add masked language modeling (as used in pretraining models such as BERT) to the loss
96
+ objective.
97
+ task_obj_predict (`bool`, *optional*, defaults to `True`):
98
+ Whether or not to add object prediction, attribute prediction and feature regression to the loss objective.
99
+ task_qa (`bool`, *optional*, defaults to `True`):
100
+ Whether or not to add the question-answering loss to the objective.
101
+ visual_obj_loss (`bool`, *optional*, defaults to `True`):
102
+ Whether or not to calculate the object-prediction loss objective.
103
+ visual_attr_loss (`bool`, *optional*, defaults to `True`):
104
+ Whether or not to calculate the attribute-prediction loss objective.
105
+ visual_feat_loss (`bool`, *optional*, defaults to `True`):
106
+ Whether or not to calculate the feature-regression loss objective.
107
+ """
108
+
109
+ model_type = "lxmert"
110
+ attribute_map = {}
111
+
112
+ def __init__(
113
+ self,
114
+ vocab_size=30522,
115
+ hidden_size=768,
116
+ num_attention_heads=12,
117
+ num_qa_labels=9500,
118
+ num_object_labels=1600,
119
+ num_attr_labels=400,
120
+ intermediate_size=3072,
121
+ hidden_act="gelu",
122
+ hidden_dropout_prob=0.1,
123
+ attention_probs_dropout_prob=0.1,
124
+ max_position_embeddings=512,
125
+ type_vocab_size=2,
126
+ initializer_range=0.02,
127
+ layer_norm_eps=1e-12,
128
+ l_layers=9,
129
+ x_layers=5,
130
+ r_layers=5,
131
+ visual_feat_dim=2048,
132
+ visual_pos_dim=4,
133
+ visual_loss_normalizer=6.67,
134
+ task_matched=True,
135
+ task_mask_lm=True,
136
+ task_obj_predict=True,
137
+ task_qa=True,
138
+ visual_obj_loss=True,
139
+ visual_attr_loss=True,
140
+ visual_feat_loss=True,
141
+ **kwargs,
142
+ ):
143
+ self.vocab_size = vocab_size
144
+ self.hidden_size = hidden_size
145
+ self.num_attention_heads = num_attention_heads
146
+ self.hidden_act = hidden_act
147
+ self.intermediate_size = intermediate_size
148
+ self.hidden_dropout_prob = hidden_dropout_prob
149
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
150
+ self.max_position_embeddings = max_position_embeddings
151
+ self.type_vocab_size = type_vocab_size
152
+ self.initializer_range = initializer_range
153
+ self.layer_norm_eps = layer_norm_eps
154
+ self.num_qa_labels = num_qa_labels
155
+ self.num_object_labels = num_object_labels
156
+ self.num_attr_labels = num_attr_labels
157
+ self.l_layers = l_layers
158
+ self.x_layers = x_layers
159
+ self.r_layers = r_layers
160
+ self.visual_feat_dim = visual_feat_dim
161
+ self.visual_pos_dim = visual_pos_dim
162
+ self.visual_loss_normalizer = visual_loss_normalizer
163
+ self.task_matched = task_matched
164
+ self.task_mask_lm = task_mask_lm
165
+ self.task_obj_predict = task_obj_predict
166
+ self.task_qa = task_qa
167
+ self.visual_obj_loss = visual_obj_loss
168
+ self.visual_attr_loss = visual_attr_loss
169
+ self.visual_feat_loss = visual_feat_loss
170
+ self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
171
+ super().__init__(**kwargs)
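The `__init__` above collapses the three per-modality depths into a single `num_hidden_layers` dict. A small sketch (editorial, not part of the file) showing the resulting mapping with the default layer counts:

```python
from transformers import LxmertConfig

config = LxmertConfig(l_layers=9, x_layers=5, r_layers=5)

# The per-modality depths end up in one dict, keyed as in __init__ above.
print(config.num_hidden_layers)  # {'vision': 5, 'cross_encoder': 5, 'language': 9}
```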
env-llmeval/lib/python3.10/site-packages/transformers/models/lxmert/convert_lxmert_original_tf_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,60 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert LXMERT checkpoint."""
16
+
17
+
18
+ import argparse
19
+
20
+ import torch
21
+
22
+ from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
23
+ from transformers.utils import logging
24
+
25
+
26
+ logging.set_verbosity_info()
27
+
28
+
29
+ def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
30
+ # Initialise PyTorch model
31
+ config = LxmertConfig.from_json_file(config_file)
32
+ print(f"Building PyTorch model from configuration: {config}")
33
+ model = LxmertForPreTraining(config)
34
+
35
+ # Load weights from tf checkpoint
36
+ load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)
37
+
38
+ # Save pytorch-model
39
+ print(f"Save PyTorch model to {pytorch_dump_path}")
40
+ torch.save(model.state_dict(), pytorch_dump_path)
41
+
42
+
43
+ if __name__ == "__main__":
44
+ parser = argparse.ArgumentParser()
45
+ # Required parameters
46
+ parser.add_argument(
47
+ "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
48
+ )
49
+ parser.add_argument(
50
+ "--config_file",
51
+ default=None,
52
+ type=str,
53
+ required=True,
54
+ help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
55
+ )
56
+ parser.add_argument(
57
+ "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
58
+ )
59
+ args = parser.parse_args()
60
+ convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
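A hedged follow-up sketch (editorial, not part of the script above): once the conversion has produced a PyTorch dump, the state dict can be loaded back into a freshly built `LxmertForPreTraining` as a quick sanity check. The paths below are placeholders for whatever was passed as `--config_file` and `--pytorch_dump_path`.

```python
import torch

from transformers import LxmertConfig, LxmertForPreTraining

# Placeholder paths from a previous run of the conversion script above.
config = LxmertConfig.from_json_file("./lxmert/config.json")
model = LxmertForPreTraining(config)

state_dict = torch.load("./lxmert/pytorch_model.bin", map_location="cpu")
missing, unexpected = model.load_state_dict(state_dict, strict=False)
print(f"missing keys: {len(missing)}, unexpected keys: {len(unexpected)}")
```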
env-llmeval/lib/python3.10/site-packages/transformers/models/lxmert/tokenization_lxmert_fast.py ADDED
@@ -0,0 +1,191 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 The Google AI Team, Stanford University and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import json
17
+ from typing import List, Optional, Tuple
18
+
19
+ from tokenizers import normalizers
20
+
21
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
22
+ from .tokenization_lxmert import LxmertTokenizer
23
+
24
+
25
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
26
+
27
+ PRETRAINED_VOCAB_FILES_MAP = {
28
+ "vocab_file": {
29
+ "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
30
+ },
31
+ "tokenizer_file": {
32
+ "unc-nlp/lxmert-base-uncased": (
33
+ "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
34
+ ),
35
+ },
36
+ }
37
+
38
+ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
39
+ "unc-nlp/lxmert-base-uncased": 512,
40
+ }
41
+
42
+ PRETRAINED_INIT_CONFIGURATION = {
43
+ "unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
44
+ }
45
+
46
+
47
+ # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast with bert-base-cased->unc-nlp/lxmert-base-uncased, BERT->Lxmert, Bert->Lxmert
48
+ class LxmertTokenizerFast(PreTrainedTokenizerFast):
49
+ r"""
50
+ Construct a "fast" Lxmert tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece.
51
+
52
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
53
+ refer to this superclass for more information regarding those methods.
54
+
55
+ Args:
56
+ vocab_file (`str`):
57
+ File containing the vocabulary.
58
+ do_lower_case (`bool`, *optional*, defaults to `True`):
59
+ Whether or not to lowercase the input when tokenizing.
60
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
61
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
62
+ token instead.
63
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
64
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
65
+ sequence classification or for a text and a question for question answering. It is also used as the last
66
+ token of a sequence built with special tokens.
67
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
68
+ The token used for padding, for example when batching sequences of different lengths.
69
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
70
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
71
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
72
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
73
+ The token used for masking values. This is the token used when training this model with masked language
74
+ modeling. This is the token which the model will try to predict.
75
+ clean_text (`bool`, *optional*, defaults to `True`):
76
+ Whether or not to clean the text before tokenization by removing any control characters and replacing all
77
+ whitespaces by the classic one.
78
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
79
+ Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this
80
+ issue](https://github.com/huggingface/transformers/issues/328)).
81
+ strip_accents (`bool`, *optional*):
82
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
83
+ value for `lowercase` (as in the original Lxmert).
84
+ wordpieces_prefix (`str`, *optional*, defaults to `"##"`):
85
+ The prefix for subwords.
86
+ """
87
+
88
+ vocab_files_names = VOCAB_FILES_NAMES
89
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
90
+ pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
91
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
92
+ slow_tokenizer_class = LxmertTokenizer
93
+
94
+ def __init__(
95
+ self,
96
+ vocab_file=None,
97
+ tokenizer_file=None,
98
+ do_lower_case=True,
99
+ unk_token="[UNK]",
100
+ sep_token="[SEP]",
101
+ pad_token="[PAD]",
102
+ cls_token="[CLS]",
103
+ mask_token="[MASK]",
104
+ tokenize_chinese_chars=True,
105
+ strip_accents=None,
106
+ **kwargs,
107
+ ):
108
+ super().__init__(
109
+ vocab_file,
110
+ tokenizer_file=tokenizer_file,
111
+ do_lower_case=do_lower_case,
112
+ unk_token=unk_token,
113
+ sep_token=sep_token,
114
+ pad_token=pad_token,
115
+ cls_token=cls_token,
116
+ mask_token=mask_token,
117
+ tokenize_chinese_chars=tokenize_chinese_chars,
118
+ strip_accents=strip_accents,
119
+ **kwargs,
120
+ )
121
+
122
+ normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
123
+ if (
124
+ normalizer_state.get("lowercase", do_lower_case) != do_lower_case
125
+ or normalizer_state.get("strip_accents", strip_accents) != strip_accents
126
+ or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
127
+ ):
128
+ normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
129
+ normalizer_state["lowercase"] = do_lower_case
130
+ normalizer_state["strip_accents"] = strip_accents
131
+ normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
132
+ self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
133
+
134
+ self.do_lower_case = do_lower_case
135
+
136
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
137
+ """
138
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
139
+ adding special tokens. A Lxmert sequence has the following format:
140
+
141
+ - single sequence: `[CLS] X [SEP]`
142
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
143
+
144
+ Args:
145
+ token_ids_0 (`List[int]`):
146
+ List of IDs to which the special tokens will be added.
147
+ token_ids_1 (`List[int]`, *optional*):
148
+ Optional second list of IDs for sequence pairs.
149
+
150
+ Returns:
151
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
152
+ """
153
+ output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
154
+
155
+ if token_ids_1 is not None:
156
+ output += token_ids_1 + [self.sep_token_id]
157
+
158
+ return output
159
+
160
+ def create_token_type_ids_from_sequences(
161
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
162
+ ) -> List[int]:
163
+ """
164
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A Lxmert sequence
165
+ pair mask has the following format:
166
+
167
+ ```
168
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
169
+ | first sequence | second sequence |
170
+ ```
171
+
172
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
173
+
174
+ Args:
175
+ token_ids_0 (`List[int]`):
176
+ List of IDs.
177
+ token_ids_1 (`List[int]`, *optional*):
178
+ Optional second list of IDs for sequence pairs.
179
+
180
+ Returns:
181
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
182
+ """
183
+ sep = [self.sep_token_id]
184
+ cls = [self.cls_token_id]
185
+ if token_ids_1 is None:
186
+ return len(cls + token_ids_0 + sep) * [0]
187
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
188
+
189
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
190
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
191
+ return tuple(files)
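A short usage sketch for the fast tokenizer above (editorial, not part of the file; it downloads the `unc-nlp/lxmert-base-uncased` files on first use), illustrating the `[CLS] A [SEP] B [SEP]` layout and the 0/1 token type mask described in the docstrings:

```python
from transformers import LxmertTokenizerFast

tokenizer = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")

ids_a = tokenizer.convert_tokens_to_ids(["who", "is", "there"])
ids_b = tokenizer.convert_tokens_to_ids(["a", "cat"])

# [CLS] A [SEP] B [SEP]
print(tokenizer.build_inputs_with_special_tokens(ids_a, ids_b))
# 0s cover [CLS] A [SEP]; 1s cover B [SEP].
print(tokenizer.create_token_type_ids_from_sequences(ids_a, ids_b))
```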
env-llmeval/lib/python3.10/site-packages/transformers/models/megatron_gpt2/__init__.py ADDED
@@ -0,0 +1,13 @@
1
+ # Copyright 2021 NVIDIA Corporation and The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
env-llmeval/lib/python3.10/site-packages/transformers/models/megatron_gpt2/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (198 Bytes).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/megatron_gpt2/__pycache__/checkpoint_reshaping_and_interoperability.cpython-310.pyc ADDED
Binary file (21.8 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/megatron_gpt2/__pycache__/convert_megatron_gpt2_checkpoint.cpython-310.pyc ADDED
Binary file (6.1 kB).