applied-ai-018 committed
Commit f50f36b · verified · 1 parent: 04d7a84

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. ckpts/universal/global_step20/zero/15.attention.dense.weight/exp_avg_sq.pt +3 -0
  2. ckpts/universal/global_step20/zero/23.attention.query_key_value.weight/exp_avg_sq.pt +3 -0
  3. lm-evaluation-harness/tests/testdata/blimp_distractor_agreement_relational_noun-v0-res.json +1 -0
  4. lm-evaluation-harness/tests/testdata/blimp_left_branch_island_simple_question-v0-loglikelihood +1 -0
  5. lm-evaluation-harness/tests/testdata/blimp_tough_vs_raising_2-v0-res.json +1 -0
  6. lm-evaluation-harness/tests/testdata/blimp_wh_vs_that_with_gap-v0-loglikelihood +1 -0
  7. lm-evaluation-harness/tests/testdata/boolq-v1-res.json +1 -0
  8. lm-evaluation-harness/tests/testdata/cola-v0-loglikelihood +1 -0
  9. lm-evaluation-harness/tests/testdata/crows_pairs_english_autre-v0-loglikelihood +1 -0
  10. lm-evaluation-harness/tests/testdata/crows_pairs_english_nationality-v0-loglikelihood +1 -0
  11. lm-evaluation-harness/tests/testdata/gsm8k-v0-res.json +1 -0
  12. lm-evaluation-harness/tests/testdata/hendrycksTest-astronomy-v0-loglikelihood +1 -0
  13. lm-evaluation-harness/tests/testdata/hendrycksTest-sociology-v0-loglikelihood +1 -0
  14. lm-evaluation-harness/tests/testdata/iwslt17-en-ar-v0-res.json +1 -0
  15. lm-evaluation-harness/tests/testdata/lambada_mt_de-v0-loglikelihood +1 -0
  16. lm-evaluation-harness/tests/testdata/pile_hackernews-v1-loglikelihood_rolling +1 -0
  17. lm-evaluation-harness/tests/testdata/pile_pubmed-central-v1-loglikelihood_rolling +1 -0
  18. lm-evaluation-harness/tests/testdata/pile_wikipedia-v0-res.json +1 -0
  19. lm-evaluation-harness/tests/testdata/wmt16-en-ro-v0-greedy_until +1 -0
  20. lm-evaluation-harness/tests/testdata/wmt20-en-km-v0-greedy_until +1 -0
  21. lm-evaluation-harness/tests/testdata/wmt20-en-zh-v0-greedy_until +1 -0
  22. venv/lib/python3.10/site-packages/transformers/models/efficientformer/__init__.py +109 -0
  23. venv/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/__init__.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/configuration_efficientformer.cpython-310.pyc +0 -0
  25. venv/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/convert_efficientformer_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  26. venv/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/image_processing_efficientformer.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/modeling_efficientformer.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/modeling_tf_efficientformer.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/transformers/models/efficientformer/configuration_efficientformer.py +170 -0
  30. venv/lib/python3.10/site-packages/transformers/models/efficientformer/convert_efficientformer_original_pytorch_checkpoint_to_pytorch.py +252 -0
  31. venv/lib/python3.10/site-packages/transformers/models/efficientformer/image_processing_efficientformer.py +321 -0
  32. venv/lib/python3.10/site-packages/transformers/models/efficientformer/modeling_efficientformer.py +803 -0
  33. venv/lib/python3.10/site-packages/transformers/models/efficientformer/modeling_tf_efficientformer.py +1193 -0
  34. venv/lib/python3.10/site-packages/transformers/models/fuyu/__init__.py +73 -0
  35. venv/lib/python3.10/site-packages/transformers/models/fuyu/__pycache__/__init__.cpython-310.pyc +0 -0
  36. venv/lib/python3.10/site-packages/transformers/models/fuyu/__pycache__/configuration_fuyu.cpython-310.pyc +0 -0
  37. venv/lib/python3.10/site-packages/transformers/models/fuyu/__pycache__/convert_fuyu_model_weights_to_hf.cpython-310.pyc +0 -0
  38. venv/lib/python3.10/site-packages/transformers/models/fuyu/__pycache__/image_processing_fuyu.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/transformers/models/fuyu/__pycache__/modeling_fuyu.cpython-310.pyc +0 -0
  40. venv/lib/python3.10/site-packages/transformers/models/fuyu/__pycache__/processing_fuyu.cpython-310.pyc +0 -0
  41. venv/lib/python3.10/site-packages/transformers/models/fuyu/configuration_fuyu.py +211 -0
  42. venv/lib/python3.10/site-packages/transformers/models/fuyu/convert_fuyu_model_weights_to_hf.py +134 -0
  43. venv/lib/python3.10/site-packages/transformers/models/fuyu/image_processing_fuyu.py +736 -0
  44. venv/lib/python3.10/site-packages/transformers/models/fuyu/modeling_fuyu.py +358 -0
  45. venv/lib/python3.10/site-packages/transformers/models/fuyu/processing_fuyu.py +694 -0
  46. venv/lib/python3.10/site-packages/transformers/models/hubert/__init__.py +83 -0
  47. venv/lib/python3.10/site-packages/transformers/models/hubert/__pycache__/__init__.cpython-310.pyc +0 -0
  48. venv/lib/python3.10/site-packages/transformers/models/hubert/__pycache__/configuration_hubert.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/transformers/models/hubert/__pycache__/convert_distilhubert_original_s3prl_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  50. venv/lib/python3.10/site-packages/transformers/models/hubert/__pycache__/convert_hubert_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc +0 -0
ckpts/universal/global_step20/zero/15.attention.dense.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6b06a5e68fdff959c564fe2e0059f1ed8e30f41ec8853f4ddd1dab57c0969668
+ size 16778411
ckpts/universal/global_step20/zero/23.attention.query_key_value.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b6978ce95f91738af50ffee8d0361627c695bf9f70b281e6efb4db8fb02a2d34
+ size 50332843
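
The two optimizer-state tensors above are stored as Git LFS pointer files rather than raw binaries: each pointer carries only the LFS spec version, the SHA-256 object id, and the payload size. As a minimal sketch (not part of this commit; the helper name `parse_lfs_pointer` is made up for illustration), a pointer in the format shown can be read like this:

```python
from pathlib import Path


def parse_lfs_pointer(path: str) -> dict:
    """Parse a Git LFS pointer file (version / oid / size lines) into a dict."""
    fields = {}
    for line in Path(path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields


# Hypothetical local path taken from the diff above:
# parse_lfs_pointer("ckpts/universal/global_step20/zero/15.attention.dense.weight/exp_avg_sq.pt")
# -> {"version": "https://git-lfs.github.com/spec/v1", "oid": "sha256:6b06...", "size": "16778411"}
```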
lm-evaluation-harness/tests/testdata/blimp_distractor_agreement_relational_noun-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_distractor_agreement_relational_noun": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_distractor_agreement_relational_noun": 0}}
lm-evaluation-harness/tests/testdata/blimp_left_branch_island_simple_question-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 6cb36bbdae7754f8832f50872c3dd511ce12547e00fa0771deb747be3355eb85
lm-evaluation-harness/tests/testdata/blimp_tough_vs_raising_2-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_tough_vs_raising_2": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_tough_vs_raising_2": 0}}
lm-evaluation-harness/tests/testdata/blimp_wh_vs_that_with_gap-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ d41a9b85e4c31e445bf9b46b8642df02203ccc02b4a9b254bf76066d5c54b4b7
lm-evaluation-harness/tests/testdata/boolq-v1-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"boolq": {"acc": 0.5048929663608562, "acc_stderr": 0.00874463623355505}}, "versions": {"boolq": 1}}
lm-evaluation-harness/tests/testdata/cola-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ e8635578ed8ee70b707a666d35e468b9321db24470f80c92080651e2bfa01751
lm-evaluation-harness/tests/testdata/crows_pairs_english_autre-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ a197ccc8538231404a8e43f5ed0fbbfb2c317b4da337f6e7aa9642131aeb426a
lm-evaluation-harness/tests/testdata/crows_pairs_english_nationality-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ b85bc849811ccfa9971a6ee3fca7342752c314c0cb6f126e10d9ec4d0450c541
lm-evaluation-harness/tests/testdata/gsm8k-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"gsm8k": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"gsm8k": 0}}
lm-evaluation-harness/tests/testdata/hendrycksTest-astronomy-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ bed1e47127cc2893c6aef63b9a0909cca31aa351a703da2a166b01cae03c3311
lm-evaluation-harness/tests/testdata/hendrycksTest-sociology-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ f99a3caece11169f2a5cc951001f92027104afd25d29b2a399883bd4bf118605
lm-evaluation-harness/tests/testdata/iwslt17-en-ar-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"iwslt17-en-ar": {"bleu": 0.0, "bleu_stderr": 0.0, "chrf": 0.0, "chrf_stderr": 0.0, "ter": 1.0, "ter_stderr": 0.0}}, "versions": {"iwslt17-en-ar": 0}}
lm-evaluation-harness/tests/testdata/lambada_mt_de-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 5ad125e1708499832b2cee8c3388f89f9c0277010fd96fbd3359039ce8105984
lm-evaluation-harness/tests/testdata/pile_hackernews-v1-loglikelihood_rolling ADDED
@@ -0,0 +1 @@
+ ec1082ee5a5326e0d57aa4e73b634937140c1de9af95f154e8ab57b05d9b422b
lm-evaluation-harness/tests/testdata/pile_pubmed-central-v1-loglikelihood_rolling ADDED
@@ -0,0 +1 @@
+ 40b39d120d99a145690444e86acc3e3e24d41e6e0538a75e26929ad84926e5e0
lm-evaluation-harness/tests/testdata/pile_wikipedia-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"pile_wikipedia": {"bits_per_byte": 0.00016834722287561703, "byte_perplexity": 1.0001683613940646, "word_perplexity": 1.001084677949439}}, "versions": {"pile_wikipedia": 0}}
lm-evaluation-harness/tests/testdata/wmt16-en-ro-v0-greedy_until ADDED
@@ -0,0 +1 @@
+ 4be7fdda313394f19b5995b00ada1dfa3bb158ee1f020ef8d07ecea260fa60b2
lm-evaluation-harness/tests/testdata/wmt20-en-km-v0-greedy_until ADDED
@@ -0,0 +1 @@
+ eb5365c46f22ffec9a157991627d6e1fd1117fccffaedfc73619e93bafb5a408
lm-evaluation-harness/tests/testdata/wmt20-en-zh-v0-greedy_until ADDED
@@ -0,0 +1 @@
+ 67f0333ddbcb07d7a9ac12919129a18fe4fea24e4826a11bbdde4fd5ed5ed83f
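
The lm-evaluation-harness fixtures added above come in two flavours: `*-res.json` files holding expected task results, and extension-less files holding a single SHA-256 digest of the expected requests. A hedged sketch of inspecting one of the result fixtures (file name taken from the diff above):

```python
import json

# Load an expected-results fixture from this commit and read the accuracy entry.
with open("lm-evaluation-harness/tests/testdata/boolq-v1-res.json") as handle:
    fixture = json.load(handle)

print(fixture["results"]["boolq"]["acc"])  # 0.5048929663608562
print(fixture["versions"]["boolq"])        # 1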
venv/lib/python3.10/site-packages/transformers/models/efficientformer/__init__.py ADDED
@@ -0,0 +1,109 @@
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ from typing import TYPE_CHECKING
+
+ from ...utils import (
+     OptionalDependencyNotAvailable,
+     _LazyModule,
+     is_tf_available,
+     is_torch_available,
+     is_vision_available,
+ )
+
+
+ _import_structure = {
+     "configuration_efficientformer": [
+         "EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
+         "EfficientFormerConfig",
+     ]
+ }
+
+ try:
+     if not is_vision_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["image_processing_efficientformer"] = ["EfficientFormerImageProcessor"]
+
+ try:
+     if not is_torch_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_efficientformer"] = [
+         "EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
+         "EfficientFormerForImageClassification",
+         "EfficientFormerForImageClassificationWithTeacher",
+         "EfficientFormerModel",
+         "EfficientFormerPreTrainedModel",
+     ]
+
+ try:
+     if not is_tf_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_tf_efficientformer"] = [
+         "TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
+         "TFEfficientFormerForImageClassification",
+         "TFEfficientFormerForImageClassificationWithTeacher",
+         "TFEfficientFormerModel",
+         "TFEfficientFormerPreTrainedModel",
+     ]
+
+ if TYPE_CHECKING:
+     from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
+
+     try:
+         if not is_vision_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .image_processing_efficientformer import EfficientFormerImageProcessor
+
+     try:
+         if not is_torch_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_efficientformer import (
+             EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
+             EfficientFormerForImageClassification,
+             EfficientFormerForImageClassificationWithTeacher,
+             EfficientFormerModel,
+             EfficientFormerPreTrainedModel,
+         )
+     try:
+         if not is_tf_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_tf_efficientformer import (
+             TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
+             TFEfficientFormerForImageClassification,
+             TFEfficientFormerForImageClassificationWithTeacher,
+             TFEfficientFormerModel,
+             TFEfficientFormerPreTrainedModel,
+         )
+
+ else:
+     import sys
+
+     sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
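
This `__init__.py` follows the usual transformers lazy-import pattern: at runtime the package module is replaced by a `_LazyModule`, so the torch/TF/vision submodules are only imported when one of the listed names is first accessed. A small hedged sketch of what that means for user code (assuming torch is installed):

```python
# Importing the names is cheap; modeling_efficientformer is only loaded by
# _LazyModule when the attribute is first resolved.
from transformers import EfficientFormerConfig, EfficientFormerModel

config = EfficientFormerConfig()          # defaults mirror efficientformer-l1
model = EfficientFormerModel(config)      # randomly initialised weights
print(model.config.hidden_sizes)          # [48, 96, 224, 448]
```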
venv/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.74 kB).
 
venv/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/configuration_efficientformer.cpython-310.pyc ADDED
Binary file (6.94 kB).
 
venv/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/convert_efficientformer_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (6.15 kB).
 
venv/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/image_processing_efficientformer.cpython-310.pyc ADDED
Binary file (12.9 kB).
 
venv/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/modeling_efficientformer.cpython-310.pyc ADDED
Binary file (27.9 kB).
 
venv/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/modeling_tf_efficientformer.cpython-310.pyc ADDED
Binary file (37.1 kB).
 
venv/lib/python3.10/site-packages/transformers/models/efficientformer/configuration_efficientformer.py ADDED
@@ -0,0 +1,170 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """ EfficientFormer model configuration"""
+
+ from typing import List
+
+ from ...configuration_utils import PretrainedConfig
+ from ...utils import logging
+
+
+ logger = logging.get_logger(__name__)
+
+
+ from ..deprecated._archive_maps import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402
+
+
+ class EfficientFormerConfig(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of an [`EfficientFormerModel`]. It is used to
+     instantiate an EfficientFormer model according to the specified arguments, defining the model architecture.
+     Instantiating a configuration with the defaults will yield a similar configuration to that of the EfficientFormer
+     [snap-research/efficientformer-l1](https://huggingface.co/snap-research/efficientformer-l1) architecture.
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+     Args:
+         depths (`List(int)`, *optional*, defaults to `[3, 2, 6, 4]`)
+             Depth of each stage.
+         hidden_sizes (`List(int)`, *optional*, defaults to `[48, 96, 224, 448]`)
+             Dimensionality of each stage.
+         downsamples (`List(bool)`, *optional*, defaults to `[True, True, True, True]`)
+             Whether or not to downsample inputs between two stages.
+         dim (`int`, *optional*, defaults to 448):
+             Number of channels in Meta3D layers
+         key_dim (`int`, *optional*, defaults to 32):
+             The size of the key in meta3D block.
+         attention_ratio (`int`, *optional*, defaults to 4):
+             Ratio of the dimension of the query and value to the dimension of the key in MSHA block
+         resolution (`int`, *optional*, defaults to 7)
+             Size of each patch
+         num_hidden_layers (`int`, *optional*, defaults to 5):
+             Number of hidden layers in the Transformer encoder.
+         num_attention_heads (`int`, *optional*, defaults to 8):
+             Number of attention heads for each attention layer in the 3D MetaBlock.
+         mlp_expansion_ratio (`int`, *optional*, defaults to 4):
+             Ratio of size of the hidden dimensionality of an MLP to the dimensionality of its input.
+         hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
+             The dropout probability for all fully connected layers in the embeddings and encoder.
+         patch_size (`int`, *optional*, defaults to 16):
+             The size (resolution) of each patch.
+         num_channels (`int`, *optional*, defaults to 3):
+             The number of input channels.
+         pool_size (`int`, *optional*, defaults to 3):
+             Kernel size of pooling layers.
+         downsample_patch_size (`int`, *optional*, defaults to 3):
+             The size of patches in downsampling layers.
+         downsample_stride (`int`, *optional*, defaults to 2):
+             The stride of convolution kernels in downsampling layers.
+         downsample_pad (`int`, *optional*, defaults to 1):
+             Padding in downsampling layers.
+         drop_path_rate (`int`, *optional*, defaults to 0):
+             Rate at which to increase dropout probability in DropPath.
+         num_meta3d_blocks (`int`, *optional*, defaults to 1):
+             The number of 3D MetaBlocks in the last stage.
+         distillation (`bool`, *optional*, defaults to `True`):
+             Whether to add a distillation head.
+         use_layer_scale (`bool`, *optional*, defaults to `True`):
+             Whether to scale outputs from token mixers.
+         layer_scale_init_value (`float`, *optional*, defaults to 1e-5):
+             Factor by which outputs from token mixers are scaled.
+         hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+             The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+             `"relu"`, `"selu"` and `"gelu_new"` are supported.
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+             The epsilon used by the layer normalization layers.
+         image_size (`int`, *optional*, defaults to `224`):
+             The size (resolution) of each image.
+
+     Example:
+
+     ```python
+     >>> from transformers import EfficientFormerConfig, EfficientFormerModel
+
+     >>> # Initializing a EfficientFormer efficientformer-l1 style configuration
+     >>> configuration = EfficientFormerConfig()
+
+     >>> # Initializing a EfficientFormerModel (with random weights) from the efficientformer-l3 style configuration
+     >>> model = EfficientFormerModel(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+
+     model_type = "efficientformer"
+
+     def __init__(
+         self,
+         depths: List[int] = [3, 2, 6, 4],
+         hidden_sizes: List[int] = [48, 96, 224, 448],
+         downsamples: List[bool] = [True, True, True, True],
+         dim: int = 448,
+         key_dim: int = 32,
+         attention_ratio: int = 4,
+         resolution: int = 7,
+         num_hidden_layers: int = 5,
+         num_attention_heads: int = 8,
+         mlp_expansion_ratio: int = 4,
+         hidden_dropout_prob: float = 0.0,
+         patch_size: int = 16,
+         num_channels: int = 3,
+         pool_size: int = 3,
+         downsample_patch_size: int = 3,
+         downsample_stride: int = 2,
+         downsample_pad: int = 1,
+         drop_path_rate: float = 0.0,
+         num_meta3d_blocks: int = 1,
+         distillation: bool = True,
+         use_layer_scale: bool = True,
+         layer_scale_init_value: float = 1e-5,
+         hidden_act: str = "gelu",
+         initializer_range: float = 0.02,
+         layer_norm_eps: float = 1e-12,
+         image_size: int = 224,
+         batch_norm_eps: float = 1e-05,
+         **kwargs,
+     ) -> None:
+         super().__init__(**kwargs)
+
+         self.hidden_act = hidden_act
+         self.hidden_dropout_prob = hidden_dropout_prob
+         self.hidden_sizes = hidden_sizes
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+         self.initializer_range = initializer_range
+         self.layer_norm_eps = layer_norm_eps
+         self.patch_size = patch_size
+         self.num_channels = num_channels
+         self.depths = depths
+         self.mlp_expansion_ratio = mlp_expansion_ratio
+         self.downsamples = downsamples
+         self.dim = dim
+         self.key_dim = key_dim
+         self.attention_ratio = attention_ratio
+         self.resolution = resolution
+         self.pool_size = pool_size
+         self.downsample_patch_size = downsample_patch_size
+         self.downsample_stride = downsample_stride
+         self.downsample_pad = downsample_pad
+         self.drop_path_rate = drop_path_rate
+         self.num_meta3d_blocks = num_meta3d_blocks
+         self.distillation = distillation
+         self.use_layer_scale = use_layer_scale
+         self.layer_scale_init_value = layer_scale_init_value
+         self.image_size = image_size
+         self.batch_norm_eps = batch_norm_eps
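
The configuration fields map one-to-one onto constructor arguments, so a variant can be described by overriding the stage depths and widths. A hedged sketch (the values below are illustrative, not an official checkpoint):

```python
from transformers import EfficientFormerConfig, EfficientFormerForImageClassification

# Illustrative, non-official variant: deeper last stage, two Meta3D blocks, 10 labels.
config = EfficientFormerConfig(
    depths=[4, 4, 12, 8],
    hidden_sizes=[48, 96, 224, 448],
    num_meta3d_blocks=2,
    num_labels=10,
)
model = EfficientFormerForImageClassification(config)
print(sum(p.numel() for p in model.parameters()))  # parameter count of the sketch
```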
venv/lib/python3.10/site-packages/transformers/models/efficientformer/convert_efficientformer_original_pytorch_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,252 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Inc. team.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """Convert EfficientFormer checkpoints from the original repository.
+
+ URL: https://github.com/snap-research/EfficientFormer
+ """
+
+ import argparse
+ import re
+ from pathlib import Path
+
+ import requests
+ import torch
+ from PIL import Image
+ from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
+
+ from transformers import (
+     EfficientFormerConfig,
+     EfficientFormerForImageClassificationWithTeacher,
+     EfficientFormerImageProcessor,
+ )
+ from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
+
+
+ def rename_key(old_name, num_meta4D_last_stage):
+     new_name = old_name
+
+     if "patch_embed" in old_name:
+         _, layer, param = old_name.split(".")
+
+         if layer == "0":
+             new_name = old_name.replace("0", "convolution1")
+         elif layer == "1":
+             new_name = old_name.replace("1", "batchnorm_before")
+         elif layer == "3":
+             new_name = old_name.replace("3", "convolution2")
+         else:
+             new_name = old_name.replace("4", "batchnorm_after")
+
+     if "network" in old_name and re.search(r"\d\.\d", old_name):
+         two_digit_num = r"\b\d{2}\b"
+         if bool(re.search(two_digit_num, old_name)):
+             match = re.search(r"\d\.\d\d.", old_name).group()
+         else:
+             match = re.search(r"\d\.\d.", old_name).group()
+         if int(match[0]) < 6:
+             trimmed_name = old_name.replace(match, "")
+             trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
+             new_name = "intermediate_stages." + trimmed_name
+         else:
+             trimmed_name = old_name.replace(match, "")
+             if int(match[2]) < num_meta4D_last_stage:
+                 trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
+             else:
+                 layer_index = str(int(match[2]) - num_meta4D_last_stage)
+                 trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
+                 if "norm1" in old_name:
+                     trimmed_name = trimmed_name.replace("norm1", "layernorm1")
+                 elif "norm2" in old_name:
+                     trimmed_name = trimmed_name.replace("norm2", "layernorm2")
+                 elif "fc1" in old_name:
+                     trimmed_name = trimmed_name.replace("fc1", "linear_in")
+                 elif "fc2" in old_name:
+                     trimmed_name = trimmed_name.replace("fc2", "linear_out")
+
+             new_name = "last_stage." + trimmed_name
+
+     elif "network" in old_name and re.search(r".\d.", old_name):
+         new_name = old_name.replace("network", "intermediate_stages")
+
+     if "fc" in new_name:
+         new_name = new_name.replace("fc", "convolution")
+     elif ("norm1" in new_name) and ("layernorm1" not in new_name):
+         new_name = new_name.replace("norm1", "batchnorm_before")
+     elif ("norm2" in new_name) and ("layernorm2" not in new_name):
+         new_name = new_name.replace("norm2", "batchnorm_after")
+     if "proj" in new_name:
+         new_name = new_name.replace("proj", "projection")
+     if "dist_head" in new_name:
+         new_name = new_name.replace("dist_head", "distillation_classifier")
+     elif "head" in new_name:
+         new_name = new_name.replace("head", "classifier")
+     elif "patch_embed" in new_name:
+         new_name = "efficientformer." + new_name
+     elif new_name == "norm.weight" or new_name == "norm.bias":
+         new_name = new_name.replace("norm", "layernorm")
+         new_name = "efficientformer." + new_name
+     else:
+         new_name = "efficientformer.encoder." + new_name
+
+     return new_name
+
+
+ def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
+     for key in checkpoint.copy().keys():
+         val = checkpoint.pop(key)
+         checkpoint[rename_key(key, num_meta4D_last_stage)] = val
+
+     return checkpoint
+
+
+ # We will verify our results on a COCO image
+ def prepare_img():
+     url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+     image = Image.open(requests.get(url, stream=True).raw)
+
+     return image
+
+
+ def convert_efficientformer_checkpoint(
+     checkpoint_path: Path, efficientformer_config_file: Path, pytorch_dump_path: Path, push_to_hub: bool
+ ):
+     orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
+     config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
+     model = EfficientFormerForImageClassificationWithTeacher(config)
+     model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])
+
+     num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
+     new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)
+
+     model.load_state_dict(new_state_dict)
+     model.eval()
+
+     pillow_resamplings = {
+         "bilinear": PILImageResampling.BILINEAR,
+         "bicubic": PILImageResampling.BICUBIC,
+         "nearest": PILImageResampling.NEAREST,
+     }
+
+     # prepare image
+     image = prepare_img()
+     image_size = 256
+     crop_size = 224
+     processor = EfficientFormerImageProcessor(
+         size={"shortest_edge": image_size},
+         crop_size={"height": crop_size, "width": crop_size},
+         resample=pillow_resamplings["bicubic"],
+     )
+     pixel_values = processor(images=image, return_tensors="pt").pixel_values
+
+     # original processing pipeline
+     image_transforms = Compose(
+         [
+             Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
+             CenterCrop(crop_size),
+             ToTensor(),
+             Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
+         ]
+     )
+     original_pixel_values = image_transforms(image).unsqueeze(0)
+
+     assert torch.allclose(original_pixel_values, pixel_values)
+
+     outputs = model(pixel_values)
+     logits = outputs.logits
+
+     expected_shape = (1, 1000)
+
+     if "l1" in model_name:
+         expected_logits = torch.Tensor(
+             [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
+         )
+         assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
+         assert logits.shape == expected_shape
+     elif "l3" in model_name:
+         expected_logits = torch.Tensor(
+             [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
+         )
+         assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
+         assert logits.shape == expected_shape
+     elif "l7" in model_name:
+         expected_logits = torch.Tensor(
+             [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
+         )
+         assert logits.shape == expected_shape
+     else:
+         raise ValueError(
+             f"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"
+         )
+
+     # Save Checkpoints
+     Path(pytorch_dump_path).mkdir(exist_ok=True)
+     model.save_pretrained(pytorch_dump_path)
+     print(f"Checkpoint successfuly converted. Model saved at {pytorch_dump_path}")
+     processor.save_pretrained(pytorch_dump_path)
+     print(f"Processor successfuly saved at {pytorch_dump_path}")
+
+     if push_to_hub:
+         print("Pushing model to the hub...")
+
+         model.push_to_hub(
+             repo_id=f"Bearnardd/{pytorch_dump_path}",
+             commit_message="Add model",
+             use_temp_dir=True,
+         )
+         processor.push_to_hub(
+             repo_id=f"Bearnardd/{pytorch_dump_path}",
+             commit_message="Add image processor",
+             use_temp_dir=True,
+         )
+
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser()
+     # Required parameters
+     parser.add_argument(
+         "--pytorch_model_path",
+         default=None,
+         type=str,
+         required=True,
+         help="Path to EfficientFormer pytorch checkpoint.",
+     )
+     parser.add_argument(
+         "--config_file",
+         default=None,
+         type=str,
+         required=True,
+         help="The json file for EfficientFormer model config.",
+     )
+     parser.add_argument(
+         "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
+     )
+
+     parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
+     parser.add_argument(
+         "--no-push_to_hub",
+         dest="push_to_hub",
+         action="store_false",
+         help="Do not push model and image processor to the hub",
+     )
+     parser.set_defaults(push_to_hub=True)
+
+     args = parser.parse_args()
+     convert_efficientformer_checkpoint(
+         checkpoint_path=args.pytorch_model_path,
+         efficientformer_config_file=args.config_file,
+         pytorch_dump_path=args.pytorch_dump_path,
+         push_to_hub=args.push_to_hub,
+     )
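
The conversion script above is argparse-driven; the sketch below shows the equivalent direct call. It assumes the script is importable from the current directory, the paths are placeholders (no real local checkpoint is named here), and the hub push is disabled:

```python
# Hedged sketch: import path and file names are hypothetical placeholders.
from convert_efficientformer_original_pytorch_checkpoint_to_pytorch import (
    convert_efficientformer_checkpoint,
)

convert_efficientformer_checkpoint(
    checkpoint_path="efficientformer_l1_checkpoint.pth",        # placeholder
    efficientformer_config_file="efficientformer_l1_config.json",  # placeholder
    pytorch_dump_path="efficientformer-l1-converted",
    push_to_hub=False,
)
```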
venv/lib/python3.10/site-packages/transformers/models/efficientformer/image_processing_efficientformer.py ADDED
@@ -0,0 +1,321 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Image processor class for EfficientFormer."""
+
+ from typing import Dict, List, Optional, Union
+
+ import numpy as np
+
+ from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
+ from ...image_transforms import (
+     get_resize_output_image_size,
+     resize,
+     to_channel_dimension_format,
+ )
+ from ...image_utils import (
+     IMAGENET_DEFAULT_MEAN,
+     IMAGENET_DEFAULT_STD,
+     ChannelDimension,
+     ImageInput,
+     PILImageResampling,
+     infer_channel_dimension_format,
+     is_batched,
+     is_scaled_image,
+     to_numpy_array,
+     valid_images,
+     validate_kwargs,
+     validate_preprocess_arguments,
+ )
+ from ...utils import TensorType, logging
+
+
+ logger = logging.get_logger(__name__)
+
+
+ class EfficientFormerImageProcessor(BaseImageProcessor):
+     r"""
+     Constructs a EfficientFormer image processor.
+
+     Args:
+         do_resize (`bool`, *optional*, defaults to `True`):
+             Whether to resize the image's (height, width) dimensions to the specified `(size["height"],
+             size["width"])`. Can be overridden by the `do_resize` parameter in the `preprocess` method.
+         size (`dict`, *optional*, defaults to `{"height": 224, "width": 224}`):
+             Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess`
+             method.
+         resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
+             Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the
+             `preprocess` method.
+         do_center_crop (`bool`, *optional*, defaults to `True`):
+             Whether to center crop the image to the specified `crop_size`. Can be overridden by `do_center_crop` in the
+             `preprocess` method.
+         crop_size (`Dict[str, int]` *optional*, defaults to 224):
+             Size of the output image after applying `center_crop`. Can be overridden by `crop_size` in the `preprocess`
+             method.
+         do_rescale (`bool`, *optional*, defaults to `True`):
+             Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
+             parameter in the `preprocess` method.
+         rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
+             Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
+             `preprocess` method.
+         do_normalize:
+             Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
+             method.
+         image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
+             Mean to use if normalizing the image. This is a float or list of floats the length of the number of
+             channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
+         image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
+             Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
+             number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
+     """
+
+     model_input_names = ["pixel_values"]
+
+     def __init__(
+         self,
+         do_resize: bool = True,
+         size: Optional[Dict[str, int]] = None,
+         resample: PILImageResampling = PILImageResampling.BICUBIC,
+         do_center_crop: bool = True,
+         do_rescale: bool = True,
+         rescale_factor: Union[int, float] = 1 / 255,
+         crop_size: Dict[str, int] = None,
+         do_normalize: bool = True,
+         image_mean: Optional[Union[float, List[float]]] = None,
+         image_std: Optional[Union[float, List[float]]] = None,
+         **kwargs,
+     ) -> None:
+         super().__init__(**kwargs)
+         size = size if size is not None else {"height": 224, "width": 224}
+         size = get_size_dict(size)
+         crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
+         crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
+
+         self.do_resize = do_resize
+         self.do_rescale = do_rescale
+         self.do_normalize = do_normalize
+         self.do_center_crop = do_center_crop
+         self.crop_size = crop_size
+         self.size = size
+         self.resample = resample
+         self.rescale_factor = rescale_factor
+         self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
+         self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
+         self._valid_processor_keys = [
+             "images",
+             "do_resize",
+             "size",
+             "resample",
+             "do_center_crop",
+             "crop_size",
+             "do_rescale",
+             "rescale_factor",
+             "do_normalize",
+             "image_mean",
+             "image_std",
+             "return_tensors",
+             "data_format",
+             "input_data_format",
+         ]
+
+     def resize(
+         self,
+         image: np.ndarray,
+         size: Dict[str, int],
+         resample: PILImageResampling = PILImageResampling.BILINEAR,
+         data_format: Optional[Union[str, ChannelDimension]] = None,
+         input_data_format: Optional[Union[str, ChannelDimension]] = None,
+         **kwargs,
+     ) -> np.ndarray:
+         """
+         Resize an image to `(size["height"], size["width"])`.
+
+         Args:
+             image (`np.ndarray`):
+                 Image to resize.
+             size (`Dict[str, int]`):
+                 Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
+             resample:
+                 `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`.
+             data_format (`ChannelDimension` or `str`, *optional*):
+                 The channel dimension format for the output image. If unset, the channel dimension format of the input
+                 image is used. Can be one of:
+                 - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+                 - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+             input_data_format (`ChannelDimension` or `str`, *optional*):
+                 The channel dimension format of the input image. If not provided, it will be inferred.
+
+         Returns:
+             `np.ndarray`: The resized image.
+         """
+         size = get_size_dict(size)
+
+         if "shortest_edge" in size:
+             size = get_resize_output_image_size(
+                 image, size=size["shortest_edge"], default_to_square=False, input_data_format=input_data_format
+             )
+             # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
+         elif "height" in size and "width" in size:
+             size = (size["height"], size["width"])
+         else:
+             raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
+         return resize(
+             image, size=size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs
+         )
+
+     def preprocess(
+         self,
+         images: ImageInput,
+         do_resize: Optional[bool] = None,
+         size: Dict[str, int] = None,
+         resample: PILImageResampling = None,
+         do_center_crop: bool = None,
+         crop_size: int = None,
+         do_rescale: Optional[bool] = None,
+         rescale_factor: Optional[float] = None,
+         do_normalize: Optional[bool] = None,
+         image_mean: Optional[Union[float, List[float]]] = None,
+         image_std: Optional[Union[float, List[float]]] = None,
+         return_tensors: Optional[Union[str, TensorType]] = None,
+         data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
+         input_data_format: Optional[Union[str, ChannelDimension]] = None,
+         **kwargs,
+     ) -> BatchFeature:
+         """
+         Preprocess an image or batch of images.
+
+         Args:
+             images (`ImageInput`):
+                 Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
+                 passing in images with pixel values between 0 and 1, set `do_rescale=False`.
+             do_resize (`bool`, *optional*, defaults to `self.do_resize`):
+                 Whether to resize the image.
+             size (`Dict[str, int]`, *optional*, defaults to `self.size`):
+                 Dictionary in the format `{"height": h, "width": w}` specifying the size of the output image after
+                 resizing.
+             resample (`PILImageResampling` filter, *optional*, defaults to `self.resample`):
+                 `PILImageResampling` filter to use if resizing the image e.g. `PILImageResampling.BILINEAR`. Only has
+                 an effect if `do_resize` is set to `True`.
+             do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
+                 Whether to center crop the image.
+             do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
+                 Whether to rescale the image values between [0 - 1].
+             rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
+                 Rescale factor to rescale the image by if `do_rescale` is set to `True`.
+             crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
+                 Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.
+             do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
+                 Whether to normalize the image.
+             image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
+                 Image mean to use if `do_normalize` is set to `True`.
+             image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
+                 Image standard deviation to use if `do_normalize` is set to `True`.
+             return_tensors (`str` or `TensorType`, *optional*):
+                 The type of tensors to return. Can be one of:
+                 - Unset: Return a list of `np.ndarray`.
+                 - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
+                 - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
+                 - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
+                 - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
+             data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
+                 The channel dimension format for the output image. Can be one of:
+                 - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+                 - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+                 - Unset: Use the channel dimension format of the input image.
+             input_data_format (`ChannelDimension` or `str`, *optional*):
+                 The channel dimension format for the input image. If unset, the channel dimension format is inferred
+                 from the input image. Can be one of:
+                 - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+                 - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+                 - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+         """
+         do_resize = do_resize if do_resize is not None else self.do_resize
+         do_rescale = do_rescale if do_rescale is not None else self.do_rescale
+         do_normalize = do_normalize if do_normalize is not None else self.do_normalize
+         do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
+         crop_size = crop_size if crop_size is not None else self.crop_size
+         crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
+         resample = resample if resample is not None else self.resample
+         rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
+         image_mean = image_mean if image_mean is not None else self.image_mean
+         image_std = image_std if image_std is not None else self.image_std
+
+         size = size if size is not None else self.size
+         size_dict = get_size_dict(size)
+
+         validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
+
+         if not is_batched(images):
+             images = [images]
+
+         if not valid_images(images):
+             raise ValueError(
+                 "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
+                 "torch.Tensor, tf.Tensor or jax.ndarray."
+             )
+         validate_preprocess_arguments(
+             do_rescale=do_rescale,
+             rescale_factor=rescale_factor,
+             do_normalize=do_normalize,
+             image_mean=image_mean,
+             image_std=image_std,
+             do_center_crop=do_center_crop,
+             crop_size=crop_size,
+             do_resize=do_resize,
+             size=size,
+             resample=resample,
+         )
+         # All transformations expect numpy arrays.
+         images = [to_numpy_array(image) for image in images]
+
+         if is_scaled_image(images[0]) and do_rescale:
+             logger.warning_once(
+                 "It looks like you are trying to rescale already rescaled images. If the input"
+                 " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
+             )
+
+         if input_data_format is None:
+             # We assume that all images have the same channel dimension format.
+             input_data_format = infer_channel_dimension_format(images[0])
+
+         if do_resize:
+             images = [
+                 self.resize(image=image, size=size_dict, resample=resample, input_data_format=input_data_format)
+                 for image in images
+             ]
+
+         if do_center_crop:
+             images = [
+                 self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) for image in images
+             ]
+
+         if do_rescale:
+             images = [
+                 self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
+                 for image in images
+             ]
+
+         if do_normalize:
+             images = [
+                 self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
+                 for image in images
+             ]
+
+         images = [
+             to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
+         ]
+
+         data = {"pixel_values": images}
+         return BatchFeature(data=data, tensor_type=return_tensors)
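
The processor mirrors the resize-then-center-crop pipeline that the conversion script verifies against. A hedged usage sketch (the COCO URL is the same one the conversion script uses for its check; the size values are the conversion script's defaults, not the processor's):

```python
import requests
from PIL import Image

from transformers import EfficientFormerImageProcessor

# Same verification image as the conversion script above.
image = Image.open(
    requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw
)

processor = EfficientFormerImageProcessor(
    size={"shortest_edge": 256},
    crop_size={"height": 224, "width": 224},
)
inputs = processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])
```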
venv/lib/python3.10/site-packages/transformers/models/efficientformer/modeling_efficientformer.py ADDED
@@ -0,0 +1,803 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 Snapchat Research and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch EfficientFormer model."""
16
+
17
+ import itertools
18
+ from dataclasses import dataclass
19
+ from typing import Optional, Tuple, Union
20
+
21
+ import torch
22
+ import torch.utils.checkpoint
23
+ from torch import nn
24
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
25
+
26
+ from ...activations import ACT2FN
27
+ from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput
28
+ from ...modeling_utils import PreTrainedModel
29
+ from ...utils import (
30
+ ModelOutput,
31
+ add_code_sample_docstrings,
32
+ add_start_docstrings,
33
+ add_start_docstrings_to_model_forward,
34
+ logging,
35
+ )
36
+ from .configuration_efficientformer import EfficientFormerConfig
37
+
38
+
39
+ logger = logging.get_logger(__name__)
40
+
41
+ # General docstring
42
+ _CONFIG_FOR_DOC = "EfficientFormerConfig"
43
+
44
+ # Base docstring
45
+ _CHECKPOINT_FOR_DOC = "snap-research/efficientformer-l1-300"
46
+ _EXPECTED_OUTPUT_SHAPE = [1, 49, 448]
47
+
48
+ # Image classification docstring
49
+ _IMAGE_CLASS_CHECKPOINT = "snap-research/efficientformer-l1-300"
50
+ _IMAGE_CLASS_EXPECTED_OUTPUT = "Egyptian cat"
51
+
52
+
53
+ from ..deprecated._archive_maps import EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
54
+
55
+
56
+ class EfficientFormerPatchEmbeddings(nn.Module):
57
+ """
58
+ This class performs downsampling between two stages. For the input tensor with the shape [batch_size, num_channels,
59
+ height, width] it produces output tensor with the shape [batch_size, num_channels, height/stride, width/stride]
60
+ """
61
+
62
+ def __init__(self, config: EfficientFormerConfig, num_channels: int, embed_dim: int, apply_norm: bool = True):
63
+ super().__init__()
64
+ self.num_channels = num_channels
65
+
66
+ self.projection = nn.Conv2d(
67
+ num_channels,
68
+ embed_dim,
69
+ kernel_size=config.downsample_patch_size,
70
+ stride=config.downsample_stride,
71
+ padding=config.downsample_pad,
72
+ )
73
+ self.norm = nn.BatchNorm2d(embed_dim, eps=config.batch_norm_eps) if apply_norm else nn.Identity()
74
+
75
+ def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
76
+ batch_size, num_channels, height, width = pixel_values.shape
77
+ if num_channels != self.num_channels:
78
+ raise ValueError(
79
+ "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
80
+ )
81
+
82
+ embeddings = self.projection(pixel_values)
83
+ embeddings = self.norm(embeddings)
84
+
85
+ return embeddings
86
+
87
+
88
+ class EfficientFormerSelfAttention(nn.Module):
89
+ def __init__(self, dim: int, key_dim: int, num_heads: int, attention_ratio: int, resolution: int):
90
+ super().__init__()
91
+
92
+ self.num_heads = num_heads
93
+ self.key_dim = key_dim
94
+ self.attention_ratio = attention_ratio
95
+ self.scale = key_dim**-0.5
96
+ self.total_key_dim = key_dim * num_heads
97
+ self.expanded_key_dim = int(attention_ratio * key_dim)
98
+ self.total_expanded_key_dim = int(self.expanded_key_dim * num_heads)
99
+ hidden_size = self.total_expanded_key_dim + self.total_key_dim * 2
100
+ self.qkv = nn.Linear(dim, hidden_size)
101
+ self.projection = nn.Linear(self.total_expanded_key_dim, dim)
102
+ points = list(itertools.product(range(resolution), range(resolution)))
103
+ num_points = len(points)
104
+ attention_offsets = {}
105
+ idxs = []
106
+ for point_1 in points:
107
+ for point_2 in points:
108
+ offset = (abs(point_1[0] - point_2[0]), abs(point_1[1] - point_2[1]))
109
+ if offset not in attention_offsets:
110
+ attention_offsets[offset] = len(attention_offsets)
111
+ idxs.append(attention_offsets[offset])
112
+ self.attention_biases = torch.nn.Parameter(torch.zeros(num_heads, len(attention_offsets)))
113
+ self.register_buffer("attention_bias_idxs", torch.LongTensor(idxs).view(num_points, num_points))
114
+
115
+ @torch.no_grad()
116
+ def train(self, mode=True):
117
+ super().train(mode)
118
+ if mode and hasattr(self, "ab"):
119
+ del self.ab
120
+ else:
121
+ self.ab = self.attention_biases[:, self.attention_bias_idxs]
122
+
123
+ def forward(self, hidden_states: torch.Tensor, output_attentions: bool = False) -> Tuple[torch.Tensor]:
124
+ batch_size, sequence_length, num_channels = hidden_states.shape
125
+ qkv = self.qkv(hidden_states)
126
+ query_layer, key_layer, value_layer = qkv.reshape(batch_size, sequence_length, self.num_heads, -1).split(
127
+ [self.key_dim, self.key_dim, self.expanded_key_dim], dim=3
128
+ )
129
+ query_layer = query_layer.permute(0, 2, 1, 3)
130
+ key_layer = key_layer.permute(0, 2, 1, 3)
131
+ value_layer = value_layer.permute(0, 2, 1, 3)
132
+
133
+ # set `model.to(torch_device)` won't change `self.ab.device`, if there is no follow-up `train` or `eval` call.
134
+ # Let's do it manually here, so users won't have to do this everytime.
135
+ if not self.training:
136
+ self.ab = self.ab.to(self.attention_biases.device)
137
+ attention_probs = (torch.matmul(query_layer, key_layer.transpose(-2, -1))) * self.scale + (
138
+ self.attention_biases[:, self.attention_bias_idxs] if self.training else self.ab
139
+ )
140
+
141
+ attention_probs = attention_probs.softmax(dim=-1)
142
+
143
+ context_layer = torch.matmul(attention_probs, value_layer).transpose(1, 2)
144
+ context_layer = context_layer.reshape(batch_size, sequence_length, self.total_expanded_key_dim)
145
+ context_layer = self.projection(context_layer)
146
+
147
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
148
+
149
+ return outputs
150
+
151
+
152
+ class EfficientFormerConvStem(nn.Module):
153
+ def __init__(self, config: EfficientFormerConfig, out_channels: int):
154
+ super().__init__()
155
+
156
+ self.convolution1 = nn.Conv2d(config.num_channels, out_channels // 2, kernel_size=3, stride=2, padding=1)
157
+ self.batchnorm_before = nn.BatchNorm2d(out_channels // 2, eps=config.batch_norm_eps)
158
+
159
+ self.convolution2 = nn.Conv2d(out_channels // 2, out_channels, kernel_size=3, stride=2, padding=1)
160
+ self.batchnorm_after = nn.BatchNorm2d(out_channels, eps=config.batch_norm_eps)
161
+
162
+ self.activation = nn.ReLU()
163
+
164
+ def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
165
+ features = self.batchnorm_before(self.convolution1(pixel_values))
166
+ features = self.activation(features)
167
+ features = self.batchnorm_after(self.convolution2(features))
168
+ features = self.activation(features)
169
+
170
+ return features
171
+
172
+
173
+ class EfficientFormerPooling(nn.Module):
174
+ def __init__(self, pool_size: int):
175
+ super().__init__()
176
+ self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)
177
+
178
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
179
+ output = self.pool(hidden_states) - hidden_states
180
+ return output
181
+
182
+
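# NOTE (illustrative sketch): the pooling token mixer above returns avg_pool(x) - x, so when a Meta4D block adds
# the residual back (and layer scale is disabled) the net effect is plain average pooling; with layer scale it is
# x + scale * (avg_pool(x) - x). A quick numerical check of that identity with arbitrary shapes:
import torch
import torch.nn as nn

pool = nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False)
hidden = torch.randn(1, 8, 7, 7)
mixed = pool(hidden) - hidden                      # what EfficientFormerPooling.forward returns
assert torch.allclose(hidden + mixed, pool(hidden))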
183
+ class EfficientFormerDenseMlp(nn.Module):
184
+ def __init__(
185
+ self,
186
+ config: EfficientFormerConfig,
187
+ in_features: int,
188
+ hidden_features: Optional[int] = None,
189
+ out_features: Optional[int] = None,
190
+ ):
191
+ super().__init__()
192
+ out_features = out_features or in_features
193
+ hidden_features = hidden_features or in_features
194
+
195
+ self.linear_in = nn.Linear(in_features, hidden_features)
196
+ self.activation = ACT2FN[config.hidden_act]
197
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
198
+ self.linear_out = nn.Linear(hidden_features, out_features)
199
+
200
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
201
+ hidden_states = self.linear_in(hidden_states)
202
+ hidden_states = self.activation(hidden_states)
203
+ hidden_states = self.dropout(hidden_states)
204
+ hidden_states = self.linear_out(hidden_states)
205
+ hidden_states = self.dropout(hidden_states)
206
+
207
+ return hidden_states
208
+
209
+
210
+ class EfficientFormerConvMlp(nn.Module):
211
+ def __init__(
212
+ self,
213
+ config: EfficientFormerConfig,
214
+ in_features: int,
215
+ hidden_features: Optional[int] = None,
216
+ out_features: Optional[int] = None,
217
+ drop: float = 0.0,
218
+ ):
219
+ super().__init__()
220
+ out_features = out_features or in_features
221
+ hidden_features = hidden_features or in_features
222
+
223
+ self.convolution1 = nn.Conv2d(in_features, hidden_features, 1)
224
+ self.activation = ACT2FN[config.hidden_act]
225
+ self.convolution2 = nn.Conv2d(hidden_features, out_features, 1)
226
+ self.dropout = nn.Dropout(drop)
227
+
228
+ self.batchnorm_before = nn.BatchNorm2d(hidden_features, eps=config.batch_norm_eps)
229
+ self.batchnorm_after = nn.BatchNorm2d(out_features, eps=config.batch_norm_eps)
230
+
231
+ def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
232
+ hidden_state = self.convolution1(hidden_state)
233
+ hidden_state = self.batchnorm_before(hidden_state)
234
+
235
+ hidden_state = self.activation(hidden_state)
236
+ hidden_state = self.dropout(hidden_state)
237
+ hidden_state = self.convolution2(hidden_state)
238
+
239
+ hidden_state = self.batchnorm_after(hidden_state)
240
+ hidden_state = self.dropout(hidden_state)
241
+
242
+ return hidden_state
243
+
244
+
245
+ # Copied from transformers.models.convnext.modeling_convnext.drop_path
246
+ def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
247
+ """
248
+ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
249
+
250
+ Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
251
+ however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
252
+ See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
253
+ layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
254
+ argument.
255
+ """
256
+ if drop_prob == 0.0 or not training:
257
+ return input
258
+ keep_prob = 1 - drop_prob
259
+ shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
260
+ random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
261
+ random_tensor.floor_() # binarize
262
+ output = input.div(keep_prob) * random_tensor
263
+ return output
264
+
265
+
266
+ # Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->EfficientFormer
267
+ class EfficientFormerDropPath(nn.Module):
268
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
269
+
270
+ def __init__(self, drop_prob: Optional[float] = None) -> None:
271
+ super().__init__()
272
+ self.drop_prob = drop_prob
273
+
274
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
275
+ return drop_path(hidden_states, self.drop_prob, self.training)
276
+
277
+ def extra_repr(self) -> str:
278
+ return "p={}".format(self.drop_prob)
279
+
280
+
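# NOTE (illustrative sketch): stochastic depth in numbers. With drop_prob = 0.25 (an arbitrary value) roughly a
# quarter of the samples in a batch have their residual branch zeroed, and the survivors are scaled by
# 1 / keep_prob so the expected activation is unchanged. Assumes the `drop_path` defined above is importable
# from an installed transformers package.
import torch
from transformers.models.efficientformer.modeling_efficientformer import drop_path

torch.manual_seed(0)
x = torch.ones(1000, 4)
out = drop_path(x, drop_prob=0.25, training=True)
print((out.abs().sum(dim=1) > 0).float().mean())   # fraction of surviving samples, ~0.75
print(out.mean())                                  # ~1.0, since survivors are divided by keep_prob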
281
+ class EfficientFormerFlat(nn.Module):
282
+ def __init__(self):
283
+ super().__init__()
284
+
285
+ def forward(self, hidden_states: torch.Tensor) -> Tuple[torch.Tensor]:
286
+ hidden_states = hidden_states.flatten(2).transpose(1, 2)
287
+ return hidden_states
288
+
289
+
290
+ class EfficientFormerMeta3D(nn.Module):
291
+ def __init__(self, config: EfficientFormerConfig, dim: int, drop_path: float = 0.0):
292
+ super().__init__()
293
+
294
+ self.token_mixer = EfficientFormerSelfAttention(
295
+ dim=config.dim,
296
+ key_dim=config.key_dim,
297
+ num_heads=config.num_attention_heads,
298
+ attention_ratio=config.attention_ratio,
299
+ resolution=config.resolution,
300
+ )
301
+
302
+ self.layernorm1 = nn.LayerNorm(dim, eps=config.layer_norm_eps)
303
+ self.layernorm2 = nn.LayerNorm(dim, eps=config.layer_norm_eps)
304
+
305
+ mlp_hidden_dim = int(dim * config.mlp_expansion_ratio)
306
+ self.mlp = EfficientFormerDenseMlp(config, in_features=dim, hidden_features=mlp_hidden_dim)
307
+
308
+ self.drop_path = EfficientFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
309
+ self.use_layer_scale = config.use_layer_scale
310
+ if config.use_layer_scale:
311
+ self.layer_scale_1 = nn.Parameter(config.layer_scale_init_value * torch.ones((dim)), requires_grad=True)
312
+ self.layer_scale_2 = nn.Parameter(config.layer_scale_init_value * torch.ones((dim)), requires_grad=True)
313
+
314
+ def forward(self, hidden_states: torch.Tensor, output_attentions: bool = False) -> Tuple[torch.Tensor]:
315
+ self_attention_outputs = self.token_mixer(self.layernorm1(hidden_states), output_attentions)
316
+ attention_output = self_attention_outputs[0]
317
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
318
+
319
+ if self.use_layer_scale:
320
+ layer_output = hidden_states + self.drop_path(
321
+ self.layer_scale_1.unsqueeze(0).unsqueeze(0) * attention_output
322
+ )
323
+ layer_output = layer_output + self.drop_path(
324
+ self.layer_scale_2.unsqueeze(0).unsqueeze(0) * self.mlp(self.layernorm2(layer_output))
325
+ )
326
+ else:
327
+ layer_output = hidden_states + self.drop_path(attention_output)
328
+ layer_output = layer_output + self.drop_path(self.mlp(self.layernorm2(layer_output)))
329
+
330
+ outputs = (layer_output,) + outputs
331
+
332
+ return outputs
333
+
334
+
335
+ class EfficientFormerMeta3DLayers(nn.Module):
336
+ def __init__(self, config: EfficientFormerConfig):
337
+ super().__init__()
338
+ drop_paths = [
339
+ config.drop_path_rate * (block_idx + sum(config.depths[:-1]))
340
+ for block_idx in range(config.num_meta3d_blocks)
341
+ ]
342
+ self.blocks = nn.ModuleList(
343
+ [EfficientFormerMeta3D(config, config.hidden_sizes[-1], drop_path=drop_path) for drop_path in drop_paths]
344
+ )
345
+
346
+ def forward(self, hidden_states: torch.Tensor, output_attentions: bool = False) -> Tuple[torch.Tensor]:
347
+ all_attention_outputs = () if output_attentions else None
348
+
349
+ for layer_module in self.blocks:
350
+ if isinstance(hidden_states, tuple):
351
+ hidden_states = hidden_states[0]
352
+
353
+ hidden_states = layer_module(hidden_states, output_attentions)
354
+
355
+ if output_attentions:
356
+ all_attention_outputs = all_attention_outputs + (hidden_states[1],)
357
+
358
+ if output_attentions:
359
+ outputs = (hidden_states[0],) + all_attention_outputs
360
+ return outputs
361
+
362
+ return hidden_states
363
+
364
+
365
+ class EfficientFormerMeta4D(nn.Module):
366
+ def __init__(self, config: EfficientFormerConfig, dim: int, drop_path: float = 0.0):
367
+ super().__init__()
368
+ pool_size = config.pool_size if config.pool_size is not None else 3
369
+ self.token_mixer = EfficientFormerPooling(pool_size=pool_size)
370
+ mlp_hidden_dim = int(dim * config.mlp_expansion_ratio)
371
+ self.mlp = EfficientFormerConvMlp(
372
+ config, in_features=dim, hidden_features=mlp_hidden_dim, drop=config.hidden_dropout_prob
373
+ )
374
+
375
+ self.drop_path = EfficientFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
376
+ self.use_layer_scale = config.use_layer_scale
377
+ if config.use_layer_scale:
378
+ self.layer_scale_1 = nn.Parameter(config.layer_scale_init_value * torch.ones((dim)), requires_grad=True)
379
+ self.layer_scale_2 = nn.Parameter(config.layer_scale_init_value * torch.ones((dim)), requires_grad=True)
380
+
381
+ def forward(self, hidden_states: torch.Tensor) -> Tuple[torch.Tensor]:
382
+ outputs = self.token_mixer(hidden_states)
383
+
384
+ if self.use_layer_scale:
385
+ layer_output = hidden_states + self.drop_path(self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * outputs)
386
+
387
+ layer_output = layer_output + self.drop_path(
388
+ self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * self.mlp(layer_output)
389
+ )
390
+ else:
391
+ layer_output = hidden_states + self.drop_path(outputs)
392
+ layer_output = layer_output + self.drop_path(self.mlp(layer_output))
393
+
394
+ return layer_output
395
+
396
+
397
+ class EfficientFormerMeta4DLayers(nn.Module):
398
+ def __init__(self, config: EfficientFormerConfig, stage_idx: int):
399
+ super().__init__()
400
+ num_layers = (
401
+ config.depths[stage_idx] if stage_idx != -1 else config.depths[stage_idx] - config.num_meta3d_blocks
402
+ )
403
+ drop_paths = [
404
+ config.drop_path_rate * (block_idx + sum(config.depths[:stage_idx])) for block_idx in range(num_layers)
405
+ ]
406
+
407
+ self.blocks = nn.ModuleList(
408
+ [
409
+ EfficientFormerMeta4D(config, config.hidden_sizes[stage_idx], drop_path=drop_path)
410
+ for drop_path in drop_paths
411
+ ]
412
+ )
413
+
414
+ def forward(self, hidden_states: torch.Tensor) -> Tuple[torch.Tensor]:
415
+ for layer_module in self.blocks:
416
+ hidden_states = layer_module(hidden_states)
417
+ return hidden_states
418
+
419
+
420
+ class EfficientFormerIntermediateStage(nn.Module):
421
+ def __init__(self, config: EfficientFormerConfig, index: int):
422
+ super().__init__()
423
+ self.meta4D_layers = EfficientFormerMeta4DLayers(config, index)
424
+
425
+ def forward(self, hidden_states: torch.Tensor) -> Tuple[torch.Tensor]:
426
+ hidden_states = self.meta4D_layers(hidden_states)
427
+ return hidden_states
428
+
429
+
430
+ class EfficientFormerLastStage(nn.Module):
431
+ def __init__(self, config: EfficientFormerConfig):
432
+ super().__init__()
433
+ self.meta4D_layers = EfficientFormerMeta4DLayers(config, -1)
434
+ self.flat = EfficientFormerFlat()
435
+ self.meta3D_layers = EfficientFormerMeta3DLayers(config)
436
+
437
+ def forward(self, hidden_states: torch.Tensor, output_attentions: bool = False) -> Tuple[torch.Tensor]:
438
+ hidden_states = self.meta4D_layers(hidden_states)
439
+ hidden_states = self.flat(hidden_states)
440
+ hidden_states = self.meta3D_layers(hidden_states, output_attentions)
441
+
442
+ return hidden_states
443
+
444
+
445
+ class EfficientFormerEncoder(nn.Module):
446
+ def __init__(self, config: EfficientFormerConfig):
447
+ super().__init__()
448
+ self.config = config
449
+ num_intermediate_stages = len(config.depths) - 1
450
+ downsamples = [
451
+ config.downsamples[i] or config.hidden_sizes[i] != config.hidden_sizes[i + 1]
452
+ for i in range(num_intermediate_stages)
453
+ ]
454
+ intermediate_stages = []
455
+
456
+ for i in range(num_intermediate_stages):
457
+ intermediate_stages.append(EfficientFormerIntermediateStage(config, i))
458
+ if downsamples[i]:
459
+ intermediate_stages.append(
460
+ EfficientFormerPatchEmbeddings(config, config.hidden_sizes[i], config.hidden_sizes[i + 1])
461
+ )
462
+
463
+ self.intermediate_stages = nn.ModuleList(intermediate_stages)
464
+ self.last_stage = EfficientFormerLastStage(config)
465
+
466
+ def forward(
467
+ self,
468
+ hidden_states: torch.Tensor,
469
+ output_hidden_states: bool = False,
470
+ output_attentions: bool = False,
471
+ return_dict: bool = True,
472
+ ) -> BaseModelOutput:
473
+ all_hidden_states = () if output_hidden_states else None
474
+ all_self_attentions = () if output_attentions else None
475
+
476
+ if output_hidden_states:
477
+ all_hidden_states = all_hidden_states + (hidden_states,)
478
+
479
+ for layer_module in self.intermediate_stages:
480
+ hidden_states = layer_module(hidden_states)
481
+ if output_hidden_states:
482
+ all_hidden_states = all_hidden_states + (hidden_states,)
483
+
484
+ layer_output = self.last_stage(hidden_states, output_attentions=output_attentions)
485
+
486
+ if output_attentions:
487
+ all_self_attentions = all_self_attentions + layer_output[1:]
488
+
489
+ if output_hidden_states:
490
+ all_hidden_states = all_hidden_states + (layer_output[0],)
491
+
492
+ if not return_dict:
493
+ return tuple(v for v in [layer_output[0], all_hidden_states, all_self_attentions] if v is not None)
494
+
495
+ return BaseModelOutput(
496
+ last_hidden_state=layer_output[0],
497
+ hidden_states=all_hidden_states,
498
+ attentions=all_self_attentions,
499
+ )
500
+
501
+
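# NOTE (illustrative sketch): shape walk-through for the encoder above, assuming the default (l1-style)
# EfficientFormerConfig and a 224x224 input. The conv stem reduces the spatial size by 4x, each intermediate
# stage that downsamples halves it again via EfficientFormerPatchEmbeddings, and EfficientFormerFlat turns the
# final 7x7 map into 49 tokens of width hidden_sizes[-1], which is where the documented [1, 49, 448] output
# shape comes from.
from transformers import EfficientFormerConfig

config = EfficientFormerConfig()
spatial = 224 // 4                                                 # after the two stride-2 convs of the stem
for i in range(len(config.depths) - 1):
    if config.downsamples[i] or config.hidden_sizes[i] != config.hidden_sizes[i + 1]:
        spatial //= 2                                              # stride-2 patch embedding between stages
print(spatial * spatial, config.hidden_sizes[-1])                  # expected: 49 448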
502
+ class EfficientFormerPreTrainedModel(PreTrainedModel):
503
+ """
504
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
505
+ models.
506
+ """
507
+
508
+ config_class = EfficientFormerConfig
509
+ base_model_prefix = "efficientformer"
510
+ main_input_name = "pixel_values"
511
+ supports_gradient_checkpointing = False
512
+
513
+ def _init_weights(self, module: nn.Module):
514
+ """Initialize the weights"""
515
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
516
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
517
+ if module.bias is not None:
518
+ module.bias.data.zero_()
519
+ elif isinstance(module, nn.LayerNorm):
520
+ module.bias.data.zero_()
521
+ module.weight.data.fill_(1.0)
522
+
523
+
524
+ EFFICIENTFORMER_START_DOCSTRING = r"""
525
+ This model is a PyTorch [nn.Module](https://pytorch.org/docs/stable/nn.html#nn.Module) subclass. Use it as a
526
+ regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior.
527
+
528
+ Parameters:
529
+ config ([`EfficientFormerConfig`]): Model configuration class with all the parameters of the model.
530
+ Initializing with a config file does not load the weights associated with the model, only the
531
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
532
+ """
533
+
534
+ EFFICIENTFORMER_INPUTS_DOCSTRING = r"""
535
+ Args:
536
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
537
+ Pixel values. Pixel values can be obtained using [`ViTImageProcessor`]. See
538
+ [`ViTImageProcessor.preprocess`] for details.
539
+ output_attentions (`bool`, *optional*):
540
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
541
+ tensors for more detail.
542
+ output_hidden_states (`bool`, *optional*):
543
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
544
+ more detail.
545
+ return_dict (`bool`, *optional*):
546
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
547
+ """
548
+
549
+
550
+ @add_start_docstrings(
551
+ "The bare EfficientFormer Model transformer outputting raw hidden-states without any specific head on top.",
552
+ EFFICIENTFORMER_START_DOCSTRING,
553
+ )
554
+ class EfficientFormerModel(EfficientFormerPreTrainedModel):
555
+ def __init__(self, config: EfficientFormerConfig):
556
+ super().__init__(config)
557
+ self.config = config
558
+
559
+ self.patch_embed = EfficientFormerConvStem(config, config.hidden_sizes[0])
560
+ self.encoder = EfficientFormerEncoder(config)
561
+ self.layernorm = nn.LayerNorm(config.hidden_sizes[-1], eps=config.layer_norm_eps)
562
+
563
+ # Initialize weights and apply final processing
564
+ self.post_init()
565
+
566
+ @add_start_docstrings_to_model_forward(EFFICIENTFORMER_INPUTS_DOCSTRING)
567
+ @add_code_sample_docstrings(
568
+ checkpoint=_CHECKPOINT_FOR_DOC,
569
+ output_type=BaseModelOutputWithPooling,
570
+ config_class=_CONFIG_FOR_DOC,
571
+ modality="vision",
572
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
573
+ )
574
+ def forward(
575
+ self,
576
+ pixel_values: Optional[torch.Tensor] = None,
577
+ output_attentions: Optional[bool] = None,
578
+ output_hidden_states: Optional[bool] = None,
579
+ return_dict: Optional[bool] = None,
580
+ ) -> Union[tuple, BaseModelOutput]:
581
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
582
+ output_hidden_states = (
583
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
584
+ )
585
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
586
+
587
+ if pixel_values is None:
588
+ raise ValueError("You have to specify pixel_values")
589
+
590
+ embedding_output = self.patch_embed(pixel_values)
591
+ encoder_outputs = self.encoder(
592
+ embedding_output, output_attentions=output_attentions, output_hidden_states=output_hidden_states
593
+ )
594
+
595
+ sequence_output = encoder_outputs[0]
596
+ sequence_output = self.layernorm(sequence_output)
597
+
598
+ if not return_dict:
599
+ head_outputs = (sequence_output,)
600
+ return head_outputs + encoder_outputs[1:]
601
+
602
+ return BaseModelOutput(
603
+ last_hidden_state=sequence_output,
604
+ hidden_states=encoder_outputs.hidden_states,
605
+ attentions=encoder_outputs.attentions,
606
+ )
607
+
608
+
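# NOTE (illustrative usage sketch): forward pass through the bare model with randomly initialised weights; a real
# run would instead load the checkpoint referenced in the docstring constants ("snap-research/efficientformer-l1-300")
# via from_pretrained, together with a compatible image processor.
import torch
from transformers import EfficientFormerConfig, EfficientFormerModel

config = EfficientFormerConfig()
model = EfficientFormerModel(config).eval()
pixel_values = torch.randn(1, config.num_channels, 224, 224)
with torch.no_grad():
    outputs = model(pixel_values)
print(outputs.last_hidden_state.shape)             # expected: torch.Size([1, 49, 448]) with the default config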
609
+ @add_start_docstrings(
610
+ """
611
+ EfficientFormer Model transformer with an image classification head on top (a linear layer on top of the
613
+ mean-pooled final hidden state) e.g. for ImageNet.
613
+ """,
614
+ EFFICIENTFORMER_START_DOCSTRING,
615
+ )
616
+ class EfficientFormerForImageClassification(EfficientFormerPreTrainedModel):
617
+ def __init__(self, config: EfficientFormerConfig):
618
+ super().__init__(config)
619
+
620
+ self.num_labels = config.num_labels
621
+ self.efficientformer = EfficientFormerModel(config)
622
+
623
+ # Classifier head
624
+ self.classifier = (
625
+ nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
626
+ )
627
+
628
+ # Initialize weights and apply final processing
629
+ self.post_init()
630
+
631
+ @add_start_docstrings_to_model_forward(EFFICIENTFORMER_INPUTS_DOCSTRING)
632
+ @add_code_sample_docstrings(
633
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
634
+ output_type=ImageClassifierOutput,
635
+ config_class=_CONFIG_FOR_DOC,
636
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
637
+ )
638
+ def forward(
639
+ self,
640
+ pixel_values: Optional[torch.Tensor] = None,
641
+ labels: Optional[torch.Tensor] = None,
642
+ output_attentions: Optional[bool] = None,
643
+ output_hidden_states: Optional[bool] = None,
644
+ return_dict: Optional[bool] = None,
645
+ ) -> Union[tuple, ImageClassifierOutput]:
646
+ r"""
647
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
648
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
649
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
650
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
651
+ """
652
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
653
+
654
+ outputs = self.efficientformer(
655
+ pixel_values,
656
+ output_attentions=output_attentions,
657
+ output_hidden_states=output_hidden_states,
658
+ return_dict=return_dict,
659
+ )
660
+
661
+ sequence_output = outputs[0]
662
+
663
+ logits = self.classifier(sequence_output.mean(-2))
664
+
665
+ loss = None
666
+ if labels is not None:
667
+ if self.config.problem_type is None:
668
+ if self.num_labels == 1:
669
+ self.config.problem_type = "regression"
670
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
671
+ self.config.problem_type = "single_label_classification"
672
+ else:
673
+ self.config.problem_type = "multi_label_classification"
674
+
675
+ if self.config.problem_type == "regression":
676
+ loss_fct = MSELoss()
677
+ if self.num_labels == 1:
678
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
679
+ else:
680
+ loss = loss_fct(logits, labels)
681
+ elif self.config.problem_type == "single_label_classification":
682
+ loss_fct = CrossEntropyLoss()
683
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
684
+ elif self.config.problem_type == "multi_label_classification":
685
+ loss_fct = BCEWithLogitsLoss()
686
+ loss = loss_fct(logits, labels)
687
+
688
+ if not return_dict:
689
+ output = (logits,) + outputs[1:]
690
+ return ((loss,) + output) if loss is not None else output
691
+
692
+ return ImageClassifierOutput(
693
+ loss=loss,
694
+ logits=logits,
695
+ hidden_states=outputs.hidden_states,
696
+ attentions=outputs.attentions,
697
+ )
698
+
699
+
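# NOTE (illustrative sketch): the classification head above is applied to the mean over the token dimension
# (`sequence_output.mean(-2)`) rather than to a [CLS] token. Randomly initialised weights and made-up labels,
# purely to show the loss path (integer labels trigger single-label cross-entropy).
import torch
from transformers import EfficientFormerConfig, EfficientFormerForImageClassification

model = EfficientFormerForImageClassification(EfficientFormerConfig(num_labels=10)).eval()
pixel_values = torch.randn(2, 3, 224, 224)
labels = torch.tensor([1, 7])
outputs = model(pixel_values, labels=labels)
print(outputs.logits.shape, outputs.loss)          # torch.Size([2, 10]) and a scalar cross-entropy loss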
700
+ @dataclass
701
+ class EfficientFormerForImageClassificationWithTeacherOutput(ModelOutput):
702
+ """
703
+ Output type of [`EfficientFormerForImageClassificationWithTeacher`].
704
+
705
+ Args:
706
+ logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
707
+ Prediction scores as the average of the cls_logits and distillation logits.
708
+ cls_logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
709
+ Prediction scores of the classification head (i.e. the linear layer applied to the mean-pooled final hidden
710
+ state; EfficientFormer does not use a class token).
711
+ distillation_logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
712
+ Prediction scores of the distillation head (i.e. the second linear layer, also applied to the mean-pooled
713
+ final hidden state; there is no separate distillation token).
714
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
715
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
716
+ shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
717
+ plus the initial embedding outputs.
718
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
719
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
720
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
721
+ the self-attention heads.
722
+ """
723
+
724
+ logits: torch.FloatTensor = None
725
+ cls_logits: torch.FloatTensor = None
726
+ distillation_logits: torch.FloatTensor = None
727
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
728
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
729
+
730
+
731
+ @add_start_docstrings(
732
+ """
733
+ EfficientFormer Model transformer with image classification heads on top (a classification head and a distillation
734
+ head, each a linear layer applied to the mean-pooled final hidden state) e.g. for
735
+ ImageNet.
736
+
737
+ <Tip warning={true}>
738
+
739
+ This model only supports inference. Fine-tuning with distillation (i.e. with a teacher) is not yet
740
+ supported.
741
+
742
+ </Tip>
743
+ """,
744
+ EFFICIENTFORMER_START_DOCSTRING,
745
+ )
746
+ class EfficientFormerForImageClassificationWithTeacher(EfficientFormerPreTrainedModel):
747
+ def __init__(self, config: EfficientFormerConfig):
748
+ super().__init__(config)
749
+
750
+ self.num_labels = config.num_labels
751
+ self.efficientformer = EfficientFormerModel(config)
752
+
753
+ # Classifier head
754
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
755
+ # Distillation head
756
+ self.distillation_classifier = (
757
+ nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
758
+ )
759
+
760
+ # Initialize weights and apply final processing
761
+ self.post_init()
762
+
763
+ @add_start_docstrings_to_model_forward(EFFICIENTFORMER_INPUTS_DOCSTRING)
764
+ @add_code_sample_docstrings(
765
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
766
+ output_type=EfficientFormerForImageClassificationWithTeacherOutput,
767
+ config_class=_CONFIG_FOR_DOC,
768
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
769
+ )
770
+ def forward(
771
+ self,
772
+ pixel_values: Optional[torch.Tensor] = None,
773
+ output_attentions: Optional[bool] = None,
774
+ output_hidden_states: Optional[bool] = None,
775
+ return_dict: Optional[bool] = None,
776
+ ) -> Union[tuple, EfficientFormerForImageClassificationWithTeacherOutput]:
777
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
778
+ outputs = self.efficientformer(
779
+ pixel_values,
780
+ output_attentions=output_attentions,
781
+ output_hidden_states=output_hidden_states,
782
+ return_dict=return_dict,
783
+ )
784
+
785
+ sequence_output = outputs[0]
786
+
787
+ cls_logits = self.classifier(sequence_output.mean(-2))
788
+ distillation_logits = self.distillation_classifier(sequence_output.mean(-2))
789
+
790
+ # during inference, return the average of both classifier predictions
791
+ logits = (cls_logits + distillation_logits) / 2
792
+
793
+ if not return_dict:
794
+ output = (logits, cls_logits, distillation_logits) + outputs[1:]
795
+ return output
796
+
797
+ return EfficientFormerForImageClassificationWithTeacherOutput(
798
+ logits=logits,
799
+ cls_logits=cls_logits,
800
+ distillation_logits=distillation_logits,
801
+ hidden_states=outputs.hidden_states,
802
+ attentions=outputs.attentions,
803
+ )
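# NOTE (illustrative sketch): inference with the distillation variant defined above. The returned `logits` are the
# element-wise average of the classification and distillation heads, as in the forward pass. Randomly initialised
# weights, purely to show the output structure.
import torch
from transformers import EfficientFormerConfig, EfficientFormerForImageClassificationWithTeacher

model = EfficientFormerForImageClassificationWithTeacher(EfficientFormerConfig(num_labels=1000)).eval()
with torch.no_grad():
    out = model(torch.randn(1, 3, 224, 224))
assert torch.allclose(out.logits, (out.cls_logits + out.distillation_logits) / 2)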
venv/lib/python3.10/site-packages/transformers/models/efficientformer/modeling_tf_efficientformer.py ADDED
@@ -0,0 +1,1193 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 Snapchat Research and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ TensorFlow EfficientFormer model."""
16
+
17
+ import itertools
18
+ from dataclasses import dataclass
19
+ from typing import Optional, Tuple, Union
20
+
21
+ import tensorflow as tf
22
+
23
+ from ...activations_tf import ACT2FN
24
+ from ...modeling_tf_outputs import (
25
+ TFBaseModelOutput,
26
+ TFBaseModelOutputWithPooling,
27
+ TFImageClassifierOutput,
28
+ )
29
+ from ...modeling_tf_utils import (
30
+ TFPreTrainedModel,
31
+ TFSequenceClassificationLoss,
32
+ get_initializer,
33
+ keras,
34
+ keras_serializable,
35
+ unpack_inputs,
36
+ )
37
+ from ...tf_utils import shape_list, stable_softmax
38
+ from ...utils import (
39
+ ModelOutput,
40
+ add_code_sample_docstrings,
41
+ add_start_docstrings,
42
+ add_start_docstrings_to_model_forward,
43
+ logging,
44
+ )
45
+ from .configuration_efficientformer import EfficientFormerConfig
46
+
47
+
48
+ logger = logging.get_logger(__name__)
49
+
50
+ # General docstring
51
+ _CONFIG_FOR_DOC = "EfficientFormerConfig"
52
+
53
+ # Base docstring
54
+ _CHECKPOINT_FOR_DOC = "snap-research/efficientformer-l1-300"
55
+ _EXPECTED_OUTPUT_SHAPE = [1, 49, 448]
56
+
57
+ # Image classification docstring
58
+ _IMAGE_CLASS_CHECKPOINT = "snap-research/efficientformer-l1-300"
59
+ _IMAGE_CLASS_EXPECTED_OUTPUT = "LABEL_281"
60
+
61
+
62
+ from ..deprecated._archive_maps import TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
63
+
64
+
65
+ class TFEfficientFormerPatchEmbeddings(keras.layers.Layer):
66
+ """
67
+ This class performs downsampling between two stages. For the input tensor with the shape [batch_size, num_channels,
68
+ height, width] it produces output tensor with the shape [batch_size, num_channels, height/stride, width/stride]
69
+ """
70
+
71
+ def __init__(
72
+ self, config: EfficientFormerConfig, num_channels: int, embed_dim: int, apply_norm: bool = True, **kwargs
73
+ ) -> None:
74
+ super().__init__(**kwargs)
75
+ self.num_channels = num_channels
76
+
77
+ self.padding = keras.layers.ZeroPadding2D(padding=config.downsample_pad)
78
+ self.projection = keras.layers.Conv2D(
79
+ filters=embed_dim,
80
+ kernel_size=config.downsample_patch_size,
81
+ strides=config.downsample_stride,
82
+ padding="valid",
83
+ name="projection",
84
+ )
85
+ # Use same default momentum and epsilon as PyTorch equivalent for BatchNormalization
86
+ self.norm = (
87
+ keras.layers.BatchNormalization(axis=-1, epsilon=config.batch_norm_eps, momentum=0.9, name="norm")
88
+ if apply_norm
89
+ else tf.identity
90
+ )
91
+ self.embed_dim = embed_dim
92
+
93
+ def call(self, pixel_values: tf.Tensor, training: bool = False) -> tf.Tensor:
94
+ tf.debugging.assert_shapes(
95
+ [(pixel_values, (..., None, None, self.num_channels))],
96
+ message="Make sure that the channel dimension of the pixel values match with the one set in the configuration.",
97
+ )
98
+ embeddings = self.projection(self.padding(pixel_values))
99
+ embeddings = self.norm(embeddings, training=training)
100
+ return embeddings
101
+
102
+ def build(self, input_shape=None):
103
+ if self.built:
104
+ return
105
+ self.built = True
106
+ if getattr(self, "projection", None) is not None:
107
+ with tf.name_scope(self.projection.name):
108
+ self.projection.build([None, None, None, self.num_channels])
109
+ if getattr(self, "norm", None) is not None:
110
+ if hasattr(self.norm, "name"):
111
+ with tf.name_scope(self.norm.name):
112
+ self.norm.build([None, None, None, self.embed_dim])
113
+
114
+
115
+ class TFEfficientFormerSelfAttention(keras.layers.Layer):
116
+ def __init__(
117
+ self,
118
+ dim: int,
119
+ key_dim: int,
120
+ num_heads: int,
121
+ attention_ratio: int,
122
+ resolution: int,
123
+ config: EfficientFormerConfig,
124
+ **kwargs,
125
+ ):
126
+ super().__init__(**kwargs)
127
+
128
+ self.num_heads = num_heads
129
+ self.key_dim = key_dim
130
+ self.attention_ratio = attention_ratio
131
+ self.scale = key_dim**-0.5
132
+ self.total_key_dim = key_dim * num_heads
133
+ self.expanded_key_dim = int(attention_ratio * key_dim)
134
+ self.total_expanded_key_dim = int(self.expanded_key_dim * num_heads)
135
+ hidden_size = self.total_expanded_key_dim + self.total_key_dim * 2
136
+
137
+ self.qkv = keras.layers.Dense(
138
+ units=hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="qkv"
139
+ )
140
+ self.projection = keras.layers.Dense(
141
+ units=dim, kernel_initializer=get_initializer(config.initializer_range), name="projection"
142
+ )
143
+ self.resolution = resolution
144
+ self.dim = dim
145
+
146
+ def build(self, input_shape: tf.TensorShape) -> None:
147
+ points = list(itertools.product(range(self.resolution), range(self.resolution)))
148
+ num_points = len(points)
149
+ attention_offsets = {}
150
+
151
+ idxs = []
152
+
153
+ for point_1 in points:
154
+ for point_2 in points:
155
+ offset = (abs(point_1[0] - point_2[0]), abs(point_1[1] - point_2[1]))
156
+ if offset not in attention_offsets:
157
+ attention_offsets[offset] = len(attention_offsets)
158
+ idxs.append(attention_offsets[offset])
159
+
160
+ self.attention_biases = self.add_weight(
161
+ shape=(self.num_heads, len(attention_offsets)),
162
+ initializer=keras.initializers.zeros(),
163
+ trainable=True,
164
+ name="attention_biases",
165
+ )
166
+ self.attention_bias_idxs = self.add_weight(
167
+ shape=(num_points, num_points),
168
+ trainable=False,
169
+ dtype=tf.int32,
170
+ name="attention_bias_idxs",
171
+ )
172
+
173
+ self.attention_bias_idxs.assign(tf.reshape(tf.cast(idxs, dtype=tf.int32), (num_points, num_points)))
174
+
175
+ if self.built:
176
+ return
177
+ self.built = True
178
+ if getattr(self, "qkv", None) is not None:
179
+ with tf.name_scope(self.qkv.name):
180
+ self.qkv.build([None, None, self.dim])
181
+ if getattr(self, "projection", None) is not None:
182
+ with tf.name_scope(self.projection.name):
183
+ self.projection.build([None, None, self.total_expanded_key_dim])
184
+
185
+ def call(
186
+ self, hidden_states: tf.Tensor, output_attentions: bool = False, training: bool = False
187
+ ) -> Tuple[tf.Tensor]:
188
+ batch_size, sequence_length, *_ = shape_list(hidden_states)
189
+ qkv = self.qkv(inputs=hidden_states)
190
+
191
+ query_layer, key_layer, value_layer = tf.split(
192
+ tf.reshape(tensor=qkv, shape=(batch_size, sequence_length, self.num_heads, -1)),
193
+ num_or_size_splits=[self.key_dim, self.key_dim, self.expanded_key_dim],
194
+ axis=3,
195
+ )
196
+
197
+ query_layer = tf.transpose(query_layer, perm=[0, 2, 1, 3])
198
+ key_layer = tf.transpose(key_layer, perm=[0, 2, 1, 3])
199
+ value_layer = tf.transpose(value_layer, perm=[0, 2, 1, 3])
200
+
201
+ attention_probs = tf.matmul(query_layer, tf.transpose(key_layer, perm=[0, 1, 3, 2]))
202
+ scale = tf.cast(self.scale, dtype=attention_probs.dtype)
203
+ attention_probs = tf.multiply(attention_probs, scale)
204
+
205
+ attention_biases = tf.gather(params=self.attention_biases, indices=self.attention_bias_idxs, axis=1)
206
+ attention_probs = attention_probs + attention_biases
207
+ attention_probs = stable_softmax(logits=attention_probs, axis=-1)
208
+
209
+ context_layer = tf.matmul(attention_probs, value_layer)
210
+ context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3])
211
+
212
+ context_layer = tf.reshape(
213
+ tensor=context_layer, shape=(batch_size, sequence_length, self.total_expanded_key_dim)
214
+ )
215
+ context_layer = self.projection(context_layer)
216
+
217
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
218
+
219
+ return outputs
220
+
221
+
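# NOTE (illustrative sketch): the TensorFlow port stores the bias lookup table as a non-trainable weight and uses
# tf.gather instead of PyTorch's advanced indexing. A quick check (hypothetical shapes) that
# tf.gather(params, idxs, axis=1) matches `attention_biases[:, attention_bias_idxs]`:
import numpy as np
import tensorflow as tf

num_heads, num_offsets, num_points = 2, 4, 9
biases = np.random.rand(num_heads, num_offsets).astype(np.float32)
idxs = np.random.randint(0, num_offsets, size=(num_points, num_points))

tf_result = tf.gather(tf.constant(biases), tf.constant(idxs), axis=1).numpy()
np_result = biases[:, idxs]                        # NumPy mirrors the PyTorch indexing semantics
print(np.allclose(tf_result, np_result))           # True; both have shape (num_heads, num_points, num_points)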
222
+ class TFEfficientFormerConvStem(keras.layers.Layer):
223
+ def __init__(self, config: EfficientFormerConfig, out_channels: int, **kwargs):
224
+ super().__init__(**kwargs)
225
+
226
+ self.padding = keras.layers.ZeroPadding2D(padding=1)
227
+ self.convolution1 = keras.layers.Conv2D(
228
+ filters=out_channels // 2, kernel_size=3, strides=2, padding="valid", name="convolution1"
229
+ )
230
+ # Use same default momentum and epsilon as PyTorch equivalent for BatchNormalization
231
+ self.batchnorm_before = keras.layers.BatchNormalization(
232
+ axis=-1, epsilon=config.batch_norm_eps, momentum=0.9, name="batchnorm_before"
233
+ )
234
+
235
+ self.convolution2 = keras.layers.Conv2D(
236
+ filters=out_channels,
237
+ kernel_size=3,
238
+ strides=2,
239
+ padding="valid",
240
+ name="convolution2",
241
+ )
242
+ # Use same default momentum and epsilon as PyTorch equivalent for BatchNormalization
243
+ self.batchnorm_after = keras.layers.BatchNormalization(
244
+ axis=-1, epsilon=config.batch_norm_eps, momentum=0.9, name="batchnorm_after"
245
+ )
246
+
247
+ self.activation = keras.layers.Activation(activation=keras.activations.relu, name="activation")
248
+ self.out_channels = out_channels
249
+ self.config = config
250
+
251
+ def call(self, pixel_values: tf.Tensor, training: bool = False) -> tf.Tensor:
252
+ features = self.batchnorm_before(self.convolution1(self.padding(pixel_values)), training=training)
253
+ features = self.activation(features)
254
+ features = self.batchnorm_after(self.convolution2(self.padding(features)), training=training)
255
+ features = self.activation(features)
256
+ return features
257
+
258
+ def build(self, input_shape=None):
259
+ if self.built:
260
+ return
261
+ self.built = True
262
+ if getattr(self, "convolution1", None) is not None:
263
+ with tf.name_scope(self.convolution1.name):
264
+ self.convolution1.build([None, None, None, self.config.num_channels])
265
+ if getattr(self, "batchnorm_before", None) is not None:
266
+ with tf.name_scope(self.batchnorm_before.name):
267
+ self.batchnorm_before.build([None, None, None, self.out_channels // 2])
268
+ if getattr(self, "convolution2", None) is not None:
269
+ with tf.name_scope(self.convolution2.name):
270
+ self.convolution2.build([None, None, None, self.out_channels // 2])
271
+ if getattr(self, "batchnorm_after", None) is not None:
272
+ with tf.name_scope(self.batchnorm_after.name):
273
+ self.batchnorm_after.build([None, None, None, self.out_channels])
274
+ if getattr(self, "activation", None) is not None:
275
+ with tf.name_scope(self.activation.name):
276
+ self.activation.build(None)
277
+
278
+
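# NOTE (illustrative sketch): the stem uses explicit ZeroPadding2D plus padding="valid" because Keras' "same"
# padding pads asymmetrically for stride-2 convolutions, whereas PyTorch's Conv2d(padding=1) always pads one
# pixel on every side. Output spatial size of the stem, assuming a 224x224 input:
height = width = 224
for _ in range(2):                                 # two stride-2, kernel-3, pad-1 convolutions
    height = (height + 2 * 1 - 3) // 2 + 1
    width = (width + 2 * 1 - 3) // 2 + 1
print(height, width)                               # 56 56 -> the stem downsamples by 4x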
279
+ class TFEfficientFormerPooling(keras.layers.Layer):
280
+ def __init__(self, pool_size: int, **kwargs):
281
+ super().__init__(**kwargs)
282
+ self.pool = keras.layers.AveragePooling2D(pool_size=pool_size, strides=1, padding="same")
283
+
284
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
285
+ output = self.pool(hidden_states)
286
+ output = output - hidden_states
287
+ return output
288
+
289
+
290
+ class TFEfficientFormerDenseMlp(keras.layers.Layer):
291
+ def __init__(
292
+ self,
293
+ config: EfficientFormerConfig,
294
+ in_features: int,
295
+ hidden_features: Optional[int] = None,
296
+ out_features: Optional[int] = None,
297
+ **kwargs,
298
+ ):
299
+ super().__init__(**kwargs)
300
+ out_features = out_features or in_features
301
+ hidden_features = hidden_features or in_features
302
+
303
+ self.linear_in = keras.layers.Dense(
304
+ units=hidden_features, kernel_initializer=get_initializer(config.initializer_range), name="linear_in"
305
+ )
306
+ self.activation = ACT2FN[config.hidden_act]
307
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
308
+
309
+ self.linear_out = keras.layers.Dense(
310
+ units=out_features, kernel_initializer=get_initializer(config.initializer_range), name="linear_out"
311
+ )
312
+ self.hidden_features = hidden_features
313
+ self.in_features = in_features
314
+
315
+ def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor:
316
+ hidden_states = self.linear_in(inputs=hidden_states)
317
+ hidden_states = self.activation(hidden_states)
318
+ hidden_states = self.dropout(inputs=hidden_states, training=training)
319
+ hidden_states = self.linear_out(inputs=hidden_states)
320
+ hidden_states = self.dropout(inputs=hidden_states, training=training)
321
+
322
+ return hidden_states
323
+
324
+ def build(self, input_shape=None):
325
+ if self.built:
326
+ return
327
+ self.built = True
328
+ if getattr(self, "linear_in", None) is not None:
329
+ with tf.name_scope(self.linear_in.name):
330
+ self.linear_in.build([None, None, self.in_features])
331
+ if getattr(self, "linear_out", None) is not None:
332
+ with tf.name_scope(self.linear_out.name):
333
+ self.linear_out.build([None, None, self.hidden_features])
334
+
335
+
336
+ class TFEfficientFormerConvMlp(keras.layers.Layer):
337
+ def __init__(
338
+ self,
339
+ config: EfficientFormerConfig,
340
+ in_features: int,
341
+ hidden_features: Optional[int] = None,
342
+ out_features: Optional[int] = None,
343
+ drop: float = 0.0,
344
+ **kwargs,
345
+ ):
346
+ super().__init__(**kwargs)
347
+ out_features = out_features or in_features
348
+ hidden_features = hidden_features or in_features
349
+
350
+ self.convolution1 = keras.layers.Conv2D(
351
+ filters=hidden_features,
352
+ kernel_size=1,
353
+ name="convolution1",
354
+ padding="valid",
355
+ )
356
+
357
+ self.activation = ACT2FN[config.hidden_act]
358
+
359
+ self.convolution2 = keras.layers.Conv2D(
360
+ filters=out_features,
361
+ kernel_size=1,
362
+ name="convolution2",
363
+ padding="valid",
364
+ )
365
+
366
+ self.dropout = keras.layers.Dropout(rate=drop)
367
+
368
+ # Use same default momentum and epsilon as PyTorch equivalent for BatchNormalization
369
+ self.batchnorm_before = keras.layers.BatchNormalization(
370
+ axis=-1, epsilon=config.batch_norm_eps, momentum=0.9, name="batchnorm_before"
371
+ )
372
+ # Use same default momentum and epsilon as PyTorch equivalent for BatchNormalization
373
+ self.batchnorm_after = keras.layers.BatchNormalization(
374
+ axis=-1, epsilon=config.batch_norm_eps, momentum=0.9, name="batchnorm_after"
375
+ )
376
+ self.hidden_features = hidden_features
377
+ self.in_features = in_features
378
+ self.out_features = out_features
379
+
380
+ def call(self, hidden_state: tf.Tensor, training: bool = False) -> tf.Tensor:
381
+ hidden_state = self.convolution1(hidden_state)
382
+ hidden_state = self.batchnorm_before(hidden_state, training=training)
383
+ hidden_state = self.activation(hidden_state)
384
+ hidden_state = self.dropout(hidden_state, training=training)
385
+ hidden_state = self.convolution2(hidden_state)
386
+ hidden_state = self.batchnorm_after(hidden_state, training=training)
387
+ hidden_state = self.dropout(hidden_state, training=training)
388
+ return hidden_state
389
+
390
+ def build(self, input_shape=None):
391
+ if self.built:
392
+ return
393
+ self.built = True
394
+ if getattr(self, "convolution1", None) is not None:
395
+ with tf.name_scope(self.convolution1.name):
396
+ self.convolution1.build([None, None, None, self.in_features])
397
+ if getattr(self, "convolution2", None) is not None:
398
+ with tf.name_scope(self.convolution2.name):
399
+ self.convolution2.build([None, None, None, self.hidden_features])
400
+ if getattr(self, "batchnorm_before", None) is not None:
401
+ with tf.name_scope(self.batchnorm_before.name):
402
+ self.batchnorm_before.build([None, None, None, self.hidden_features])
403
+ if getattr(self, "batchnorm_after", None) is not None:
404
+ with tf.name_scope(self.batchnorm_after.name):
405
+ self.batchnorm_after.build([None, None, None, self.out_features])
406
+
407
+
408
+ # Copied from transformers.models.convnext.modeling_tf_convnext.TFConvNextDropPath with ConvNext->EfficientFormer
409
+ class TFEfficientFormerDropPath(keras.layers.Layer):
410
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
411
+ References:
412
+ (1) github.com:rwightman/pytorch-image-models
413
+ """
414
+
415
+ def __init__(self, drop_path: float, **kwargs):
416
+ super().__init__(**kwargs)
417
+ self.drop_path = drop_path
418
+
419
+ def call(self, x: tf.Tensor, training=None):
420
+ if training:
421
+ keep_prob = 1 - self.drop_path
422
+ shape = (tf.shape(x)[0],) + (1,) * (len(tf.shape(x)) - 1)
423
+ random_tensor = keep_prob + tf.random.uniform(shape, 0, 1)
424
+ random_tensor = tf.floor(random_tensor)
425
+ return (x / keep_prob) * random_tensor
426
+ return x
427
+
428
+
429
+ class TFEfficientFormerFlat(keras.layers.Layer):
430
+ def __init__(self, **kwargs):
431
+ super().__init__(**kwargs)
432
+
433
+ def call(self, hidden_states: tf.Tensor) -> Tuple[tf.Tensor]:
434
+ batch_size, _, _, in_channels = shape_list(hidden_states)
435
+ hidden_states = tf.reshape(hidden_states, shape=[batch_size, -1, in_channels])
436
+ return hidden_states
437
+
438
+
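# NOTE (illustrative sketch): the flattening step in channels-last layout. The real layer derives the shapes
# dynamically via shape_list; here the sizes are hard-coded to the 7x7x448 map produced by the last 4D stage of
# the default configuration.
import tensorflow as tf

feature_map = tf.random.uniform((1, 7, 7, 448))
tokens = tf.reshape(feature_map, (-1, 7 * 7, feature_map.shape[-1]))
print(tokens.shape)                                # (1, 49, 448), cf. _EXPECTED_OUTPUT_SHAPE above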
439
+ class TFEfficientFormerMeta3D(keras.layers.Layer):
440
+ def __init__(self, config: EfficientFormerConfig, dim: int, drop_path: float = 0.0, **kwargs):
441
+ super().__init__(**kwargs)
442
+
443
+ self.token_mixer = TFEfficientFormerSelfAttention(
444
+ dim=config.dim,
445
+ key_dim=config.key_dim,
446
+ num_heads=config.num_attention_heads,
447
+ attention_ratio=config.attention_ratio,
448
+ resolution=config.resolution,
449
+ name="token_mixer",
450
+ config=config,
451
+ )
452
+ self.dim = dim
453
+ self.config = config
454
+
455
+ self.layernorm1 = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm1")
456
+ self.layernorm2 = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm2")
457
+ mlp_hidden_dim = int(dim * config.mlp_expansion_ratio)
458
+ self.mlp = TFEfficientFormerDenseMlp(config, in_features=dim, hidden_features=mlp_hidden_dim, name="mlp")
459
+
460
+ # Using `layers.Activation` instead of `tf.identity` to better control `training` behavior.
461
+ self.drop_path = (
462
+ TFEfficientFormerDropPath(drop_path)
463
+ if drop_path > 0.0
464
+ else keras.layers.Activation("linear", name="drop_path")
465
+ )
466
+ self.config = config
467
+
468
+ def build(self, input_shape=None):
469
+ self.layer_scale_1 = None
470
+ self.layer_scale_2 = None
471
+
472
+ if self.config.use_layer_scale:
473
+ self.layer_scale_1 = self.add_weight(
474
+ shape=(self.dim,),
475
+ initializer=keras.initializers.Constant(value=self.config.layer_scale_init_value),
476
+ trainable=True,
477
+ name="layer_scale_1",
478
+ )
479
+ self.layer_scale_2 = self.add_weight(
480
+ shape=(self.dim,),
481
+ initializer=keras.initializers.Constant(value=self.config.layer_scale_init_value),
482
+ trainable=True,
483
+ name="layer_scale_2",
484
+ )
485
+
486
+ if self.built:
487
+ return
488
+ self.built = True
489
+ if getattr(self, "token_mixer", None) is not None:
490
+ with tf.name_scope(self.token_mixer.name):
491
+ self.token_mixer.build(None)
492
+ if getattr(self, "layernorm1", None) is not None:
493
+ with tf.name_scope(self.layernorm1.name):
494
+ self.layernorm1.build([None, None, self.dim])
495
+ if getattr(self, "layernorm2", None) is not None:
496
+ with tf.name_scope(self.layernorm2.name):
497
+ self.layernorm2.build([None, None, self.dim])
498
+ if getattr(self, "mlp", None) is not None:
499
+ with tf.name_scope(self.mlp.name):
500
+ self.mlp.build(None)
501
+ if getattr(self, "drop_path", None) is not None:
502
+ with tf.name_scope(self.drop_path.name):
503
+ self.drop_path.build(None)
504
+
505
+ def call(
506
+ self, hidden_states: tf.Tensor, output_attentions: bool = False, training: bool = False
507
+ ) -> Tuple[tf.Tensor]:
508
+ self_attention_outputs = self.token_mixer(
509
+ hidden_states=self.layernorm1(hidden_states, training=training),
510
+ output_attentions=output_attentions,
511
+ training=training,
512
+ )
513
+
514
+ attention_output = self_attention_outputs[0]
515
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
516
+
517
+ if self.config.use_layer_scale:
518
+ layer_output = hidden_states + self.drop_path(
519
+ tf.expand_dims(tf.expand_dims(self.layer_scale_1, 0), 0) * attention_output,
520
+ training=training,
521
+ )
522
+ layer_output = layer_output + self.drop_path(
523
+ tf.expand_dims(tf.expand_dims(self.layer_scale_2, 0), 0)
524
+ * self.mlp(hidden_states=self.layernorm2(inputs=layer_output, training=training), training=training),
525
+ training=training,
526
+ )
527
+ else:
528
+ layer_output = hidden_states + self.drop_path(attention_output, training=training)
529
+ layer_output = layer_output + self.drop_path(
530
+ self.mlp(hidden_states=self.layernorm2(inputs=layer_output, training=training), training=training),
531
+ training=training,
532
+ )
533
+
534
+ outputs = (layer_output,) + outputs
535
+
536
+ return outputs
537
+
538
+
539
+ class TFEfficientFormerMeta3DLayers(keras.layers.Layer):
540
+ def __init__(self, config: EfficientFormerConfig, **kwargs):
541
+ super().__init__(**kwargs)
542
+ drop_paths = [
543
+ config.drop_path_rate * (block_idx + sum(config.depths[:-1]))
544
+ for block_idx in range(config.num_meta3d_blocks)
545
+ ]
546
+ self.blocks = [
547
+ TFEfficientFormerMeta3D(config, config.hidden_sizes[-1], drop_path=drop_path, name=f"blocks.{i}")
548
+ for i, drop_path in enumerate(drop_paths)
549
+ ]
550
+
551
+ def call(
552
+ self, hidden_states: tf.Tensor, output_attentions: bool = False, training: bool = False
553
+ ) -> Tuple[tf.Tensor]:
554
+ all_attention_outputs = () if output_attentions else None
555
+
556
+ for i, layer_module in enumerate(self.blocks):
557
+ if isinstance(hidden_states, tuple):
558
+ hidden_states = hidden_states[0]
559
+
560
+ hidden_states = layer_module(
561
+ hidden_states=hidden_states, output_attentions=output_attentions, training=training
562
+ )
563
+ if output_attentions:
564
+ all_attention_outputs = all_attention_outputs + (hidden_states[1],)
565
+
566
+ if output_attentions:
567
+ outputs = (hidden_states[0],) + all_attention_outputs
568
+ return outputs
569
+
570
+ return hidden_states
571
+
572
+ def build(self, input_shape=None):
573
+ if self.built:
574
+ return
575
+ self.built = True
576
+ if getattr(self, "blocks", None) is not None:
577
+ for layer in self.blocks:
578
+ with tf.name_scope(layer.name):
579
+ layer.build(None)
580
+
581
+
582
+ class TFEfficientFormerMeta4D(keras.layers.Layer):
583
+ def __init__(self, config: EfficientFormerConfig, dim: int, drop_path: float = 0.0, **kwargs):
584
+ super().__init__(**kwargs)
585
+ pool_size = config.pool_size if config.pool_size is not None else 3
586
+ self.token_mixer = TFEfficientFormerPooling(pool_size=pool_size, name="token_mixer")
587
+ self.dim = dim
588
+ mlp_hidden_dim = int(dim * config.mlp_expansion_ratio)
589
+ self.mlp = TFEfficientFormerConvMlp(
590
+ config=config, in_features=dim, hidden_features=mlp_hidden_dim, drop=config.hidden_dropout_prob, name="mlp"
591
+ )
592
+
593
+ self.drop_path = (
594
+ TFEfficientFormerDropPath(drop_path, name="drop_path")
595
+ if drop_path > 0.0
596
+ else keras.layers.Activation("linear", name="drop_path")
597
+ )
598
+ self.config = config
599
+
600
+ def build(self, input_shape=None):
601
+ self.layer_scale_1 = None
602
+ self.layer_scale_2 = None
603
+
604
+ if self.config.use_layer_scale:
605
+ self.layer_scale_1 = self.add_weight(
606
+ shape=(self.dim),
607
+ initializer=keras.initializers.Constant(value=self.config.layer_scale_init_value),
608
+ trainable=True,
609
+ name="layer_scale_1",
610
+ )
611
+ self.layer_scale_2 = self.add_weight(
612
+ shape=(self.dim),
613
+ initializer=keras.initializers.Constant(value=self.config.layer_scale_init_value),
614
+ trainable=True,
615
+ name="layer_scale_2",
616
+ )
617
+
618
+ if self.built:
619
+ return
620
+ self.built = True
621
+ if getattr(self, "token_mixer", None) is not None:
622
+ with tf.name_scope(self.token_mixer.name):
623
+ self.token_mixer.build(None)
624
+ if getattr(self, "mlp", None) is not None:
625
+ with tf.name_scope(self.mlp.name):
626
+ self.mlp.build(None)
627
+ if getattr(self, "drop_path", None) is not None:
628
+ with tf.name_scope(self.drop_path.name):
629
+ self.drop_path.build(None)
630
+
631
+ def call(self, hidden_states: tf.Tensor, training: bool = False) -> Tuple[tf.Tensor]:
632
+ outputs = self.token_mixer(hidden_states)
633
+
634
+ if self.config.use_layer_scale:
635
+ layer_output = hidden_states + self.drop_path(
636
+ tf.expand_dims(tf.expand_dims(self.layer_scale_1, 0), 0) * outputs,
637
+ training=training,
638
+ )
639
+
640
+ layer_output = layer_output + self.drop_path(
641
+ tf.expand_dims(tf.expand_dims(self.layer_scale_2, 0), 0)
642
+ * self.mlp(hidden_state=layer_output, training=training),
643
+ training=training,
644
+ )
645
+
646
+ else:
647
+ layer_output = hidden_states + self.drop_path(outputs, training=training)
648
+ layer_output = layer_output + self.drop_path(
649
+ self.mlp(hidden_state=layer_output, training=training), training=training
650
+ )
651
+
652
+ return layer_output
653
+
654
+
655
+ class TFEfficientFormerMeta4DLayers(keras.layers.Layer):
656
+ def __init__(self, config: EfficientFormerConfig, stage_idx: int, **kwargs):
657
+ super().__init__(**kwargs)
658
+ num_layers = (
659
+ config.depths[stage_idx] if stage_idx != -1 else config.depths[stage_idx] - config.num_meta3d_blocks
660
+ )
661
+ drop_paths = [
662
+ config.drop_path_rate * (block_idx + sum(config.depths[:stage_idx])) for block_idx in range(num_layers)
663
+ ]
664
+
665
+ self.blocks = [
666
+ TFEfficientFormerMeta4D(
667
+ config=config, dim=config.hidden_sizes[stage_idx], drop_path=drop_paths[i], name=f"blocks.{i}"
668
+ )
669
+ for i in range(len(drop_paths))
670
+ ]
671
+
672
+ def call(self, hidden_states: tf.Tensor, training: bool = False) -> Tuple[tf.Tensor]:
673
+ for layer_module in self.blocks:
674
+ hidden_states = layer_module(hidden_states=hidden_states, training=training)
675
+ return hidden_states
676
+
677
+ def build(self, input_shape=None):
678
+ if self.built:
679
+ return
680
+ self.built = True
681
+ if getattr(self, "blocks", None) is not None:
682
+ for layer in self.blocks:
683
+ with tf.name_scope(layer.name):
684
+ layer.build(None)
685
+
686
+
687
+ class TFEfficientFormerIntermediateStage(keras.layers.Layer):
688
+ def __init__(self, config: EfficientFormerConfig, index: int, **kwargs):
689
+ super().__init__(**kwargs)
690
+ self.meta4D_layers = TFEfficientFormerMeta4DLayers(config=config, stage_idx=index, name="meta4D_layers")
691
+
692
+ def call(self, hidden_states: tf.Tensor, training: bool = False) -> Tuple[tf.Tensor]:
693
+ hidden_states = self.meta4D_layers(hidden_states=hidden_states, training=training)
694
+ return hidden_states
695
+
696
+ def build(self, input_shape=None):
697
+ if self.built:
698
+ return
699
+ self.built = True
700
+ if getattr(self, "meta4D_layers", None) is not None:
701
+ with tf.name_scope(self.meta4D_layers.name):
702
+ self.meta4D_layers.build(None)
703
+
704
+
705
+ class TFEfficientFormerLastStage(keras.layers.Layer):
706
+ def __init__(self, config: EfficientFormerConfig, **kwargs):
707
+ super().__init__(**kwargs)
708
+ self.meta4D_layers = TFEfficientFormerMeta4DLayers(config=config, stage_idx=-1, name="meta4D_layers")
709
+ self.flat = TFEfficientFormerFlat(name="flat")
710
+ self.meta3D_layers = TFEfficientFormerMeta3DLayers(config, name="meta3D_layers")
711
+
712
+ def call(
713
+ self, hidden_states: tf.Tensor, output_attentions: bool = False, training: bool = False
714
+ ) -> Tuple[tf.Tensor]:
715
+ hidden_states = self.meta4D_layers(hidden_states=hidden_states, training=training)
716
+ hidden_states = self.flat(hidden_states=hidden_states)
717
+ hidden_states = self.meta3D_layers(
718
+ hidden_states=hidden_states, output_attentions=output_attentions, training=training
719
+ )
720
+
721
+ return hidden_states
722
+
723
+ def build(self, input_shape=None):
724
+ if self.built:
725
+ return
726
+ self.built = True
727
+ if getattr(self, "meta4D_layers", None) is not None:
728
+ with tf.name_scope(self.meta4D_layers.name):
729
+ self.meta4D_layers.build(None)
730
+ if getattr(self, "flat", None) is not None:
731
+ with tf.name_scope(self.flat.name):
732
+ self.flat.build(None)
733
+ if getattr(self, "meta3D_layers", None) is not None:
734
+ with tf.name_scope(self.meta3D_layers.name):
735
+ self.meta3D_layers.build(None)
736
+
737
+
738
+ class TFEfficientFormerEncoder(keras.layers.Layer):
739
+ def __init__(self, config: EfficientFormerConfig, **kwargs):
740
+ super().__init__(**kwargs)
741
+
742
+ self.config = config
743
+ num_intermediate_stages = len(config.depths) - 1
744
+ downsamples = [
745
+ config.downsamples[i] or config.hidden_sizes[i] != config.hidden_sizes[i + 1]
746
+ for i in range(num_intermediate_stages)
747
+ ]
748
+
749
+ intermediate_stages = []
750
+ layer_count = -1
751
+ for i in range(num_intermediate_stages):
752
+ layer_count += 1
753
+ intermediate_stages.append(
754
+ TFEfficientFormerIntermediateStage(config, i, name=f"intermediate_stages.{layer_count}")
755
+ )
756
+ if downsamples[i]:
757
+ layer_count += 1
758
+ intermediate_stages.append(
759
+ TFEfficientFormerPatchEmbeddings(
760
+ config,
761
+ config.hidden_sizes[i],
762
+ config.hidden_sizes[i + 1],
763
+ name=f"intermediate_stages.{layer_count}",
764
+ )
765
+ )
766
+ self.intermediate_stages = intermediate_stages
767
+ self.last_stage = TFEfficientFormerLastStage(config, name="last_stage")
768
+
769
+ def call(
770
+ self,
771
+ hidden_states: tf.Tensor,
772
+ output_hidden_states: bool,
773
+ output_attentions: bool,
774
+ return_dict: bool,
775
+ training: bool = False,
776
+ ) -> TFBaseModelOutput:
777
+ all_hidden_states = () if output_hidden_states else None
778
+ all_self_attentions = () if output_attentions else None
779
+
780
+ if output_hidden_states:
781
+ all_hidden_states = all_hidden_states + (hidden_states,)
782
+
783
+ for layer_module in self.intermediate_stages:
784
+ hidden_states = layer_module(hidden_states, training=training)
785
+
786
+ if output_hidden_states:
787
+ all_hidden_states = all_hidden_states + (hidden_states,)
788
+
789
+ layer_output = self.last_stage(hidden_states, output_attentions=output_attentions, training=training)
790
+
791
+ if output_attentions:
792
+ all_self_attentions = all_self_attentions + layer_output[1:]
793
+
794
+ if output_hidden_states:
795
+ all_hidden_states = all_hidden_states + (layer_output[0],)
796
+
797
+ if not return_dict:
798
+ return tuple(v for v in [layer_output[0], all_hidden_states, all_self_attentions] if v is not None)
799
+
800
+ return TFBaseModelOutput(
801
+ last_hidden_state=layer_output[0],
802
+ hidden_states=all_hidden_states,
803
+ attentions=all_self_attentions,
804
+ )
805
+
806
+ def build(self, input_shape=None):
807
+ if self.built:
808
+ return
809
+ self.built = True
810
+ if getattr(self, "last_stage", None) is not None:
811
+ with tf.name_scope(self.last_stage.name):
812
+ self.last_stage.build(None)
813
+ for layer in self.intermediate_stages:
814
+ with tf.name_scope(layer.name):
815
+ layer.build(None)
816
+
817
+
818
+ @keras_serializable
819
+ class TFEfficientFormerMainLayer(keras.layers.Layer):
820
+ config_class = EfficientFormerConfig
821
+
822
+ def __init__(self, config: EfficientFormerConfig, **kwargs) -> None:
823
+ super().__init__(**kwargs)
824
+ self.config = config
825
+
826
+ self.patch_embed = TFEfficientFormerConvStem(config, config.hidden_sizes[0], name="patch_embed")
827
+ self.encoder = TFEfficientFormerEncoder(config, name="encoder")
828
+ self.layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm")
829
+
830
+ @unpack_inputs
831
+ def call(
832
+ self,
833
+ pixel_values: Optional[tf.Tensor] = None,
834
+ output_attentions: Optional[tf.Tensor] = None,
835
+ output_hidden_states: Optional[tf.Tensor] = None,
836
+ return_dict: Optional[bool] = None,
837
+ training: bool = False,
838
+ ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor, ...]]:
839
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
840
+
841
+ output_hidden_states = (
842
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
843
+ )
844
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
845
+
846
+ if pixel_values is None:
847
+ raise ValueError("You have to specify pixel_values")
848
+
849
+ # When running on CPU, keras.layers.Conv2D and keras.layers.AveragePooling2D do not
850
+ # support channels first NCHW format. A number of blocks contain both.
851
+ # So change the input format from (batch_size, num_channels, height, width) to
852
+ # (batch_size, height, width, num_channels) here.
853
+ # shape = (batch_size, in_height, in_width, in_channels=num_channels)
854
+ pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
855
+ embedding_output = self.patch_embed(pixel_values, training=training)
856
+
857
+ encoder_outputs = self.encoder(
858
+ hidden_states=embedding_output,
859
+ output_attentions=output_attentions,
860
+ output_hidden_states=output_hidden_states,
861
+ return_dict=return_dict,
862
+ training=training,
863
+ )
864
+
865
+ sequence_output = encoder_outputs[0]
866
+ sequence_output = self.layernorm(sequence_output, training=training)
867
+
868
+ # Change the hidden states from (batch_size, height, width, num_channels) to
869
+ # (batch_size, num_channels, height, width).
870
+ # The hidden states are in (batch_size, height, width, num_channels)
871
+ # shape after all stages except the MB3D blocks.
872
+ if output_hidden_states:
873
+ hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1][:-1]]) + (
874
+ encoder_outputs[1][-1],
875
+ )
876
+
877
+ if not return_dict:
878
+ head_outputs = (sequence_output,)
879
+ return head_outputs + encoder_outputs[1:]
880
+
881
+ return TFBaseModelOutput(
882
+ last_hidden_state=sequence_output,
883
+ hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
884
+ attentions=encoder_outputs.attentions,
885
+ )
886
+
887
+ def build(self, input_shape=None):
888
+ if self.built:
889
+ return
890
+ self.built = True
891
+ if getattr(self, "patch_embed", None) is not None:
892
+ with tf.name_scope(self.patch_embed.name):
893
+ self.patch_embed.build(None)
894
+ if getattr(self, "encoder", None) is not None:
895
+ with tf.name_scope(self.encoder.name):
896
+ self.encoder.build(None)
897
+ if getattr(self, "layernorm", None) is not None:
898
+ with tf.name_scope(self.layernorm.name):
899
+ self.layernorm.build([None, None, self.config.hidden_sizes[-1]])
900
+
901
+
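# Minimal sketch of the NCHW -> NHWC transpose performed in TFEfficientFormerMainLayer.call above;
# the 224x224 RGB batch below is an assumed example input, not taken from this file.
import tensorflow as tf

pixel_values = tf.random.uniform((2, 3, 224, 224))      # (batch_size, num_channels, height, width)
nhwc = tf.transpose(pixel_values, perm=(0, 2, 3, 1))    # (batch_size, height, width, num_channels)
assert nhwc.shape == (2, 224, 224, 3)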
902
+ class TFEfficientFormerPreTrainedModel(TFPreTrainedModel):
903
+ """
904
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
905
+ models.
906
+ """
907
+
908
+ config_class = EfficientFormerConfig
909
+ base_model_prefix = "efficientformer"
910
+ main_input_name = "pixel_values"
911
+
912
+
913
+ EFFICIENTFORMER_START_DOCSTRING = r"""
914
+ This model is a TensorFlow
915
+ [keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer). Use it as a regular
916
+ TensorFlow Module and refer to the TensorFlow documentation for all matters related to general usage and behavior.
917
+
918
+
919
+ Parameters:
920
+ config ([`EfficientFormerConfig`]): Model configuration class with all the parameters of the model.
921
+ Initializing with a config file does not load the weights associated with the model, only the
922
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
923
+ """
924
+
925
+ EFFICIENTFORMER_INPUTS_DOCSTRING = r"""
926
+ Args:
927
+ pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
928
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
929
+ [`EfficientFormerImageProcessor.__call__`] for details.
930
+ output_attentions (`bool`, *optional*):
931
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
932
+ tensors for more detail.
933
+ output_hidden_states (`bool`, *optional*):
934
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
935
+ more detail.
936
+ return_dict (`bool`, *optional*):
937
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
938
+ """
939
+
940
+
941
+ @add_start_docstrings(
942
+ "The bare EfficientFormer Model transformer outputting raw hidden-states without any specific head on top.",
943
+ EFFICIENTFORMER_START_DOCSTRING,
944
+ )
945
+ class TFEfficientFormerModel(TFEfficientFormerPreTrainedModel):
946
+ def __init__(self, config: EfficientFormerConfig, **kwargs) -> None:
947
+ super().__init__(config, **kwargs)
948
+
949
+ self.efficientformer = TFEfficientFormerMainLayer(config, name="efficientformer")
950
+
951
+ @unpack_inputs
952
+ @add_start_docstrings_to_model_forward(EFFICIENTFORMER_INPUTS_DOCSTRING)
953
+ @add_code_sample_docstrings(
954
+ checkpoint=_CHECKPOINT_FOR_DOC,
955
+ output_type=TFBaseModelOutputWithPooling,
956
+ config_class=_CONFIG_FOR_DOC,
957
+ modality="vision",
958
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
959
+ )
960
+ def call(
961
+ self,
962
+ pixel_values: Optional[tf.Tensor] = None,
963
+ output_attentions: Optional[bool] = None,
964
+ output_hidden_states: Optional[bool] = None,
965
+ return_dict: Optional[bool] = None,
966
+ training: bool = False,
967
+ ) -> Union[Tuple, TFBaseModelOutput]:
968
+ outputs = self.efficientformer(
969
+ pixel_values=pixel_values,
970
+ output_attentions=output_attentions,
971
+ output_hidden_states=output_hidden_states,
972
+ return_dict=return_dict,
973
+ training=training,
974
+ )
975
+ return outputs
976
+
977
+ def build(self, input_shape=None):
978
+ if self.built:
979
+ return
980
+ self.built = True
981
+ if getattr(self, "efficientformer", None) is not None:
982
+ with tf.name_scope(self.efficientformer.name):
983
+ self.efficientformer.build(None)
984
+
985
+
986
+ @add_start_docstrings(
987
+ """
988
+ EfficientFormer Model transformer with an image classification head on top of pooled last hidden state, e.g. for
989
+ ImageNet.
990
+ """,
991
+ EFFICIENTFORMER_START_DOCSTRING,
992
+ )
993
+ class TFEfficientFormerForImageClassification(TFEfficientFormerPreTrainedModel, TFSequenceClassificationLoss):
994
+ def __init__(self, config: EfficientFormerConfig):
995
+ super().__init__(config)
996
+
997
+ self.num_labels = config.num_labels
998
+ self.efficientformer = TFEfficientFormerMainLayer(config, name="efficientformer")
999
+
1000
+ # Classifier head
1001
+ self.classifier = (
1002
+ keras.layers.Dense(config.num_labels, name="classifier")
1003
+ if config.num_labels > 0
1004
+ else keras.layers.Activation("linear", name="classifier")
1005
+ )
1006
+ self.config = config
1007
+
1008
+ @unpack_inputs
1009
+ @add_start_docstrings_to_model_forward(EFFICIENTFORMER_INPUTS_DOCSTRING)
1010
+ @add_code_sample_docstrings(
1011
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
1012
+ output_type=TFImageClassifierOutput,
1013
+ config_class=_CONFIG_FOR_DOC,
1014
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
1015
+ )
1016
+ def call(
1017
+ self,
1018
+ pixel_values: Optional[tf.Tensor] = None,
1019
+ labels: Optional[tf.Tensor] = None,
1020
+ output_attentions: Optional[bool] = None,
1021
+ output_hidden_states: Optional[bool] = None,
1022
+ return_dict: Optional[bool] = None,
1023
+ training: bool = False,
1024
+ ) -> Union[tf.Tensor, TFImageClassifierOutput]:
1025
+ r"""
1026
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1027
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
1028
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
1029
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1030
+ """
1031
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1032
+
1033
+ outputs = self.efficientformer(
1034
+ pixel_values=pixel_values,
1035
+ output_attentions=output_attentions,
1036
+ output_hidden_states=output_hidden_states,
1037
+ return_dict=return_dict,
1038
+ training=training,
1039
+ )
1040
+
1041
+ sequence_output = outputs[0]
1042
+
1043
+ logits = self.classifier(tf.reduce_mean(sequence_output, axis=-2))
1044
+
1045
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
1046
+
1047
+ if not return_dict:
1048
+ output = (logits,) + outputs[1:]
1049
+ return ((loss,) + output) if loss is not None else output
1050
+
1051
+ return TFImageClassifierOutput(
1052
+ loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions
1053
+ )
1054
+
1055
+ def build(self, input_shape=None):
1056
+ if self.built:
1057
+ return
1058
+ self.built = True
1059
+ if getattr(self, "efficientformer", None) is not None:
1060
+ with tf.name_scope(self.efficientformer.name):
1061
+ self.efficientformer.build(None)
1062
+ if getattr(self, "classifier", None) is not None:
1063
+ if hasattr(self.classifier, "name"):
1064
+ with tf.name_scope(self.classifier.name):
1065
+ self.classifier.build([None, None, self.config.hidden_sizes[-1]])
1066
+
1067
+
1068
+ @dataclass
1069
+ class TFEfficientFormerForImageClassificationWithTeacherOutput(ModelOutput):
1070
+ """
1071
+ Args:
1072
+ Output type of [`EfficientFormerForImageClassificationWithTeacher`].
1073
+ logits (`tf.Tensor` of shape `(batch_size, config.num_labels)`):
1074
+ Prediction scores as the average of the cls_logits and distillation logits.
1075
+ cls_logits (`tf.Tensor` of shape `(batch_size, config.num_labels)`):
1076
+ Prediction scores of the classification head (i.e. the linear layer on top of the final hidden state of the
1077
+ class token).
1078
+ distillation_logits (`tf.Tensor` of shape `(batch_size, config.num_labels)`):
1079
+ Prediction scores of the distillation head (i.e. the linear layer on top of the final hidden state of the
1080
+ distillation token).
1081
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when
1082
+ `config.output_hidden_states=True`):
1083
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
1084
+ `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus
1085
+ the initial embedding outputs.
1086
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when
1087
+ `config.output_attentions=True`):
1088
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
1089
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
1090
+ the self-attention heads.
1091
+ """
1092
+
1093
+ logits: tf.Tensor = None
1094
+ cls_logits: tf.Tensor = None
1095
+ distillation_logits: tf.Tensor = None
1096
+ hidden_states: Optional[Tuple[tf.Tensor]] = None
1097
+ attentions: Optional[Tuple[tf.Tensor]] = None
1098
+
1099
+
1100
+ @add_start_docstrings(
1101
+ """
1102
+ EfficientFormer Model transformer with image classification heads on top (a linear layer on top of the final hidden
1103
+ state and a linear layer on top of the final hidden state of the distillation token) e.g. for ImageNet.
1104
+
1105
+ .. warning::
1106
+ This model supports inference-only. Fine-tuning with distillation (i.e. with a teacher) is not yet
1107
+ supported.
1108
+ """,
1109
+ EFFICIENTFORMER_START_DOCSTRING,
1110
+ )
1111
+ class TFEfficientFormerForImageClassificationWithTeacher(TFEfficientFormerPreTrainedModel):
1112
+ def __init__(self, config: EfficientFormerConfig) -> None:
1113
+ super().__init__(config)
1114
+
1115
+ self.num_labels = config.num_labels
1116
+ self.efficientformer = TFEfficientFormerMainLayer(config, name="efficientformer")
1117
+
1118
+ # Classifier heads
1119
+ self.classifier = (
1120
+ keras.layers.Dense(config.num_labels, name="classifier")
1121
+ if config.num_labels > 0
1122
+ else keras.layers.Activation("linear", name="classifier")
1123
+ )
1124
+ self.distillation_classifier = (
1125
+ keras.layers.Dense(config.num_labels, name="distillation_classifier")
1126
+ if config.num_labels > 0
1127
+ else keras.layers.Activation("linear", name="distillation_classifier")
1128
+ )
1129
+
1130
+ @unpack_inputs
1131
+ @add_start_docstrings_to_model_forward(EFFICIENTFORMER_INPUTS_DOCSTRING)
1132
+ @add_code_sample_docstrings(
1133
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
1134
+ output_type=TFEfficientFormerForImageClassificationWithTeacherOutput,
1135
+ config_class=_CONFIG_FOR_DOC,
1136
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
1137
+ )
1138
+ def call(
1139
+ self,
1140
+ pixel_values: Optional[tf.Tensor] = None,
1141
+ output_attentions: Optional[bool] = None,
1142
+ output_hidden_states: Optional[bool] = None,
1143
+ return_dict: Optional[bool] = None,
1144
+ training: bool = False,
1145
+ ) -> Union[tuple, TFEfficientFormerForImageClassificationWithTeacherOutput]:
1146
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1147
+
1148
+ if training:
1149
+ raise Exception(
1150
+ "This model supports inference-only. Fine-tuning with distillation (i.e. with a teacher) is not yet supported."
1151
+ )
1152
+
1153
+ outputs = self.efficientformer(
1154
+ pixel_values=pixel_values,
1155
+ output_attentions=output_attentions,
1156
+ output_hidden_states=output_hidden_states,
1157
+ return_dict=return_dict,
1158
+ training=training,
1159
+ )
1160
+
1161
+ sequence_output = outputs[0]
1162
+
1163
+ cls_logits = self.classifier(tf.reduce_mean(sequence_output, axis=-2))
1164
+ distillation_logits = self.distillation_classifier(tf.reduce_mean(sequence_output, axis=-2))
1165
+ logits = (cls_logits + distillation_logits) / 2
1166
+
1167
+ if not return_dict:
1168
+ output = (logits, cls_logits, distillation_logits) + outputs[1:]
1169
+ return output
1170
+
1171
+ return TFEfficientFormerForImageClassificationWithTeacherOutput(
1172
+ logits=logits,
1173
+ cls_logits=cls_logits,
1174
+ distillation_logits=distillation_logits,
1175
+ hidden_states=outputs.hidden_states,
1176
+ attentions=outputs.attentions,
1177
+ )
1178
+
1179
+ def build(self, input_shape=None):
1180
+ if self.built:
1181
+ return
1182
+ self.built = True
1183
+ if getattr(self, "efficientformer", None) is not None:
1184
+ with tf.name_scope(self.efficientformer.name):
1185
+ self.efficientformer.build(None)
1186
+ if getattr(self, "classifier", None) is not None:
1187
+ if hasattr(self.classifier, "name"):
1188
+ with tf.name_scope(self.classifier.name):
1189
+ self.classifier.build([None, None, self.config.hidden_sizes[-1]])
1190
+ if getattr(self, "distillation_classifier", None) is not None:
1191
+ if hasattr(self.distillation_classifier, "name"):
1192
+ with tf.name_scope(self.distillation_classifier.name):
1193
+ self.distillation_classifier.build([None, None, self.config.hidden_sizes[-1]])
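# Hedged usage sketch for the TF classification head defined above; the checkpoint name and the
# random stand-in image are assumptions, not taken from this diff.
import numpy as np
from transformers import AutoImageProcessor, TFEfficientFormerForImageClassification

ckpt = "snap-research/efficientformer-l1-300"  # assumed checkpoint; any EfficientFormer checkpoint works the same way
processor = AutoImageProcessor.from_pretrained(ckpt)
model = TFEfficientFormerForImageClassification.from_pretrained(ckpt)  # add from_pt=True if only PyTorch weights are published

image = np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)  # stand-in for a real image
inputs = processor(images=image, return_tensors="tf")
logits = model(**inputs, training=False).logits
predicted_class = int(np.argmax(logits, axis=-1)[0])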
venv/lib/python3.10/site-packages/transformers/models/fuyu/__init__.py ADDED
@@ -0,0 +1,73 @@
1
+ # Copyright 2023 AdeptAI and The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
17
+
18
+
19
+ _import_structure = {
20
+ "configuration_fuyu": ["FUYU_PRETRAINED_CONFIG_ARCHIVE_MAP", "FuyuConfig"],
21
+ }
22
+
23
+
24
+ try:
25
+ if not is_vision_available():
26
+ raise OptionalDependencyNotAvailable()
27
+ except OptionalDependencyNotAvailable:
28
+ pass
29
+ else:
30
+ _import_structure["image_processing_fuyu"] = ["FuyuImageProcessor"]
31
+ _import_structure["processing_fuyu"] = ["FuyuProcessor"]
32
+
33
+
34
+ try:
35
+ if not is_torch_available():
36
+ raise OptionalDependencyNotAvailable()
37
+ except OptionalDependencyNotAvailable:
38
+ pass
39
+ else:
40
+ _import_structure["modeling_fuyu"] = [
41
+ "FuyuForCausalLM",
42
+ "FuyuPreTrainedModel",
43
+ ]
44
+
45
+
46
+ if TYPE_CHECKING:
47
+ from .configuration_fuyu import FUYU_PRETRAINED_CONFIG_ARCHIVE_MAP, FuyuConfig
48
+
49
+ try:
50
+ if not is_vision_available():
51
+ raise OptionalDependencyNotAvailable()
52
+ except OptionalDependencyNotAvailable:
53
+ pass
54
+ else:
55
+ from .image_processing_fuyu import FuyuImageProcessor
56
+ from .processing_fuyu import FuyuProcessor
57
+
58
+ try:
59
+ if not is_torch_available():
60
+ raise OptionalDependencyNotAvailable()
61
+ except OptionalDependencyNotAvailable:
62
+ pass
63
+ else:
64
+ from .modeling_fuyu import (
65
+ FuyuForCausalLM,
66
+ FuyuPreTrainedModel,
67
+ )
68
+
69
+
70
+ else:
71
+ import sys
72
+
73
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
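# Sketch of what the _LazyModule registration above provides: submodules are imported only when an
# attribute is first accessed, so torch/vision stay untouched until they are actually needed.
from transformers.models import fuyu

config = fuyu.FuyuConfig()        # imports configuration_fuyu on first access
# fuyu.FuyuForCausalLM            # would import modeling_fuyu (and torch) only at this point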
venv/lib/python3.10/site-packages/transformers/models/fuyu/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.13 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/fuyu/__pycache__/configuration_fuyu.cpython-310.pyc ADDED
Binary file (7.93 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/fuyu/__pycache__/convert_fuyu_model_weights_to_hf.cpython-310.pyc ADDED
Binary file (2.95 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/fuyu/__pycache__/image_processing_fuyu.cpython-310.pyc ADDED
Binary file (25.8 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/fuyu/__pycache__/modeling_fuyu.cpython-310.pyc ADDED
Binary file (14 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/fuyu/__pycache__/processing_fuyu.cpython-310.pyc ADDED
Binary file (22 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/fuyu/configuration_fuyu.py ADDED
@@ -0,0 +1,211 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 Adept AI and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Fuyu model configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...utils import logging
19
+ from ..auto import CONFIG_MAPPING
20
+
21
+
22
+ logger = logging.get_logger(__name__)
23
+
24
+
25
+ from ..deprecated._archive_maps import FUYU_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
26
+
27
+
28
+ class FuyuConfig(PretrainedConfig):
29
+ r"""
30
+ This is the configuration class to store the configuration of a [`FuyuForCausalLM`]. It is used to instantiate an
31
+ Fuyu model according to the specified arguments, defining the model architecture. Instantiating a configuration
32
+ with the defaults will yield a similar configuration to that of the
33
+ [adept/fuyu-8b](https://huggingface.co/adept/fuyu-8b).
34
+
35
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
36
+ documentation from [`PretrainedConfig`] for more information.
37
+
38
+
39
+ Args:
40
+ vocab_size (`int`, *optional*, defaults to 262144):
41
+ Vocabulary size of the Fuyu model. Defines the number of different tokens that can be represented by the
42
+ `inputs_ids` passed when calling [`FuyuForCausalLM`]
43
+ hidden_size (`int`, *optional*, defaults to 4096):
44
+ Dimension of the hidden representations.
45
+ intermediate_size (`int`, *optional*, defaults to 16384):
46
+ Dimension of the MLP representations.
47
+ num_hidden_layers (`int`, *optional*, defaults to 36):
48
+ Number of hidden layers in the Transformer encoder.
49
+ num_attention_heads (`int`, *optional*, defaults to 64):
50
+ Number of attention heads for each attention layer in the Transformer encoder.
51
+ hidden_act (`str` or `function`, *optional*, defaults to `"relu2"`):
52
+ The non-linear activation function (function or string) in the decoder.
53
+ max_position_embeddings (`int`, *optional*, defaults to 16384):
54
+ The maximum sequence length that this model might ever be used with.
55
+ image_size (`int`, *optional*, defaults to 300):
56
+ The input image size.
57
+ patch_size (`int`, *optional*, defaults to 30):
58
+ The input vision transformer encoding patch size.
59
+ num_channels (`int`, *optional*, defaults to 3):
60
+ The input image number of channels.
61
+ initializer_range (`float`, *optional*, defaults to 0.02):
62
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
63
+ layer_norm_eps (`float`, *optional*, defaults to 1e-05):
64
+ The epsilon used by the rms normalization layers.
65
+ use_cache (`bool`, *optional*, defaults to `True`):
66
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
67
+ relevant if `config.is_decoder=True`. Whether to tie weight embeddings
68
+ tie_word_embeddings (`bool`, *optional*, defaults to `False`):
69
+ Whether to tie input and output embeddings.
70
+ rope_theta (`float`, *optional*, defaults to 25000.0):
71
+ The base period of the RoPE embeddings.
72
+ rope_scaling (`Dict`, *optional*):
73
+ Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
74
+ strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
75
+ `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
76
+ `max_position_embeddings` to the expected new maximum. See the following thread for more information on how
77
+ these scaling strategies behave:
78
+ https://www.reddit.com/r/LocalFuyu/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an
79
+ experimental feature, subject to breaking API changes in future versions.
80
+ qk_layernorm (`bool`, *optional*, defaults to `True`):
81
+ Whether or not to normalize the Queries and Keys after projecting the hidden states
82
+ hidden_dropout (`float`, *optional*, defaults to 0.0):
83
+ The dropout ratio after applying the MLP to the hidden states.
84
+ attention_dropout (`float`, *optional*, defaults to 0.0):
85
+ The dropout ratio after computing the attention scores.
86
+ partial_rotary_factor (`float`, *optional*, defaults to 0.5):
87
+ Percentage of the query and keys which will have rotary embedding.
88
+
89
+ pad_token_id (`int`, *optional*):
90
+ The id of the *padding* token.
91
+ bos_token_id (`int`, *optional*, defaults to 1):
92
+ The id of the *beginning-of-sequence* token.
93
+ eos_token_id (`Union[int, List[int]]`, *optional*, defaults to 2):
94
+ The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
95
+ text_config (`dict`, *optional*):
96
+ Dictionary of configuration options used to initialize the underlying language model (defaults to a `persimmon`-style configuration).
97
+
98
+ ```python
99
+ >>> from transformers import FuyuConfig
100
+
101
+ >>> # Initializing a Fuyu fuyu-8b style configuration
102
+ >>> configuration = FuyuConfig()
103
+ ```"""
104
+
105
+ model_type = "fuyu"
106
+ keys_to_ignore_at_inference = ["past_key_values"]
107
+
108
+ def __init__(
109
+ self,
110
+ vocab_size=262144,
111
+ hidden_size=4096,
112
+ intermediate_size=16384,
113
+ num_hidden_layers=36,
114
+ num_attention_heads=64,
115
+ hidden_act="relu2",
116
+ max_position_embeddings=16384,
117
+ image_size=300,
118
+ patch_size=30,
119
+ num_channels=3,
120
+ initializer_range=0.02,
121
+ layer_norm_eps=1e-5,
122
+ use_cache=True,
123
+ tie_word_embeddings=False,
124
+ rope_theta=25000.0,
125
+ rope_scaling=None,
126
+ qk_layernorm=True,
127
+ hidden_dropout=0.0,
128
+ attention_dropout=0.0,
129
+ partial_rotary_factor=0.5,
130
+ pad_token_id=None,
131
+ bos_token_id=1,
132
+ eos_token_id=2,
133
+ text_config=None,
134
+ **kwargs,
135
+ ):
136
+ if text_config is None:
137
+ text_config = {
138
+ "vocab_size": vocab_size,
139
+ "max_position_embeddings": max_position_embeddings,
140
+ "hidden_size": hidden_size,
141
+ "intermediate_size": intermediate_size,
142
+ "num_hidden_layers": num_hidden_layers,
143
+ "num_attention_heads": num_attention_heads,
144
+ "hidden_act": hidden_act,
145
+ "initializer_range": initializer_range,
146
+ "layer_norm_eps": layer_norm_eps,
147
+ "use_cache": use_cache,
148
+ "rope_theta": rope_theta,
149
+ "rope_scaling": rope_scaling,
150
+ "qk_layernorm": qk_layernorm,
151
+ "hidden_dropout": hidden_dropout,
152
+ "attention_dropout": attention_dropout,
153
+ "partial_rotary_factor": partial_rotary_factor,
154
+ "pad_token_id": pad_token_id,
155
+ "bos_token_id": bos_token_id,
156
+ "eos_token_id": eos_token_id,
157
+ "tie_word_embeddings": tie_word_embeddings,
158
+ }
159
+ logger.info("text_config is None. Initializing the text model with default values.")
160
+ text_model_type = text_config["model_type"] if "model_type" in text_config else "persimmon"
161
+ self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
162
+
163
+ self.vocab_size = vocab_size
164
+ self.max_position_embeddings = max_position_embeddings
165
+ self.image_size = image_size
166
+ self.patch_size = patch_size
167
+ self.num_channels = num_channels
168
+ self.hidden_size = hidden_size
169
+ self.intermediate_size = intermediate_size
170
+ self.num_hidden_layers = num_hidden_layers
171
+ self.num_attention_heads = num_attention_heads
172
+ self.hidden_act = hidden_act
173
+ self.initializer_range = initializer_range
174
+ self.layer_norm_eps = layer_norm_eps
175
+ self.use_cache = use_cache
176
+ self.rope_theta = rope_theta
177
+ self.rope_scaling = rope_scaling
178
+ self.qk_layernorm = qk_layernorm
179
+ self.hidden_dropout = hidden_dropout
180
+ self.attention_dropout = attention_dropout
181
+ self.partial_rotary_factor = partial_rotary_factor
182
+ self._rope_scaling_validation()
183
+
184
+ super().__init__(
185
+ pad_token_id=pad_token_id,
186
+ bos_token_id=bos_token_id,
187
+ eos_token_id=eos_token_id,
188
+ tie_word_embeddings=tie_word_embeddings,
189
+ **kwargs,
190
+ )
191
+
192
+ # Copied from transformers.models.llama.configuration_llama.LlamaConfig._rope_scaling_validation
193
+ def _rope_scaling_validation(self):
194
+ """
195
+ Validate the `rope_scaling` configuration.
196
+ """
197
+ if self.rope_scaling is None:
198
+ return
199
+
200
+ if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
201
+ raise ValueError(
202
+ "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, " f"got {self.rope_scaling}"
203
+ )
204
+ rope_scaling_type = self.rope_scaling.get("type", None)
205
+ rope_scaling_factor = self.rope_scaling.get("factor", None)
206
+ if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
207
+ raise ValueError(
208
+ f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
209
+ )
210
+ if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
211
+ raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
venv/lib/python3.10/site-packages/transformers/models/fuyu/convert_fuyu_model_weights_to_hf.py ADDED
@@ -0,0 +1,134 @@
1
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import argparse
15
+ import os
16
+ import sys
17
+ import warnings
18
+
19
+ import flatdict
20
+ import torch
21
+
22
+ from transformers import FuyuConfig, FuyuForCausalLM, LlamaTokenizer
23
+
24
+
25
+ try:
26
+ from transformers import LlamaTokenizerFast
27
+
28
+ tokenizer_class = LlamaTokenizerFast
29
+ except ImportError as e:
30
+ warnings.warn(e)
31
+ warnings.warn(
32
+ "The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"
33
+ )
34
+ tokenizer_class = LlamaTokenizer
35
+
36
+ """
37
+ Sample usage: # TODO fix clone links from persimmon to fuyu
38
+ ```
39
+ git clone https://github.com/adept-ai-labs/adept-inference
40
+ wget https://axtkn4xl5cip.objectstorage.us-phoenix-1.oci.customer-oci.com/n/axtkn4xl5cip/b/adept-public-data/o/8b_base_model_release.tar
41
+ wget https://axtkn4xl5cip.objectstorage.us-phoenix-1.oci.customer-oci.com/n/axtkn4xl5cip/b/adept-public-data/o/8b_chat_model_release.tar
42
+ python src/transformers/models/fuyu/convert_fuyu_weights_to_hf.py --input_dir /path/to/downloaded/fuyu/weights/ --output_dir /output/path
43
+ ```
44
+
45
+ Thereafter, models can be loaded via:
46
+
47
+ ```py
48
+ from transformers import FuyuForCausalLM, FuyuTokenizer
49
+
50
+ model = FuyuForCausalLM.from_pretrained("/output/path")
51
+ tokenizer = FuyuTokenizer.from_pretrained("/output/path")
52
+ ```
53
+
54
+ Important note: you need to be able to host the whole model in RAM to execute this script (even if the biggest versions
55
+ come in several checkpoints they each contain a part of each weight of the model, so we need to load them all in RAM).
56
+ """
57
+
58
+
59
+ KEYS_TO_MODIFY_MAPPING = {
60
+ "self_attention": "self_attn",
61
+ "language_model.encoder": "language_model.model",
62
+ "word_embeddings_for_head": "language_model.lm_head",
63
+ "language_model.embedding.word_embeddings": "language_model.model.embed_tokens",
64
+ "vit_encoder.linear_encoder": "vision_embed_tokens",
65
+ }
66
+
67
+ KEYS_TO_REMOVE = {
68
+ "rotary_emb.inv_freq",
69
+ "image_patch_projection",
70
+ "image_patch_projection.weight",
71
+ "image_patch_projection.bias",
72
+ }
73
+
74
+
75
+ def rename_state_dict(state_dict):
76
+ model_state_dict = {}
77
+ for key, value in state_dict.items():
78
+ for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
79
+ if key_to_modify in key:
80
+ key = key.replace(key_to_modify, new_key)
81
+ # if KEYS_TO_REMOVE in key:
82
+ if key in KEYS_TO_REMOVE:
83
+ continue
84
+ model_state_dict[key] = value
85
+ return model_state_dict
86
+
87
+
88
+ def convert_fuyu_checkpoint(pytorch_dump_folder_path, ada_lib_path, pt_model_path, safe_serialization=False):
89
+ sys.path.insert(0, ada_lib_path)
90
+ model_state_dict_base = torch.load(pt_model_path, map_location="cpu")
91
+ state_dict = flatdict.FlatDict(model_state_dict_base["model"], ".")
92
+ state_dict = rename_state_dict(state_dict)
93
+
94
+ transformers_config = FuyuConfig()
95
+ model = FuyuForCausalLM(transformers_config).to(torch.bfloat16)
96
+ model.load_state_dict(state_dict)
97
+ model.save_pretrained(pytorch_dump_folder_path, safe_serialization=safe_serialization)
98
+ transformers_config.save_pretrained(pytorch_dump_folder_path)
99
+
100
+
101
+ def main():
102
+ parser = argparse.ArgumentParser()
103
+ parser.add_argument(
104
+ "--input_dir",
105
+ help="Location of Fuyu weights, which contains tokenizer.model and model folders",
106
+ )
107
+ parser.add_argument(
108
+ "--pt_model_path",
109
+ help="Location of Fuyu `model_optim_rng.pt`",
110
+ )
111
+ parser.add_argument(
112
+ "--output_dir",
113
+ help="Location to write HF model and tokenizer",
114
+ )
115
+ parser.add_argument(
116
+ "--ada_lib_path",
117
+ help="Location of original source code from adept to deserialize .pt checkpoint",
118
+ )
119
+ parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
120
+ args = parser.parse_args()
121
+ spm_path = os.path.join(args.input_dir, "adept_vocab.model")
122
+
123
+ convert_fuyu_checkpoint(
124
+ pytorch_dump_folder_path=args.output_dir,
125
+ pt_model_path=args.pt_model_path,
126
+ safe_serialization=args.safe_serialization,
127
+ ada_lib_path=args.ada_lib_path,
128
+ )
129
+ tokenizer = tokenizer_class(spm_path, bos_token="|ENDOFTEXT|", eos_token="|ENDOFTEXT|")
130
+ tokenizer.save_pretrained(args.output_dir)
131
+
132
+
133
+ if __name__ == "__main__":
134
+ main()
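# Toy illustration of rename_state_dict above (assumes the function is in scope): keys are
# rewritten via KEYS_TO_MODIFY_MAPPING and keys listed in KEYS_TO_REMOVE are dropped. Tensors
# are replaced by ints purely for readability.
toy_state_dict = {
    "language_model.embedding.word_embeddings.weight": 1,
    "layers.0.self_attention.dense.weight": 2,
    "rotary_emb.inv_freq": 3,
}
print(rename_state_dict(toy_state_dict))
# {'language_model.model.embed_tokens.weight': 1, 'layers.0.self_attn.dense.weight': 2}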
venv/lib/python3.10/site-packages/transformers/models/fuyu/image_processing_fuyu.py ADDED
@@ -0,0 +1,736 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Image processor class for Fuyu."""
16
+
17
+ import math
18
+ from typing import Dict, List, Optional, Union
19
+
20
+ import numpy as np
21
+
22
+ from ...image_processing_utils import BaseImageProcessor, BatchFeature
23
+ from ...image_transforms import (
24
+ pad,
25
+ resize,
26
+ to_channel_dimension_format,
27
+ )
28
+ from ...image_utils import (
29
+ ChannelDimension,
30
+ ImageInput,
31
+ PILImageResampling,
32
+ get_image_size,
33
+ infer_channel_dimension_format,
34
+ is_scaled_image,
35
+ is_valid_image,
36
+ make_list_of_images,
37
+ to_numpy_array,
38
+ validate_preprocess_arguments,
39
+ )
40
+ from ...utils import (
41
+ TensorType,
42
+ is_torch_available,
43
+ is_torch_device,
44
+ is_torch_dtype,
45
+ logging,
46
+ requires_backends,
47
+ )
48
+
49
+
50
+ if is_torch_available():
51
+ import torch
52
+
53
+
54
+ logger = logging.get_logger(__name__)
55
+
56
+
57
+ def make_list_of_list_of_images(
58
+ images: Union[List[List[ImageInput]], List[ImageInput], ImageInput],
59
+ ) -> List[List[ImageInput]]:
60
+ if is_valid_image(images):
61
+ return [[images]]
62
+
63
+ if isinstance(images, list) and all(isinstance(image, list) for image in images):
64
+ return images
65
+
66
+ if isinstance(images, list):
67
+ return [make_list_of_images(image) for image in images]
68
+
69
+ raise ValueError("images must be a list of list of images or a list of images or an image.")
70
+
71
+
72
+ class FuyuBatchFeature(BatchFeature):
73
+ """
74
+ BatchFeature class for Fuyu image processor and processor.
75
+
76
+ The outputs dictionary from the processors contains a mix of tensors and lists of tensors.
77
+ """
78
+
79
+ def convert_to_tensors(self, tensor_type: Optional[Union[str, TensorType]] = None):
80
+ """
81
+ Convert the inner content to tensors.
82
+
83
+ Args:
84
+ tensor_type (`str` or [`~utils.TensorType`], *optional*):
85
+ The type of tensors to use. If `str`, should be one of the values of the enum [`~utils.TensorType`]. If
86
+ `None`, no modification is done.
87
+ """
88
+ if tensor_type is None:
89
+ return self
90
+
91
+ is_tensor, as_tensor = self._get_is_as_tensor_fns(tensor_type=tensor_type)
92
+
93
+ def _convert_tensor(elem):
94
+ if is_tensor(elem):
95
+ return elem
96
+ return as_tensor(elem)
97
+
98
+ def _safe_convert_tensor(elem):
99
+ try:
100
+ return _convert_tensor(elem)
101
+ except: # noqa E722
102
+ if key == "overflowing_values":
103
+ raise ValueError("Unable to create tensor returning overflowing values of different lengths. ")
104
+ raise ValueError(
105
+ "Unable to create tensor, you should probably activate padding "
106
+ "with 'padding=True' to have batched tensors with the same length."
107
+ )
108
+
109
+ # Do the tensor conversion in batch
110
+ for key, value in self.items():
111
+ if isinstance(value, list) and isinstance(value[0], list):
112
+ # List[List[Any]] -> List[List[Tensor]]
113
+ self[key] = [[_safe_convert_tensor(elem) for elem in elems] for elems in value]
114
+ elif isinstance(value, list):
115
+ # List[Any] -> List[Tensor]
116
+ self[key] = [_safe_convert_tensor(elem) for elem in value]
117
+ else:
118
+ # Any -> Tensor
119
+ self[key] = _safe_convert_tensor(value)
120
+ return self
121
+
122
+ def to(self, *args, **kwargs) -> "BatchFeature":
123
+ """
124
+ Send all values to device by calling `v.to(*args, **kwargs)` (PyTorch only). This should support casting in
125
+ different `dtypes` and sending the `BatchFeature` to a different `device`.
126
+
127
+ Args:
128
+ args (`Tuple`):
129
+ Will be passed to the `to(...)` function of the tensors.
130
+ kwargs (`Dict`, *optional*):
131
+ Will be passed to the `to(...)` function of the tensors.
132
+
133
+ Returns:
134
+ [`BatchFeature`]: The same instance after modification.
135
+ """
136
+ requires_backends(self, ["torch"])
137
+ import torch # noqa
138
+
139
+ new_data = {}
140
+ device = kwargs.get("device")
141
+ # Check if the args are a device or a dtype
142
+ if device is None and len(args) > 0:
143
+ # device should be always the first argument
144
+ arg = args[0]
145
+ if is_torch_dtype(arg):
146
+ # The first argument is a dtype
147
+ pass
148
+ elif isinstance(arg, str) or is_torch_device(arg) or isinstance(arg, int):
149
+ device = arg
150
+ else:
151
+ # it's something else
152
+ raise ValueError(f"Attempting to cast a BatchFeature to type {str(arg)}. This is not supported.")
153
+
154
+ def _to(elem):
155
+ # check if v is a floating point
156
+ if torch.is_floating_point(elem):
157
+ # cast and send to device
158
+ return elem.to(*args, **kwargs)
159
+ if device is not None:
160
+ return elem.to(device=device)
161
+
162
+ return elem
163
+
164
+ # We cast only floating point tensors to avoid issues with tokenizers casting `LongTensor` to `FloatTensor`
165
+ for k, v in self.items():
166
+ if isinstance(v, list) and isinstance(v[0], list):
167
+ # Data structure is a list of lists
168
+ new_v = []
169
+ for elems in v:
170
+ new_v.append([_to(elem) for elem in elems])
171
+ new_data[k] = new_v
172
+ elif isinstance(v, list):
173
+ # Data structure is a list
174
+ new_data[k] = [_to(elem) for elem in v]
175
+ else:
176
+ new_data[k] = _to(v)
177
+ self.data = new_data
178
+ return self
179
+
180
+
181
+ class FuyuImageProcessor(BaseImageProcessor):
182
+ """
183
+ This class should handle the image processing part before the main FuyuForCausalLM. In particular, it should
184
+ handle:
185
+
186
+ - Processing Images:
187
+ Taking a batch of images as input. If the images are variable-sized, it resizes them based on the desired patch
188
+ dimensions. The output image size is always (img_h, img_w) = (1080, 1920).
189
+
190
+ Then, it patches up these images using the patchify_image function.
191
+
192
+ - Creating Image Input IDs:
193
+ For each patch, a placeholder ID is given to identify where these patches belong in a token sequence. For
194
+ variable-sized images, each line of patches is terminated with a newline ID.
195
+
196
+ - Image Patch Indices:
197
+ For each image patch, the code maintains an index where these patches should be inserted in a token stream.
198
+
199
+
200
+ Args:
201
+ do_resize (`bool`, *optional*, defaults to `True`):
202
+ Whether to resize the image to `size`.
203
+ size (`Dict[str, int]`, *optional*, defaults to `{"height": 1080, "width": 1920}`):
204
+ Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
205
+ resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
206
+ `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`.
207
+ do_pad (`bool`, *optional*, defaults to `True`):
208
+ Whether to pad the image to `size`.
209
+ padding_value (`float`, *optional*, defaults to 1.0):
210
+ The value to pad the image with.
211
+ padding_mode (`str`, *optional*, defaults to `"constant"`):
212
+ The padding mode to use when padding the image.
213
+ do_normalize (`bool`, *optional*, defaults to `True`):
214
+ Whether to normalize the image.
215
+ image_mean (`float`, *optional*, defaults to 0.5):
216
+ The mean to use when normalizing the image.
217
+ image_std (`float`, *optional*, defaults to 0.5):
218
+ The standard deviation to use when normalizing the image.
219
+ do_rescale (`bool`, *optional*, defaults to `True`):
220
+ Whether to rescale the image.
221
+ rescale_factor (`float`, *optional*, defaults to `1 / 255`):
222
+ The factor to use when rescaling the image.
223
+ patch_size (`Dict[str, int]`, *optional*, defaults to `{"height": 30, "width": 30}`):
224
+ Dictionary in the format `{"height": int, "width": int}` specifying the size of the patches.
225
+ """
226
+
227
+ model_input_names = [
228
+ "images",
229
+ "image_input_ids",
230
+ "image_patches",
231
+ "image_patch_indices_per_batch",
232
+ "image_patch_indices_per_subsequence",
233
+ ]
234
+
235
+ def __init__(
236
+ self,
237
+ do_resize: bool = True,
238
+ size: Optional[Dict[str, int]] = None,
239
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
240
+ do_pad: bool = True,
241
+ padding_value: float = 1.0,
242
+ padding_mode: str = "constant",
243
+ do_normalize: bool = True,
244
+ image_mean: Union[float, List[float]] = 0.5,
245
+ image_std: Union[float, List[float]] = 0.5,
246
+ do_rescale: bool = True,
247
+ rescale_factor: float = 1 / 255,
248
+ patch_size: Optional[Dict[str, int]] = None,
249
+ **kwargs,
250
+ ):
251
+ super().__init__(**kwargs)
252
+ self.do_resize = do_resize
253
+ self.size = size if size is not None else {"height": 1080, "width": 1920}
254
+ self.resample = resample
255
+ self.do_pad = do_pad
256
+ self.padding_value = padding_value
257
+ self.padding_mode = padding_mode
258
+ self.do_normalize = do_normalize
259
+ self.image_mean = image_mean
260
+ self.image_std = image_std
261
+ self.do_rescale = do_rescale
262
+ self.rescale_factor = rescale_factor
263
+ self.patch_size = patch_size if patch_size is not None else {"height": 30, "width": 30}
264
+ self._valid_processor_keys = [
265
+ "images",
266
+ "do_resize",
267
+ "size",
268
+ "resample",
269
+ "do_pad",
270
+ "padding_value",
271
+ "padding_mode",
272
+ "do_normalize",
273
+ "image_mean",
274
+ "image_std",
275
+ "do_rescale",
276
+ "rescale_factor",
277
+ "patch_size",
278
+ "return_tensors",
279
+ "data_format",
280
+ "input_data_format",
281
+ ]
282
+
283
+ def resize(
284
+ self,
285
+ image: np.ndarray,
286
+ size: Dict[str, int],
287
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
288
+ data_format: Optional[Union[str, ChannelDimension]] = None,
289
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
290
+ **kwargs,
291
+ ) -> np.ndarray:
292
+ """
293
+ Resize an image to `(size["height"], size["width"])`.
294
+
295
+ Args:
296
+ image (`np.ndarray`):
297
+ Image to resize.
298
+ size (`Dict[str, int]`):
299
+ Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
300
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
301
+ `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`.
302
+ data_format (`ChannelDimension` or `str`, *optional*):
303
+ The channel dimension format for the output image. If unset, the channel dimension format of the input
304
+ image is used. Can be one of:
305
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
306
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
307
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
308
+ input_data_format (`ChannelDimension` or `str`, *optional*):
309
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
310
+ from the input image. Can be one of:
311
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
312
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
313
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
314
+
315
+ Returns:
316
+ `np.ndarray`: The resized image.
317
+ """
318
+ image_height, image_width = get_image_size(image, input_data_format)
319
+ target_height, target_width = size["height"], size["width"]
320
+
321
+ if image_width <= target_width and image_height <= target_height:
322
+ return image
323
+
324
+ height_scale_factor = target_height / image_height
325
+ width_scale_factor = target_width / image_width
326
+ optimal_scale_factor = min(height_scale_factor, width_scale_factor)
327
+
328
+ new_height = int(image_height * optimal_scale_factor)
329
+ new_width = int(image_width * optimal_scale_factor)
330
+
331
+ scaled_image = resize(
332
+ image=image,
333
+ size=(new_height, new_width),
334
+ resample=resample,
335
+ data_format=data_format,
336
+ input_data_format=input_data_format,
337
+ **kwargs,
338
+ )
339
+ return scaled_image
340
+
341
+ def pad_image(
342
+ self,
343
+ image: np.ndarray,
344
+ size: Dict[str, int],
345
+ mode: str = "constant",
346
+ constant_values: float = 1.0,
347
+ data_format: Optional[Union[str, ChannelDimension]] = None,
348
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
349
+ ) -> np.ndarray:
350
+ """
351
+ Pad an image to `(size["height"], size["width"])`.
352
+
353
+ Args:
354
+ image (`np.ndarray`):
355
+ Image to pad.
356
+ size (`Dict[str, int]`):
357
+ Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
358
+ data_format (`ChannelDimension` or `str`, *optional*):
359
+ The data format of the output image. If unset, the same format as the input image is used.
360
+ input_data_format (`ChannelDimension` or `str`, *optional*):
361
+ The channel dimension format of the input image. If not provided, it will be inferred.
362
+ """
363
+ image_height, image_width = get_image_size(image, input_data_format)
364
+ target_height, target_width = size["height"], size["width"]
365
+ padding_top = 0
366
+ padding_left = 0
367
+ padding_bottom = target_height - image_height
368
+ padding_right = target_width - image_width
369
+ padded_image = pad(
370
+ image,
371
+ padding=((padding_top, padding_bottom), (padding_left, padding_right)),
372
+ mode=mode,
373
+ constant_values=constant_values,
374
+ data_format=data_format,
375
+ input_data_format=input_data_format,
376
+ )
377
+ return padded_image
378
+
379
+ def preprocess(
380
+ self,
381
+ images,
382
+ do_resize: Optional[bool] = None,
383
+ size: Optional[Dict[str, int]] = None,
384
+ resample: Optional[PILImageResampling] = None,
385
+ do_pad: Optional[bool] = None,
386
+ padding_value: Optional[float] = None,
387
+ padding_mode: Optional[str] = None,
388
+ do_normalize: Optional[bool] = None,
389
+ image_mean: Optional[float] = None,
390
+ image_std: Optional[float] = None,
391
+ do_rescale: Optional[bool] = None,
392
+ rescale_factor: Optional[float] = None,
393
+ patch_size: Optional[Dict[str, int]] = None,
394
+ data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,
395
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
396
+ return_tensors: Optional[TensorType] = None,
397
+ ):
398
+ """
399
+
400
+ Utility function to preprocess the images and extract necessary information about original formats.
401
+
402
+ Args:
403
+ images (`ImageInput`):
404
+ Images to preprocess. Expects a single image, a list of images or a list of lists of images. Pixel
405
+ values range from 0 to 255, or between 0 and 1 if `do_rescale` is `False`.
406
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
407
+ Whether to resize the image to `size`.
408
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
409
+ Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
410
+ resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
411
+ `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`.
412
+ do_pad (`bool`, *optional*, defaults to `self.do_pad`):
413
+ Whether to pad the image to `size`.
414
+ padding_value (`float`, *optional*, defaults to `self.padding_value`):
415
+ The value to pad the image with.
416
+ padding_mode (`str`, *optional*, defaults to `self.padding_mode`):
417
+ The padding mode to use when padding the image.
418
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
419
+ Whether to normalize the image.
420
+ image_mean (`float`, *optional*, defaults to `self.image_mean`):
421
+ The mean to use when normalizing the image.
422
+ image_std (`float`, *optional*, defaults to `self.image_std`):
423
+ The standard deviation to use when normalizing the image.
424
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
425
+ Whether to rescale the image.
426
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
427
+ The factor to use when rescaling the image.
428
+ patch_size (`Dict[str, int]`, *optional*, defaults to `self.patch_size`):
429
+ Dictionary in the format `{"height": int, "width": int}` specifying the size of the patches.
430
+ return_tensors (`str` or `TensorType`, *optional*):
431
+ The type of tensors to return. Can be one of:
432
+ - Unset: Return a list of `np.ndarray`.
433
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
434
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
435
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
436
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
437
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
438
+ The channel dimension format of the output image. Can be one of:
439
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
440
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
441
+ input_data_format (`ChannelDimension` or `str`, *optional*):
442
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
443
+ from the input image. Can be one of:
444
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
445
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
446
+ """
447
+
448
+ do_resize = do_resize if do_resize is not None else self.do_resize
449
+ size = size if size is not None else self.size
450
+ resample = resample if resample is not None else self.resample
451
+ do_pad = do_pad if do_pad is not None else self.do_pad
452
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
453
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
454
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
455
+ image_mean = image_mean if image_mean is not None else self.image_mean
456
+ image_std = image_std if image_std is not None else self.image_std
457
+ padding_value = padding_value if padding_value is not None else self.padding_value
458
+ padding_mode = padding_mode if padding_mode is not None else self.padding_mode
461
+ patch_size = patch_size if patch_size is not None else self.patch_size
462
+
463
+ if isinstance(images, list) and any(isinstance(elem, list) and len(elem) >= 2 for elem in images):
464
+ raise ValueError("Multiple images for a single sample are not yet supported.")
465
+
466
+ batch_images = make_list_of_list_of_images(images)
467
+
468
+ validate_preprocess_arguments(
469
+ do_rescale=do_rescale,
470
+ rescale_factor=rescale_factor,
471
+ do_normalize=do_normalize,
472
+ image_mean=image_mean,
473
+ image_std=image_std,
474
+ do_pad=do_pad,
475
+ size_divisibility=size, # There is no pad divisibility in this processor, but pad requires the size arg.
476
+ do_resize=do_resize,
477
+ size=size,
478
+ resample=resample,
479
+ )
480
+ # All transformations expect numpy arrays.
481
+ batch_images = [[to_numpy_array(image) for image in images] for images in batch_images]
482
+
483
+ if is_scaled_image(batch_images[0][0]) and do_rescale:
484
+ logger.warning_once(
485
+ "It looks like you are trying to rescale already rescaled images. If the input"
486
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
487
+ )
488
+
489
+ if input_data_format is None:
490
+ # We assume that all images have the same channel dimension format.
491
+ input_data_format = infer_channel_dimension_format(batch_images[0][0])
492
+
493
+ original_image_sizes = [get_image_size(images[0], channel_dim=input_data_format) for images in batch_images]
494
+
495
+ if do_resize:
496
+ batch_images = [
497
+ [self.resize(image, size=size, input_data_format=input_data_format) for image in images]
498
+ for images in batch_images
499
+ ]
500
+
501
+ image_sizes = [get_image_size(images[0], channel_dim=input_data_format) for images in batch_images]
502
+ image_unpadded_heights = [[image_size[0]] for image_size in image_sizes]
503
+ image_unpadded_widths = [[image_size[1]] for image_size in image_sizes]
504
+
505
+ # scale_h is the same as scale_w
506
+ image_scale_factors = [
507
+ [resized_size[0] / original_size[0]]
508
+ for original_size, resized_size in zip(original_image_sizes, image_sizes)
509
+ ]
510
+
511
+ if do_pad:
512
+ batch_images = [
513
+ [
514
+ self.pad_image(
515
+ image,
516
+ size=size,
517
+ mode=padding_mode,
518
+ constant_values=padding_value,
519
+ input_data_format=input_data_format,
520
+ )
521
+ for image in images
522
+ ]
523
+ for images in batch_images
524
+ ]
525
+
526
+ if do_rescale:
527
+ batch_images = [
528
+ [self.rescale(image, scale=rescale_factor, input_data_format=input_data_format) for image in images]
529
+ for images in batch_images
530
+ ]
531
+
532
+ if do_normalize:
533
+ batch_images = [
534
+ [
535
+ self.normalize(image, mean=image_mean, std=image_std, input_data_format=input_data_format)
536
+ for image in images
537
+ ]
538
+ for images in batch_images
539
+ ]
540
+
541
+ if data_format is not None:
542
+ batch_images = [
543
+ [to_channel_dimension_format(image, data_format, input_data_format) for image in images]
544
+ for images in batch_images
545
+ ]
546
+
547
+ data = {
548
+ "images": batch_images,
549
+ "image_unpadded_heights": image_unpadded_heights,
550
+ "image_unpadded_widths": image_unpadded_widths,
551
+ "image_scale_factors": image_scale_factors,
552
+ }
553
+ return FuyuBatchFeature(data=data, tensor_type=return_tensors)
554
+
555
+ def get_num_patches(self, image_height: int, image_width: int, patch_size: Dict[str, int] = None) -> int:
556
+ """
557
+ Calculate number of patches required to encode an image.
558
+
559
+ Args:
560
+ image_height (`int`):
561
+ Height of the image.
562
+ image_width (`int`):
563
+ Width of the image.
564
+ patch_size (`Dict[str, int]`, *optional*, defaults to `self.patch_size`):
565
+ Dictionary in the format `{"height": int, "width": int}` specifying the size of the patches.
566
+ """
567
+ patch_size = patch_size if patch_size is not None else self.patch_size
568
+ patch_height, patch_width = patch_size["height"], patch_size["width"]
569
+
570
+ if image_height % patch_height != 0:
571
+ raise ValueError(f"{image_height=} must be divisible by {patch_height}")
572
+ if image_width % patch_width != 0:
573
+ raise ValueError(f"{image_width=} must be divisible by {patch_width}")
574
+
575
+ num_patches_per_dim_h = image_height // patch_height
576
+ num_patches_per_dim_w = image_width // patch_width
577
+ num_patches = num_patches_per_dim_h * num_patches_per_dim_w
578
+ return num_patches
579
+
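For intuition, assuming the processor's usual 30x30 patches and a padded 1080x1920 image (the defaults are taken as an assumption here), the count is (1080 // 30) * (1920 // 30) = 36 * 64 = 2304 patches. A minimal sketch of the same arithmetic:

```python
def num_patches(image_height: int, image_width: int, patch_height: int = 30, patch_width: int = 30) -> int:
    # Both dimensions must already be multiples of the patch size, as enforced above.
    assert image_height % patch_height == 0 and image_width % patch_width == 0
    return (image_height // patch_height) * (image_width // patch_width)

assert num_patches(1080, 1920) == 36 * 64 == 2304
```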
580
+ def patchify_image(self, image: "torch.Tensor", patch_size: Optional[Dict[str, int]] = None) -> "torch.Tensor":
581
+ """
582
+ Convert an image into a tensor of patches.
583
+
584
+ Args:
585
+ image (`torch.Tensor`):
586
+ Image to convert. Shape: [batch, channels, height, width]
587
+ patch_size (`Dict[str, int]`, *optional*, defaults to `self.patch_size`):
588
+ Dictionary in the format `{"height": int, "width": int}` specifying the size of the patches.
589
+ """
590
+ requires_backends(self, ["torch"])
591
+ patch_size = patch_size if patch_size is not None else self.patch_size
592
+ patch_height, patch_width = patch_size["height"], patch_size["width"]
593
+
594
+ # TODO refer to https://github.com/ArthurZucker/transformers/blob/0f0a3fe5ca5697ee58faeb5b53f049af720b5e98/src/transformers/models/vit_mae/modeling_vit_mae.py#L871
595
+ # torch implementation is faster but does not handle non-squares
596
+
597
+ batch_size, channels, _, _ = image.shape
598
+ unfolded_along_height = image.unfold(2, patch_height, patch_height)
599
+ patches = unfolded_along_height.unfold(3, patch_width, patch_width)
600
+ patches = patches.contiguous()
601
+ patches = patches.view(batch_size, channels, -1, patch_height, patch_width)
602
+ patches = patches.permute(0, 2, 3, 4, 1)
603
+ patches = patches.reshape(batch_size, -1, channels * patch_height * patch_width)
604
+ return patches
605
+
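A small standalone sanity check of the unfold-based patch extraction above, using a dummy tensor; it only reproduces the shape logic and is not part of the class:

```python
import torch

batch_size, channels, height, width = 2, 3, 60, 90
patch_h = patch_w = 30
image = torch.randn(batch_size, channels, height, width)

patches = image.unfold(2, patch_h, patch_h).unfold(3, patch_w, patch_w)  # [B, C, H/ph, W/pw, ph, pw]
patches = patches.contiguous().view(batch_size, channels, -1, patch_h, patch_w)
patches = patches.permute(0, 2, 3, 4, 1).reshape(batch_size, -1, channels * patch_h * patch_w)

assert patches.shape == (batch_size, (height // patch_h) * (width // patch_w), channels * patch_h * patch_w)
```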
606
+ def preprocess_with_tokenizer_info(
607
+ self,
608
+ image_input: "torch.Tensor",
609
+ image_present: "torch.Tensor",
610
+ image_unpadded_h: "torch.Tensor",
611
+ image_unpadded_w: "torch.Tensor",
612
+ image_placeholder_id: int,
613
+ image_newline_id: int,
614
+ variable_sized: bool,
615
+ patch_size: Optional[Dict[str, int]] = None,
616
+ ) -> FuyuBatchFeature:
617
+ """Process images for model input. In particular, variable-sized images are handled here.
618
+
619
+ Args:
620
+ image_input (`torch.Tensor` of shape [batch_size, subsequence_size, num_channels, height, width]):
621
+ Tensor of images padded to model input size.
622
+ image_present (`torch.Tensor` of shape [batch_size, subsequence_size, num_images]):
623
+ Tensor of 1s and 0s indicating whether an image is present.
624
+ image_unpadded_h (`torch.Tensor` of shape [batch_size, subsequence_size]):
625
+ Tensor of unpadded image heights.
626
+ image_unpadded_w (`torch.Tensor` of shape [batch_size, subsequence_size]):
627
+ Tensor of unpadded image widths.
628
+ image_placeholder_id (int):
629
+ The id of the image placeholder token. Comes from an associated tokenizer.
630
+ image_newline_id (int):
631
+ The id of the image newline token. Comes from an associated tokenizer.
632
+ variable_sized (bool):
633
+ Whether to process images as variable-sized.
634
+ patch_size (`Dict[str, int]`, *optional*, defaults to `self.patch_size`):
635
+ Size of the patches.
636
+ """
637
+ requires_backends(self, ["torch"])
638
+
639
+ patch_size = patch_size if patch_size is not None else self.patch_size
640
+ patch_height, patch_width = patch_size["height"], patch_size["width"]
641
+
642
+ # Only images that are present.
643
+ images: List[List[torch.Tensor]] = []
644
+ batch_image_patches: List[List[torch.Tensor]] = []
645
+ # Image input ids for every subsequence, including ones with no image present.
646
+ batch_image_input_ids: List[List[torch.Tensor]] = []
647
+ for batch_index in range(image_input.shape[0]):
648
+ image_input_ids = []
649
+ image_patches = []
650
+ for subseq_index in range(image_input.shape[1]):
651
+ if image_present[batch_index, subseq_index]:
652
+ image = image_input[batch_index, subseq_index]
653
+ image_height, image_width = image.shape[1], image.shape[2]
654
+ if variable_sized:
655
+ # The min() is required here due to floating point issues:
656
+ # math.ceil(torch.tensor(300).cuda() / 30) == 11
657
+ new_h = min(
658
+ image_height,
659
+ math.ceil(image_unpadded_h[batch_index, subseq_index] / patch_height) * patch_height,
660
+ )
661
+ new_w = min(
662
+ image_width,
663
+ math.ceil(image_unpadded_w[batch_index, subseq_index] / patch_width) * patch_width,
664
+ )
665
+ image = image[:, :new_h, :new_w]
666
+ image_height, image_width = new_h, new_w
667
+
668
+ num_patches = self.get_num_patches(image_height=image_height, image_width=image_width)
669
+ tensor_of_image_ids = torch.full(
670
+ [num_patches], image_placeholder_id, dtype=torch.int32, device=image_input.device
671
+ )
672
+ patches = self.patchify_image(image=image.unsqueeze(0)).squeeze(0)
673
+ assert num_patches == patches.shape[0]
674
+
675
+ if variable_sized:
676
+ # Now terminate each line with |NEWLINE|.
677
+ tensor_of_image_ids = tensor_of_image_ids.reshape(-1, image_width // patch_width)
678
+ newline_ids = torch.full(
679
+ [tensor_of_image_ids.shape[0], 1],
680
+ image_newline_id,
681
+ dtype=torch.int32,
682
+ device=image_input.device,
683
+ )
684
+ tensor_of_image_ids = torch.cat([tensor_of_image_ids, newline_ids], dim=1)
685
+ tensor_of_image_ids = tensor_of_image_ids.reshape(-1)
686
+
687
+ images.append([image])
688
+ image_input_ids.append(tensor_of_image_ids)
689
+ image_patches.append(patches)
690
+ else:
691
+ image_input_ids.append(torch.tensor([], dtype=torch.int32, device=image_input.device))
692
+
693
+ batch_image_input_ids.append(image_input_ids)
694
+ batch_image_patches.append(image_patches)
695
+
696
+ # Create image_patch_input_indices, where non-negative values correspond to image patches to be inserted in
697
+ # the stream.
698
+ image_patch_indices_per_batch: List[List[torch.Tensor]] = []
699
+ image_patch_indices_per_subsequence: List[List[torch.Tensor]] = []
700
+
701
+ for sample_image_input_ids in batch_image_input_ids:
702
+ index_offset = 0
703
+ per_batch_indices = []
704
+ per_subsequence_indices = []
705
+ for subseq_image_input_ids in sample_image_input_ids:
706
+ # Indices of image patches.
707
+ patches_mask = subseq_image_input_ids == image_placeholder_id
708
+ num_patches = torch.count_nonzero(patches_mask)
709
+ indices = torch.arange(num_patches, dtype=torch.int64, device=subseq_image_input_ids.device).type_as(
710
+ subseq_image_input_ids
711
+ )
712
+
713
+ # Place those indices in the image input ids token stream, with -1 representing non-index tokens.
714
+ indices_in_stream_per_batch = torch.full_like(subseq_image_input_ids, -1)
715
+ indices_in_stream_per_subsequence = torch.full_like(subseq_image_input_ids, -1)
716
+ patches_inds = torch.nonzero(patches_mask, as_tuple=True)[0]
717
+
718
+ indices_in_stream_per_batch[patches_inds] = indices + index_offset
719
+ indices_in_stream_per_subsequence[patches_inds] = indices
720
+
721
+ per_batch_indices.append(indices_in_stream_per_batch)
722
+ per_subsequence_indices.append(indices_in_stream_per_subsequence)
723
+ index_offset += num_patches
724
+
725
+ image_patch_indices_per_batch.append(per_batch_indices)
726
+ image_patch_indices_per_subsequence.append(per_subsequence_indices)
727
+
728
+ return FuyuBatchFeature(
729
+ data={
730
+ "images": images,
731
+ "image_input_ids": batch_image_input_ids,
732
+ "image_patches": batch_image_patches,
733
+ "image_patch_indices_per_batch": image_patch_indices_per_batch,
734
+ "image_patch_indices_per_subsequence": image_patch_indices_per_subsequence,
735
+ }
736
+ )
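To make the variable-sized path of `preprocess_with_tokenizer_info` concrete: after cropping to a whole number of patches, the placeholder ids are reshaped into rows and every row is terminated with a newline id. A toy sketch for a 2x3 patch grid; the two id values are hypothetical, not taken from the actual tokenizer:

```python
import torch

image_placeholder_id, image_newline_id = 71011, 71019  # hypothetical ids for the example
num_rows, num_cols = 2, 3  # image cropped to a 2x3 grid of patches

ids = torch.full([num_rows * num_cols], image_placeholder_id, dtype=torch.int32)
ids = ids.reshape(num_rows, num_cols)
newlines = torch.full([num_rows, 1], image_newline_id, dtype=torch.int32)
ids = torch.cat([ids, newlines], dim=1).reshape(-1)  # every row of patches ends with a newline id

assert ids.tolist() == [71011, 71011, 71011, 71019, 71011, 71011, 71011, 71019]
```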
venv/lib/python3.10/site-packages/transformers/models/fuyu/modeling_fuyu.py ADDED
@@ -0,0 +1,358 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch Fuyu model."""
16
+ from typing import List, Optional, Tuple, Union
17
+
18
+ import torch
19
+ import torch.utils.checkpoint
20
+ from torch import nn
21
+
22
+ from ...modeling_outputs import CausalLMOutputWithPast
23
+ from ...modeling_utils import PreTrainedModel
24
+ from ...models.auto.modeling_auto import AutoModelForCausalLM
25
+ from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
26
+ from .configuration_fuyu import FuyuConfig
27
+
28
+
29
+ logger = logging.get_logger(__name__)
30
+
31
+ _CONFIG_FOR_DOC = "FuyuConfig"
32
+
33
+
34
+ FUYU_START_DOCSTRING = r"""
35
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
36
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
37
+ etc.)
38
+
39
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
40
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
41
+ and behavior.
42
+
43
+ Parameters:
44
+ config ([`FuyuConfig`]):
45
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
46
+ load the weights associated with the model, only the configuration. Check out the
47
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
48
+ """
49
+
50
+
51
+ @add_start_docstrings(
52
+ "The bare Fuyu Model outputting raw hidden-states without any specific head on top.",
53
+ FUYU_START_DOCSTRING,
54
+ )
55
+ class FuyuPreTrainedModel(PreTrainedModel):
56
+ config_class = FuyuConfig
57
+ base_model_prefix = "fuyu"
58
+ supports_gradient_checkpointing = True
59
+ _no_split_modules = []
60
+ _skip_keys_device_placement = "past_key_values"
61
+
62
+ def _init_weights(self, module):
63
+ std = self.config.initializer_range
64
+ if isinstance(module, nn.Linear):
65
+ module.weight.data.normal_(mean=0.0, std=std)
66
+ if module.bias is not None:
67
+ module.bias.data.zero_()
68
+ elif isinstance(module, nn.Embedding):
69
+ module.weight.data.normal_(mean=0.0, std=std)
70
+ if module.padding_idx is not None:
71
+ module.weight.data[module.padding_idx].zero_()
72
+
73
+
74
+ FUYU_INPUTS_DOCSTRING = r"""
75
+ Args:
76
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
77
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
78
+ it.
79
+
80
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
81
+ [`PreTrainedTokenizer.__call__`] for details.
82
+
83
+ [What are input IDs?](../glossary#input-ids)
84
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
85
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
86
+
87
+ - 1 for tokens that are **not masked**,
88
+ - 0 for tokens that are **masked**.
89
+
90
+ [What are attention masks?](../glossary#attention-mask)
91
+
92
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
93
+ [`PreTrainedTokenizer.__call__`] for details.
94
+
95
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
96
+ `past_key_values`).
97
+
98
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
99
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
100
+ information on the default strategy.
101
+
102
+ - 1 indicates the head is **not masked**,
103
+ - 0 indicates the head is **masked**.
104
+ image_patches (`torch.FloatTensor` of shape `(batch_size, num_total_patches, patch_size x patch_size x num_channels)`, *optional*):
105
+ Image patches to be used as continuous embeddings. The patches are flattened and then projected to the
106
+ hidden size of the model.
107
+ image_patches_indices (`torch.LongTensor` of shape `(batch_size, num_total_patches + number_of_newline_tokens + number_of_text_tokens)`, *optional*):
108
+ Indices indicating at which position the image_patches have to be inserted in input_embeds.
109
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
110
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
111
+ config.n_positions - 1]`.
112
+
113
+ [What are position IDs?](../glossary#position-ids)
114
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
115
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
116
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
117
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
118
+
119
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
120
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
121
+
122
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
123
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
124
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
125
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
126
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
127
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
128
+ model's internal embedding lookup matrix.
129
+ use_cache (`bool`, *optional*):
130
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
131
+ `past_key_values`).
132
+ output_attentions (`bool`, *optional*):
133
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
134
+ tensors for more detail.
135
+ output_hidden_states (`bool`, *optional*):
136
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
137
+ more detail.
138
+ return_dict (`bool`, *optional*):
139
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
140
+ """
141
+
142
+
143
+ @add_start_docstrings(
144
+ "Fuyu Model with a language modeling head on top for causal language model conditioned on image patches and text.",
145
+ FUYU_START_DOCSTRING,
146
+ )
147
+ class FuyuForCausalLM(FuyuPreTrainedModel):
148
+ def __init__(self, config: FuyuConfig):
149
+ super().__init__(config)
150
+ self.padding_idx = config.pad_token_id
151
+ self.vocab_size = config.vocab_size
152
+ self.language_model = AutoModelForCausalLM.from_config(config.text_config)
153
+
154
+ self.vision_embed_tokens = nn.Linear(
155
+ config.patch_size * config.patch_size * config.num_channels, config.hidden_size
156
+ )
157
+
158
+ self.gradient_checkpointing = False
159
+ # Initialize weights and apply final processing
160
+ self.post_init()
161
+
162
+ def get_input_embeddings(self):
163
+ return self.language_model.get_input_embeddings()
164
+
165
+ def set_input_embeddings(self, value):
166
+ self.language_model.set_input_embeddings(value)
167
+
168
+ def gather_continuous_embeddings(
169
+ self,
170
+ word_embeddings: torch.Tensor,
171
+ continuous_embeddings: List[torch.Tensor],
172
+ image_patch_input_indices: torch.Tensor,
173
+ ) -> torch.Tensor:
174
+ """This function places the continuous_embeddings into the word_embeddings at the locations
175
+ indicated by image_patch_input_indices. Different batch elements can have different numbers of continuous
176
+ embeddings.
177
+
178
+ Args:
179
+ word_embeddings (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
180
+ Tensor of word embeddings.
181
+ continuous_embeddings (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`):
182
+ Tensor of continuous embeddings. The length of the list is the batch size. Each entry is shape
183
+ [num_image_embeddings, hidden], and num_image_embeddings needs to match the number of non-negative
184
+ indices in image_patch_input_indices for that batch element.
185
+ image_patch_input_indices (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
186
+ Tensor of indices of the image patches in the input_ids tensor.
187
+ """
188
+ if not (word_embeddings.shape[0] == len(continuous_embeddings)):
189
+ raise ValueError(
190
+ f"Batch sizes must match! Got {len(continuous_embeddings)=} and {word_embeddings.shape[0]=}"
191
+ )
192
+
193
+ output_embeddings = word_embeddings.clone()
194
+ for batch_idx in range(word_embeddings.shape[0]):
195
+ # First, find the positions of all the non-negative values in image_patch_input_indices, those are the
196
+ # positions in word_embeddings that we want to replace with content from continuous_embeddings.
197
+ dst_indices = torch.nonzero(image_patch_input_indices[batch_idx] >= 0, as_tuple=True)[0]
198
+ # Next look up those indices in image_patch_input_indices to find the indices in continuous_embeddings that we
199
+ # want to use to replace the values in word_embeddings.
200
+ src_indices = image_patch_input_indices[batch_idx][dst_indices]
201
+ # Check if we have more indices than embeddings. Note that we could have fewer indices if images got truncated.
202
+ if src_indices.shape[0] > continuous_embeddings[batch_idx].shape[0]:
203
+ raise ValueError(
204
+ f"Number of continuous embeddings {continuous_embeddings[batch_idx].shape=} does not match "
205
+ f"number of continuous token ids {src_indices.shape=} in batch element {batch_idx}."
206
+ )
207
+ output_embeddings[batch_idx, dst_indices] = continuous_embeddings[batch_idx][src_indices]
208
+ return output_embeddings
209
+
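A toy sketch of the gathering logic above with tiny dimensions, so the replacement positions are easy to follow; every tensor is made up for the example:

```python
import torch

hidden_size = 4
word_embeddings = torch.zeros(1, 5, hidden_size)                # batch of 1, sequence of 5
patch_embeddings = [torch.ones(2, hidden_size)]                 # 2 image-patch embeddings for that sample
image_patch_input_indices = torch.tensor([[0, 1, -1, -1, -1]])  # first two positions come from patches

output = word_embeddings.clone()
for batch_idx in range(word_embeddings.shape[0]):
    dst = torch.nonzero(image_patch_input_indices[batch_idx] >= 0, as_tuple=True)[0]
    src = image_patch_input_indices[batch_idx][dst]
    output[batch_idx, dst] = patch_embeddings[batch_idx][src]

assert output[0, :2].eq(1).all() and output[0, 2:].eq(0).all()
```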
210
+ @add_start_docstrings_to_model_forward(FUYU_INPUTS_DOCSTRING)
211
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
212
+ def forward(
213
+ self,
214
+ input_ids: torch.LongTensor = None,
215
+ image_patches: torch.Tensor = None, # [batch_size, num_total_patches, patch_size x patch_size x num_channels]
216
+ image_patches_indices: torch.Tensor = None,
217
+ attention_mask: Optional[torch.Tensor] = None,
218
+ position_ids: Optional[torch.LongTensor] = None,
219
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
220
+ inputs_embeds: Optional[torch.FloatTensor] = None,
221
+ use_cache: Optional[bool] = None,
222
+ labels: Optional[torch.Tensor] = None,
223
+ output_attentions: Optional[bool] = None,
224
+ output_hidden_states: Optional[bool] = None,
225
+ return_dict: Optional[bool] = None,
226
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
227
+ r"""
228
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
229
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
230
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
231
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
232
+
233
+ Returns:
234
+
235
+ Examples:
236
+
237
+ ```python
238
+ >>> from transformers import FuyuProcessor, FuyuForCausalLM
239
+ >>> from PIL import Image
240
+ >>> import requests
241
+
242
+ >>> processor = FuyuProcessor.from_pretrained("adept/fuyu-8b")
243
+ >>> model = FuyuForCausalLM.from_pretrained("adept/fuyu-8b")
244
+
245
+ >>> url = "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/bus.png"
246
+ >>> image = Image.open(requests.get(url, stream=True).raw)
247
+ >>> prompt = "Generate a coco-style caption.\n"
248
+
249
+ >>> inputs = processor(text=prompt, images=image, return_tensors="pt")
250
+ >>> outputs = model(**inputs)
251
+
252
+ >>> generated_ids = model.generate(**inputs, max_new_tokens=7)
253
+ >>> generation_text = processor.batch_decode(generated_ids[:, -7:], skip_special_tokens=True)
254
+ >>> print(generation_text[0])
255
+ A blue bus parked on the side of a road.
256
+ ```"""
257
+
258
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
259
+ output_hidden_states = (
260
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
261
+ )
262
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
263
+
264
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
265
+
266
+ if input_ids is not None and inputs_embeds is not None:
267
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
268
+ elif input_ids is not None:
269
+ batch_size, seq_length = input_ids.shape
270
+ elif inputs_embeds is not None:
271
+ batch_size, seq_length, _ = inputs_embeds.shape
272
+ else:
273
+ raise ValueError("You have to specify either input_is or inputs_embeds")
274
+
275
+ seq_length_with_past = seq_length
276
+ past_key_values_length = 0
277
+
278
+ if past_key_values is not None:
279
+ past_key_values_length = past_key_values[0][0].shape[2]
280
+ seq_length_with_past = seq_length_with_past + past_key_values_length
281
+
282
+ if position_ids is None:
283
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
284
+ position_ids = torch.arange(
285
+ past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
286
+ )
287
+ position_ids = position_ids.unsqueeze(0)
288
+
289
+ if inputs_embeds is None:
290
+ inputs_embeds = self.language_model.get_input_embeddings()(input_ids)
291
+ if image_patches is not None and past_key_values is None:
292
+ patch_embeddings = [
293
+ self.vision_embed_tokens(patch.to(self.vision_embed_tokens.weight.dtype))
294
+ .squeeze(0)
295
+ .to(inputs_embeds.device)
296
+ for patch in image_patches
297
+ ]
298
+ inputs_embeds = self.gather_continuous_embeddings(
299
+ word_embeddings=inputs_embeds,
300
+ continuous_embeddings=patch_embeddings,
301
+ image_patch_input_indices=image_patches_indices,
302
+ )
303
+
304
+ outputs = self.language_model(
305
+ inputs_embeds=inputs_embeds,
306
+ attention_mask=attention_mask,
307
+ position_ids=position_ids,
308
+ past_key_values=past_key_values,
309
+ output_attentions=output_attentions,
310
+ output_hidden_states=output_hidden_states,
311
+ labels=labels,
312
+ use_cache=use_cache,
313
+ return_dict=return_dict,
314
+ )
315
+
316
+ return outputs
317
+
318
+ def prepare_inputs_for_generation(
319
+ self,
320
+ input_ids,
321
+ past_key_values=None,
322
+ attention_mask=None,
323
+ inputs_embeds=None,
324
+ image_patches=None,
325
+ image_patches_indices=None,
326
+ **kwargs,
327
+ ):
328
+ if past_key_values:
329
+ input_ids = input_ids[:, -1:]
330
+
331
+ position_ids = kwargs.get("position_ids", None)
332
+ if attention_mask is not None and position_ids is None:
333
+ # create position_ids on the fly for batch generation
334
+ position_ids = attention_mask.long().cumsum(-1) - 1
335
+ position_ids.masked_fill_(attention_mask == 0, 1)
336
+ if past_key_values:
337
+ position_ids = position_ids[:, -1].unsqueeze(-1)
338
+
339
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
340
+ if inputs_embeds is not None and past_key_values is None:
341
+ model_inputs = {"inputs_embeds": inputs_embeds}
342
+ else:
343
+ model_inputs = {"input_ids": input_ids}
344
+
345
+ if image_patches_indices is not None:
346
+ model_inputs["image_patches_indices"] = image_patches_indices
347
+
348
+ model_inputs.update(
349
+ {
350
+ "position_ids": position_ids,
351
+ "past_key_values": past_key_values,
352
+ "use_cache": kwargs.get("use_cache"),
353
+ "attention_mask": attention_mask,
354
+ "image_patches_indices": image_patches_indices if past_key_values is None else None,
355
+ "image_patches": image_patches if past_key_values is None else None,
356
+ }
357
+ )
358
+ return model_inputs
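One detail of `prepare_inputs_for_generation` worth illustrating: for left-padded batches, position ids are rebuilt from the attention mask via a cumulative sum, with padded slots set to a dummy value. A minimal sketch with a made-up mask:

```python
import torch

attention_mask = torch.tensor([[0, 0, 1, 1, 1]])      # one left-padded sample
position_ids = attention_mask.long().cumsum(-1) - 1   # [-1, -1, 0, 1, 2]
position_ids.masked_fill_(attention_mask == 0, 1)     # padded slots get a dummy position
assert position_ids.tolist() == [[1, 1, 0, 1, 2]]
```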
venv/lib/python3.10/site-packages/transformers/models/fuyu/processing_fuyu.py ADDED
@@ -0,0 +1,694 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Image/Text processor class for Fuyu
17
+ """
18
+ import re
19
+ from typing import Dict, List, Optional, Tuple, Union
20
+
21
+ import numpy as np
22
+
23
+ from ...processing_utils import ProcessorMixin
24
+ from ...tokenization_utils_base import PaddingStrategy, TruncationStrategy
25
+ from ...utils import TensorType, is_torch_available, logging, requires_backends
26
+
27
+
28
+ if is_torch_available():
29
+ from .image_processing_fuyu import FuyuBatchFeature
30
+
31
+
32
+ logger = logging.get_logger(__name__)
33
+
34
+
35
+ if is_torch_available():
36
+ import torch
37
+
38
+
39
+ TEXT_REPR_BBOX_OPEN = "<box>"
40
+ TEXT_REPR_BBOX_CLOSE = "</box>"
41
+ TEXT_REPR_POINT_OPEN = "<point>"
42
+ TEXT_REPR_POINT_CLOSE = "</point>"
43
+
44
+ TOKEN_BBOX_OPEN_STRING = "<0x00>" # <bbox>
45
+ TOKEN_BBOX_CLOSE_STRING = "<0x01>" # </bbox>
46
+ TOKEN_POINT_OPEN_STRING = "<0x02>" # <point>
47
+ TOKEN_POINT_CLOSE_STRING = "<0x03>" # </point>
48
+ BEGINNING_OF_ANSWER_STRING = "<0x04>" # <boa>
49
+
50
+
51
+ def full_unpacked_stream_to_tensor(
52
+ all_bi_tokens_to_place: List[int],
53
+ full_unpacked_stream: List["torch.Tensor"],
54
+ fill_value: int,
55
+ batch_size: int,
56
+ new_seq_len: int,
57
+ offset: int,
58
+ ) -> "torch.Tensor":
59
+ """Takes an unpacked stream of tokens (i.e. a list of tensors, one for each item in the batch) and does
60
+ the required padding to create a single tensor for the batch of shape batch_size x new_seq_len.
61
+ """
62
+
63
+ assert len(all_bi_tokens_to_place) == batch_size
64
+ assert len(full_unpacked_stream) == batch_size
65
+
66
+ # Create padded tensors for the full batch.
67
+ new_padded_tensor = torch.full(
68
+ [batch_size, new_seq_len],
69
+ fill_value=fill_value,
70
+ dtype=full_unpacked_stream[0].dtype,
71
+ device=full_unpacked_stream[0].device,
72
+ )
73
+
74
+ # Place each batch entry into the batch tensor.
75
+ for bi in range(batch_size):
76
+ tokens_to_place = all_bi_tokens_to_place[bi]
77
+ new_padded_tensor[bi, :tokens_to_place] = full_unpacked_stream[bi][offset : tokens_to_place + offset]
78
+
79
+ return new_padded_tensor
80
+
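A minimal sketch of the padding performed by `full_unpacked_stream_to_tensor`, using made-up token ids, a fill value of 0 and an offset of 0:

```python
import torch

streams = [torch.tensor([5, 6, 7]), torch.tensor([8, 9])]  # variable-length per-sample streams
tokens_to_place = [3, 2]
new_seq_len, fill_value = 4, 0

padded = torch.full([len(streams), new_seq_len], fill_value, dtype=streams[0].dtype)
for bi, n in enumerate(tokens_to_place):
    padded[bi, :n] = streams[bi][:n]

assert padded.tolist() == [[5, 6, 7, 0], [8, 9, 0, 0]]
```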
81
+
82
+ def construct_full_unpacked_stream(
83
+ num_real_text_tokens: Union[List[List[int]], "torch.Tensor"],
84
+ input_stream: "torch.Tensor",
85
+ image_tokens: List[List["torch.Tensor"]],
86
+ batch_size: int,
87
+ num_sub_sequences: int,
88
+ ) -> List["torch.Tensor"]:
89
+ """Takes an input_stream tensor of shape B x S x ?. For each subsequence, adds any required
90
+ padding to account for images and then unpacks the subsequences to create a single sequence per item in the batch.
91
+ Returns a list of tensors, one for each item in the batch."""
92
+
93
+ all_bi_stream = []
94
+
95
+ for batch_index in range(batch_size):
96
+ all_si_stream = []
97
+
98
+ # First, construct full token stream (including image placeholder tokens) and loss mask for each subsequence
99
+ # and append to lists. We use lists rather than tensors because each subsequence is variable-sized.
100
+ # TODO Remove this logic in a subsequent release since subsequences are not supported.
101
+ image_adjustment = image_tokens[batch_index][0]
102
+ subsequence_stream = torch.cat([image_adjustment, input_stream[batch_index, 0]], dim=0)
103
+ num_real_tokens = image_adjustment.shape[0] + num_real_text_tokens[batch_index][0]
104
+ all_si_stream.append(subsequence_stream[:num_real_tokens])
105
+ all_bi_stream.append(torch.cat(all_si_stream, dim=0))
106
+
107
+ return all_bi_stream
108
+
109
+
110
+ def _replace_string_repr_with_token_tags(prompt: str) -> str:
111
+ prompt = prompt.replace(TEXT_REPR_POINT_OPEN, TOKEN_POINT_OPEN_STRING)
112
+ prompt = prompt.replace(TEXT_REPR_POINT_CLOSE, TOKEN_POINT_CLOSE_STRING)
113
+ prompt = prompt.replace(TEXT_REPR_BBOX_OPEN, TOKEN_BBOX_OPEN_STRING)
114
+ prompt = prompt.replace(TEXT_REPR_BBOX_CLOSE, TOKEN_BBOX_CLOSE_STRING)
115
+ return prompt
116
+
117
+
118
+ def _segment_prompt_into_text_token_conversions(prompt: str) -> List:
119
+ """
120
+ Given a string prompt, converts the prompt into a list of (text, is_coordinate) segments.
121
+ """
122
+ # Wherever we see one of the [TOKEN_OPEN_STRING, TOKEN_CLOSE_STRING] tags, we split the prompt
123
+ prompt_text_list: List = []
124
+ regex_pattern = re.compile(
125
+ f"({TOKEN_BBOX_OPEN_STRING}|{TOKEN_BBOX_CLOSE_STRING}|{TOKEN_POINT_OPEN_STRING}|{TOKEN_POINT_CLOSE_STRING})"
126
+ )
127
+ # Split by the regex pattern
128
+ prompt_split = regex_pattern.split(prompt)
129
+ for i, elem in enumerate(prompt_split):
130
+ if len(elem) == 0 or elem in [
131
+ TOKEN_BBOX_OPEN_STRING,
132
+ TOKEN_BBOX_CLOSE_STRING,
133
+ TOKEN_POINT_OPEN_STRING,
134
+ TOKEN_POINT_CLOSE_STRING,
135
+ ]:
136
+ continue
137
+ prompt_text_list.append(
138
+ (elem, i > 1 and prompt_split[i - 1] in [TOKEN_BBOX_OPEN_STRING, TOKEN_POINT_OPEN_STRING])
139
+ )
140
+ return prompt_text_list
141
+
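As an illustration of the segmentation above, a prompt containing one bounding box splits into a plain-text piece and a coordinate piece. The sketch below reproduces the same logic standalone; the prompt text is made up, and the tag constants mirror the token strings defined above:

```python
import re

TOKEN_BBOX_OPEN, TOKEN_BBOX_CLOSE = "<0x00>", "<0x01>"
TOKEN_POINT_OPEN, TOKEN_POINT_CLOSE = "<0x02>", "<0x03>"

prompt = f"Where is it?{TOKEN_BBOX_OPEN}10, 20, 30, 40{TOKEN_BBOX_CLOSE}"
pattern = re.compile(f"({TOKEN_BBOX_OPEN}|{TOKEN_BBOX_CLOSE}|{TOKEN_POINT_OPEN}|{TOKEN_POINT_CLOSE})")

pieces = []
split = pattern.split(prompt)
for i, elem in enumerate(split):
    if not elem or elem in (TOKEN_BBOX_OPEN, TOKEN_BBOX_CLOSE, TOKEN_POINT_OPEN, TOKEN_POINT_CLOSE):
        continue
    # The second tuple element marks coordinate pieces (text directly preceded by an open tag).
    pieces.append((elem, i > 1 and split[i - 1] in (TOKEN_BBOX_OPEN, TOKEN_POINT_OPEN)))

assert pieces == [("Where is it?", False), ("10, 20, 30, 40", True)]
```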
142
+
143
+ def _transform_coordinates_and_tokenize(prompt: str, scale_factor: float, tokenizer) -> List[int]:
144
+ """
145
+ This function transforms the prompt in the following fashion:
146
+ - <box> <point> and </box> </point> to their respective token mappings
147
+ - extract the coordinates from the tag
148
+ - transform the coordinates into the transformed image space
149
+ - return the prompt tokens with the transformed coordinates and new tags
150
+
151
+ Bounding boxes and points MUST be in the following format: <box>y1, x1, y2, x2</box> <point>x, y</point> The spaces
152
+ and punctuation added above are NOT optional.
153
+ """
154
+ # Make a namedtuple that stores "text" and "is_bbox"
155
+
156
+ # We want to do the following: Tokenize the code normally -> when we see a point or box, tokenize using the tokenize_within_tag function
157
+ # When point or box close tag, continue tokenizing normally
158
+ # First, we replace the point and box tags with their respective tokens
159
+ prompt = _replace_string_repr_with_token_tags(prompt)
160
+ # Tokenize the prompt
161
+ # Convert prompt into a list split
162
+ prompt_text_list = _segment_prompt_into_text_token_conversions(prompt)
163
+ transformed_prompt_tokens: List[int] = []
164
+ for elem in prompt_text_list:
165
+ if elem[1]:
166
+ # This is a location, we need to tokenize it
167
+ within_tag_tokenized = _transform_within_tags(elem[0], scale_factor, tokenizer)
168
+ # Surround the text with the open and close tags
169
+ transformed_prompt_tokens.extend(within_tag_tokenized)
170
+ else:
171
+ transformed_prompt_tokens.extend(tokenizer(elem[0], add_special_tokens=False).input_ids)
172
+ return transformed_prompt_tokens
173
+
174
+
175
+ def _transform_within_tags(text: str, scale_factor: float, tokenizer) -> List[int]:
176
+ """
177
+ Given a bounding box of the form <box>1, 2, 3, 4</box> or a point of the form <point>1, 2</point>, this function is
179
+ responsible for converting 1, 2, 3, 4 into the tokens for 1 2 3 4, without any commas.
179
+ """
180
+ # Convert the text into a list of strings.
181
+ num_int_strs = text.split(",")
182
+ if len(num_int_strs) == 2:
183
+ # If there are any open or close tags, remove them.
184
+ token_space_open_string = tokenizer.vocab[TOKEN_POINT_OPEN_STRING]
185
+ token_space_close_string = tokenizer.vocab[TOKEN_POINT_CLOSE_STRING]
186
+ else:
187
+ token_space_open_string = tokenizer.vocab[TOKEN_BBOX_OPEN_STRING]
188
+ token_space_close_string = tokenizer.vocab[TOKEN_BBOX_CLOSE_STRING]
189
+
190
+ # Strip whitespace and convert the coordinate strings to floats.
191
+ num_ints = [float(num.strip()) for num in num_int_strs]
192
+ # Scale to the transformed image size.
193
+ if len(num_ints) == 2:
194
+ num_ints_translated = scale_point_to_transformed_image(x=num_ints[0], y=num_ints[1], scale_factor=scale_factor)
195
+ elif len(num_ints) == 4:
196
+ num_ints_translated = scale_bbox_to_transformed_image(
197
+ top=num_ints[0],
198
+ left=num_ints[1],
199
+ bottom=num_ints[2],
200
+ right=num_ints[3],
201
+ scale_factor=scale_factor,
202
+ )
203
+ else:
204
+ raise ValueError(f"Invalid number of ints: {len(num_ints)}")
205
+ # Tokenize the scaled coordinates by looking them up directly in the vocabulary.
206
+ tokens = [tokenizer.vocab[str(num)] for num in num_ints_translated]
207
+ return [token_space_open_string] + tokens + [token_space_close_string]
208
+
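The transform above halves each raw coordinate, applies the resize scale factor computed by the image processor, and rounds, before looking the resulting integers up in the vocabulary. A tiny numeric sketch of just the halve-scale-round step, with a made-up scale factor and coordinates:

```python
import numpy as np

def to_transformed(coord: float, scale_factor: float) -> int:
    # Mirrors the scaling helpers used here: halve the raw coordinate, then scale and round.
    return int(np.round((coord / 2) * scale_factor))

scale_factor = 0.4  # hypothetical resize factor from the original to the processed image
assert [to_transformed(c, scale_factor) for c in (10, 20, 30, 40)] == [2, 4, 6, 8]
```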
209
+
210
+ def _tokenize_prompts_with_image_and_batch(
211
+ tokenizer,
212
+ prompts: List[List[str]],
213
+ scale_factors: Optional[List[List["torch.Tensor"]]],
214
+ max_tokens_to_generate: int,
215
+ max_position_embeddings: int,
216
+ add_BOS: bool, # Same issue with types as above
217
+ add_beginning_of_answer_token: bool,
218
+ ) -> Tuple["torch.Tensor", "torch.Tensor"]:
219
+ """
220
+ Given a set of prompts and number of tokens to generate:
221
+ - tokenize prompts
222
+ - set the sequence length to be the max of length of prompts plus the number of tokens we would like to generate
223
+ - pad all the sequences to this length so we can convert them into a 3D tensor.
224
+ """
225
+
226
+ # If not tool use, transform the coordinates while tokenizing
227
+ if scale_factors is not None:
228
+ transformed_prompt_tokens = []
229
+ for prompt_seq, scale_factor_seq in zip(prompts, scale_factors):
230
+ transformed_prompt_tokens.append(
231
+ [
232
+ _transform_coordinates_and_tokenize(prompt, scale_factor.item(), tokenizer)
233
+ for prompt, scale_factor in zip(prompt_seq, scale_factor_seq)
234
+ ]
235
+ )
236
+ else:
237
+ transformed_prompt_tokens = [[tokenizer.tokenize(prompt) for prompt in prompt_seq] for prompt_seq in prompts]
238
+
239
+ prompts_tokens = transformed_prompt_tokens
240
+
241
+ if add_BOS:
242
+ bos_token = tokenizer.vocab["<s>"]
243
+ else:
244
+ bos_token = tokenizer.vocab["|ENDOFTEXT|"]
245
+ prompts_tokens = [[[bos_token] + x for x in prompt_seq] for prompt_seq in prompts_tokens]
246
+ if add_beginning_of_answer_token:
247
+ boa = tokenizer.vocab[BEGINNING_OF_ANSWER_STRING]
248
+ # Only add the beginning-of-answer token to the last subsequence since that is what will be completed
249
+ for token_seq in prompts_tokens:
250
+ token_seq[-1].append(boa)
251
+
252
+ # Now we have a list of list of tokens which each list has a different
253
+ # size. We want to extend this list to:
254
+ # - incorporate the tokens that need to be generated
255
+ # - make all the sequences equal length.
256
+ # Get the prompts length.
257
+
258
+ prompts_length = [[len(x) for x in prompts_tokens_seq] for prompts_tokens_seq in prompts_tokens]
259
+ # Get the max prompts length.
260
+ max_prompt_len: int = np.max(prompts_length)
261
+ # Number of tokens in each sample of the batch.
262
+ samples_length = min(max_prompt_len + max_tokens_to_generate, max_position_embeddings)
263
+ if max_prompt_len + max_tokens_to_generate > max_position_embeddings:
264
+ logger.warning(
265
+ f"Max subsequence prompt length of {max_prompt_len} + max tokens to generate {max_tokens_to_generate}",
266
+ f"exceeds context length of {max_position_embeddings}. Will generate as many tokens as possible.",
267
+ )
268
+ # Now update the list of lists so every sequence has the same size: samples_length.
269
+ for prompt_tokens_seq, prompts_length_seq in zip(prompts_tokens, prompts_length):
270
+ for prompt_tokens, prompt_length in zip(prompt_tokens_seq, prompts_length_seq):
271
+ if len(prompt_tokens) > samples_length:
272
+ raise ValueError("Length of subsequence prompt exceeds sequence length.")
273
+ padding_size = samples_length - prompt_length
274
+ prompt_tokens.extend([tokenizer.vocab["|ENDOFTEXT|"]] * padding_size)
275
+
276
+ # Now we are in a structured format, we can convert to tensors.
277
+ prompts_tokens_tensor = torch.tensor(prompts_tokens, dtype=torch.int64)
278
+ prompts_length_tensor = torch.tensor(prompts_length, dtype=torch.int64)
279
+
280
+ return prompts_tokens_tensor, prompts_length_tensor
281
+
282
+
283
+ # Simplified assuming self.crop_top = self.padding_top = 0
284
+ def original_to_transformed_h_coords(original_coords, scale_h):
285
+ return np.round(original_coords * scale_h).astype(np.int32)
286
+
287
+
288
+ # Simplified assuming self.crop_left = self.padding_left = 0
289
+ def original_to_transformed_w_coords(original_coords, scale_w):
290
+ return np.round(original_coords * scale_w).astype(np.int32)
291
+
292
+
293
+ def scale_point_to_transformed_image(x: float, y: float, scale_factor: float) -> List[int]:
294
+ x_scaled = original_to_transformed_w_coords(np.array([x / 2]), scale_factor)[0]
295
+ y_scaled = original_to_transformed_h_coords(np.array([y / 2]), scale_factor)[0]
296
+ return [x_scaled, y_scaled]
297
+
298
+
299
+ def scale_bbox_to_transformed_image(
300
+ top: float, left: float, bottom: float, right: float, scale_factor: float
301
+ ) -> List[int]:
302
+ top_scaled = original_to_transformed_w_coords(np.array([top / 2]), scale_factor)[0]
303
+ left_scaled = original_to_transformed_h_coords(np.array([left / 2]), scale_factor)[0]
304
+ bottom_scaled = original_to_transformed_w_coords(np.array([bottom / 2]), scale_factor)[0]
305
+ right_scaled = original_to_transformed_h_coords(np.array([right / 2]), scale_factor)[0]
306
+ return [top_scaled, left_scaled, bottom_scaled, right_scaled]
307
+
308
+
309
+ class FuyuProcessor(ProcessorMixin):
310
+ r"""
311
+ Constructs a Fuyu processor which wraps a Fuyu image processor and a Llama tokenizer into a single processor.
312
+
313
+ [`FuyuProcessor`] offers all the functionalities of [`FuyuImageProcessor`] and [`LlamaTokenizerFast`]. See the
314
+ [`~FuyuProcessor.__call__`] and [`~FuyuProcessor.decode`] for more information.
315
+
316
+ Args:
317
+ image_processor ([`FuyuImageProcessor`]):
318
+ The image processor is a required input.
319
+ tokenizer ([`LlamaTokenizerFast`]):
320
+ The tokenizer is a required input.
321
+ """
322
+
323
+ attributes = ["image_processor", "tokenizer"]
324
+ image_processor_class = "FuyuImageProcessor"
325
+ tokenizer_class = "AutoTokenizer"
326
+
327
+ def __init__(self, image_processor, tokenizer):
328
+ super().__init__(image_processor=image_processor, tokenizer=tokenizer)
329
+ self.image_processor = image_processor
330
+ self.tokenizer = tokenizer
331
+ self.max_tokens_to_generate = 10
332
+ self.max_position_embeddings = 16384 # TODO Can't derive this from model files: where to set it?
333
+ self.pad_token_id = 0
334
+ self.dummy_image_index = -1
335
+
336
+ def _left_pad_inputs_with_attention_mask(self, model_inputs: List[Dict], return_attention_mask: bool):
337
+ max_length_input_ids = max(entry["input_ids"].shape[1] for entry in model_inputs)
338
+ max_length_image_patch_indices = max(entry["image_patches_indices"].shape[1] for entry in model_inputs)
339
+
340
+ batched_inputs = {"input_ids": [], "image_patches": [], "image_patches_indices": [], "attention_mask": []}
341
+
342
+ for entry in model_inputs:
343
+ for key, tensor in entry.items():
344
+ if key == "input_ids":
345
+ num_padding_tokens = max_length_input_ids - tensor.shape[1]
346
+ padded_input_ids = torch.cat(
347
+ [
348
+ torch.full((tensor.shape[0], num_padding_tokens), self.pad_token_id, dtype=torch.long),
349
+ tensor,
350
+ ],
351
+ dim=1,
352
+ )
353
+ batched_inputs[key].append(padded_input_ids)
354
+
355
+ attention_mask = torch.cat(
356
+ [torch.zeros(tensor.shape[0], num_padding_tokens, dtype=torch.long), torch.ones_like(tensor)],
357
+ dim=1,
358
+ )
359
+ batched_inputs["attention_mask"].append(attention_mask)
360
+
361
+ elif key == "image_patches":
362
+ # For image_patches, we don't pad but just append them to the list.
363
+ batched_inputs[key].append(tensor)
364
+
365
+ else: # for image_patches_indices
366
+ num_padding_indices = max_length_image_patch_indices - tensor.shape[1]
367
+ padded_indices = torch.cat(
368
+ [
369
+ torch.full(
370
+ (tensor.shape[0], num_padding_indices), self.dummy_image_index, dtype=torch.long
371
+ ),
372
+ tensor,
373
+ ],
374
+ dim=1,
375
+ )
376
+ batched_inputs[key].append(padded_indices)
377
+ batched_keys = ["input_ids", "image_patches_indices"]
378
+ if return_attention_mask:
379
+ batched_keys.append("attention_mask")
380
+ for key in batched_keys:
381
+ batched_inputs[key] = torch.cat(batched_inputs[key], dim=0)
382
+
383
+ return batched_inputs
384
+
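A compact sketch of the left-padding above for two samples of different lengths, assuming a pad token id of 0 and made-up input ids:

```python
import torch

pad_token_id = 0
input_ids = [torch.tensor([[11, 12, 13]]), torch.tensor([[21, 22]])]
max_len = max(t.shape[1] for t in input_ids)

padded, masks = [], []
for t in input_ids:
    n_pad = max_len - t.shape[1]
    padded.append(torch.cat([torch.full((1, n_pad), pad_token_id, dtype=torch.long), t], dim=1))
    masks.append(torch.cat([torch.zeros(1, n_pad, dtype=torch.long), torch.ones_like(t)], dim=1))

assert torch.cat(padded).tolist() == [[11, 12, 13], [0, 21, 22]]
assert torch.cat(masks).tolist() == [[1, 1, 1], [0, 1, 1]]
```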
385
+ def get_sample_encoding(
386
+ self,
387
+ prompts,
388
+ scale_factors,
389
+ image_unpadded_heights,
390
+ image_unpadded_widths,
391
+ image_placeholder_id,
392
+ image_newline_id,
393
+ tensor_batch_images,
394
+ ):
395
+ image_present = torch.ones(1, 1, 1)
396
+ model_image_input = self.image_processor.preprocess_with_tokenizer_info(
397
+ image_input=tensor_batch_images,
398
+ image_present=image_present,
399
+ image_unpadded_h=image_unpadded_heights,
400
+ image_unpadded_w=image_unpadded_widths,
401
+ image_placeholder_id=image_placeholder_id,
402
+ image_newline_id=image_newline_id,
403
+ variable_sized=True,
404
+ )
405
+ # FIXME max_tokens_to_generate is embedded into this processor's call.
406
+ prompt_tokens, prompts_length = _tokenize_prompts_with_image_and_batch(
407
+ tokenizer=self.tokenizer,
408
+ prompts=prompts,
409
+ scale_factors=scale_factors,
410
+ max_tokens_to_generate=self.max_tokens_to_generate,
411
+ max_position_embeddings=self.max_position_embeddings,
412
+ add_BOS=True,
413
+ add_beginning_of_answer_token=True,
414
+ )
415
+ image_padded_unpacked_tokens = construct_full_unpacked_stream(
416
+ num_real_text_tokens=prompts_length,
417
+ input_stream=prompt_tokens,
418
+ image_tokens=model_image_input["image_input_ids"],
419
+ batch_size=1,
420
+ num_sub_sequences=self.subsequence_length,
421
+ )
422
+ # Construct inputs for image patch indices.
423
+ unpacked_image_patch_indices_per_batch = construct_full_unpacked_stream(
424
+ num_real_text_tokens=prompts_length,
425
+ input_stream=torch.full_like(prompt_tokens, -1),
426
+ image_tokens=model_image_input["image_patch_indices_per_batch"],
427
+ batch_size=1,
428
+ num_sub_sequences=self.subsequence_length,
429
+ )
430
+ max_prompt_length = max(x.shape[-1] for x in image_padded_unpacked_tokens)
431
+ max_seq_len_batch = min(max_prompt_length + self.max_tokens_to_generate, self.max_position_embeddings)
432
+ tokens_to_place = min(max_seq_len_batch, max(0, image_padded_unpacked_tokens[0].shape[0]))
433
+
434
+ # Use same packing logic for the image patch indices.
435
+ image_patch_input_indices = full_unpacked_stream_to_tensor(
436
+ all_bi_tokens_to_place=[tokens_to_place],
437
+ full_unpacked_stream=unpacked_image_patch_indices_per_batch,
438
+ fill_value=-1,
439
+ batch_size=1,
440
+ new_seq_len=max_seq_len_batch,
441
+ offset=0,
442
+ )
443
+ image_patches_tensor = torch.stack([img[0] for img in model_image_input["image_patches"]])
444
+ batch_encoding = {
445
+ "input_ids": image_padded_unpacked_tokens[0].unsqueeze(0),
446
+ "image_patches": image_patches_tensor,
447
+ "image_patches_indices": image_patch_input_indices,
448
+ }
449
+ return batch_encoding
450
+
451
+ def __call__(
452
+ self,
453
+ text=None,
454
+ images=None,
455
+ add_special_tokens: bool = True,
456
+ return_attention_mask: bool = True,
457
+ padding: Union[bool, str, PaddingStrategy] = False,
458
+ truncation: Union[bool, str, TruncationStrategy] = None,
459
+ max_length: Optional[int] = None,
460
+ stride: int = 0,
461
+ pad_to_multiple_of: Optional[int] = None,
462
+ return_overflowing_tokens: bool = False,
463
+ return_special_tokens_mask: bool = False,
464
+ return_offsets_mapping: bool = False,
465
+ return_token_type_ids: bool = False,
466
+ return_length: bool = False,
467
+ verbose: bool = True,
468
+ return_tensors: Optional[Union[str, TensorType]] = None,
469
+ **kwargs,
470
+ ) -> "FuyuBatchFeature":
471
+ """
472
+ Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text`
473
+ and `kwargs` arguments to LlamaTokenizerFast's [`~LlamaTokenizerFast.__call__`] if `text` is not `None` to
474
+ encode the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
475
+ FuyuImageProcessor's [`~FuyuImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring
476
+ of the above two methods for more information.
477
+
478
+ Args:
479
+ text (`str`, `List[str]`):
480
+ The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
481
+ (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
482
+ `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
483
+ images (`PIL.Image.Image`, `List[PIL.Image.Image]`):
484
+ The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
485
+ tensor. Both channels-first and channels-last formats are supported.
486
+
487
+ Returns:
488
+ [`FuyuBatchFeature`]: A [`FuyuBatchFeature`] with the following fields:
489
+
490
+ - **input_ids** -- Tensor of token ids to be fed to a model. Returned when `text` is not `None`.
491
+ - **image_patches** -- List of Tensor of image patches. Returned when `images` is not `None`.
492
+ - **image_patches_indices** -- Tensor of indices where patch embeddings have to be inserted by the model.
493
+ - **attention_mask** -- List of indices specifying which tokens should be attended to by the model when
494
+ `return_attention_mask=True`.
495
+ """
496
+ requires_backends(self, ["torch"])
497
+
498
+ # --- Check input validity ---
499
+ if not return_attention_mask:
500
+ raise ValueError("`return_attention_mask=False` is not supported for this model.")
501
+ if text is None and images is None:
502
+ raise ValueError("You have to specify either text or images. Both cannot be None.")
503
+ if text is not None and images is None:
504
+ logger.warning("You are processing a text with no associated image. Make sure it is intended.")
505
+ self.current_processor = self.tokenizer
506
+ text_encoding = self.tokenizer(
507
+ text=text,
508
+ add_special_tokens=add_special_tokens,
509
+ padding=padding,
510
+ truncation=truncation,
511
+ max_length=max_length,
512
+ stride=stride,
513
+ pad_to_multiple_of=pad_to_multiple_of,
514
+ return_attention_mask=return_attention_mask,
515
+ return_overflowing_tokens=return_overflowing_tokens,
516
+ return_special_tokens_mask=return_special_tokens_mask,
517
+ return_offsets_mapping=return_offsets_mapping,
518
+ return_token_type_ids=return_token_type_ids,
519
+ return_length=return_length,
520
+ verbose=verbose,
521
+ return_tensors=return_tensors,
522
+ **kwargs,
523
+ )
524
+ return text_encoding
525
+
+ if text is None and images is not None:
+ logger.warning("You are processing an image with no associated text. Make sure it is intended.")
+ prompts = [[""]]
+ if text is not None and images is not None:
+ if isinstance(text, str):
+ prompts = [[text]]
+ elif isinstance(text, list):
+ prompts = [[text_seq] for text_seq in text]
+
+ # --- Preprocess images using self.image_processor ---
+
+ # FIXME - We hard code "pt" here because the rest of the processing assumes torch tensors
+ image_encoding = self.image_processor.preprocess(images, return_tensors="pt")
+ batch_images = image_encoding["images"]
+ image_unpadded_heights = image_encoding["image_unpadded_heights"]
+ image_unpadded_widths = image_encoding["image_unpadded_widths"]
+ scale_factors = image_encoding["image_scale_factors"]
+ self.subsequence_length = 1 # Each batch contains only one sequence.
+ self.batch_size = len(batch_images)
+
+ # --- Use self.tokenizer to get the ids of special tokens to insert into image ids ---
+
+ image_placeholder_id = self.tokenizer("|SPEAKER|", add_special_tokens=False)["input_ids"][1]
+ image_newline_id = self.tokenizer("|NEWLINE|", add_special_tokens=False)["input_ids"][1]
+ tensor_batch_images = torch.stack([img[0] for img in batch_images]).unsqueeze(1)
+
+ # --- Use self.image_processor again to obtain the full token ids and batch inputs ---
+ all_encodings = []
+
+ for prompt, scale_factor, image_unpadded_height, image_unpadded_width, tensor_batch_image in zip(
+ prompts, scale_factors, image_unpadded_heights, image_unpadded_widths, tensor_batch_images
+ ):
+ sample_encoding = self.get_sample_encoding(
+ prompts=[prompt],
+ scale_factors=[scale_factor],
+ image_unpadded_heights=torch.tensor([image_unpadded_height]),
+ image_unpadded_widths=torch.tensor([image_unpadded_width]),
+ image_placeholder_id=image_placeholder_id,
+ image_newline_id=image_newline_id,
+ tensor_batch_images=tensor_batch_image.unsqueeze(0),
+ )
+ all_encodings.append(sample_encoding)
+ batch_encoding = self._left_pad_inputs_with_attention_mask(
+ model_inputs=all_encodings, return_attention_mask=return_attention_mask
+ )
+ return FuyuBatchFeature(data=batch_encoding)
+
+ def post_process_box_coordinates(self, outputs, target_sizes=None):
+ """
+ Transforms raw coordinates detected by [`FuyuForCausalLM`] to the original images' coordinate space.
+ Coordinates will be returned in "box" format, with the following pattern:
+ `<box>top, left, bottom, right</box>`
+
+ Point coordinates are not supported yet.
+
+ Args:
+ outputs ([`GenerateOutput`]):
+ Raw outputs from `generate`.
+ target_sizes (`torch.Tensor`, *optional*):
+ Tensor of shape (batch_size, 2) where each entry is the (height, width) of the corresponding image in
+ the batch. If set, found coordinates in the output sequence are rescaled to the target sizes. If left
+ to None, coordinates will not be rescaled.
+
+ Returns:
+ `GenerateOutput`: Same output type returned by `generate`, with output token ids replaced with
+ boxed and possibly rescaled coordinates.
+ """
+
+ def scale_factor_to_fit(original_size, target_size=None):
+ height, width = original_size
+ if target_size is None:
+ max_height = self.image_processor.size["height"]
+ max_width = self.image_processor.size["width"]
+ else:
+ max_height, max_width = target_size
+ if width <= max_width and height <= max_height:
+ return 1.0
+ return min(max_height / height, max_width / width)
+
+ def find_delimiters_pair(tokens, start_token, end_token):
+ start_id = self.tokenizer.convert_tokens_to_ids(start_token)
+ end_id = self.tokenizer.convert_tokens_to_ids(end_token)
+
+ starting_positions = (tokens == start_id).nonzero(as_tuple=True)[0]
+ ending_positions = (tokens == end_id).nonzero(as_tuple=True)[0]
+
+ if torch.any(starting_positions) and torch.any(ending_positions):
+ return (starting_positions[0], ending_positions[0])
+ return (None, None)
+
+ def tokens_to_boxes(tokens, original_size):
+ while (pair := find_delimiters_pair(tokens, TOKEN_BBOX_OPEN_STRING, TOKEN_BBOX_CLOSE_STRING)) != (
+ None,
+ None,
+ ):
+ start, end = pair
+ if end != start + 5:
+ continue
+
+ # Retrieve transformed coordinates from tokens
+ coords = self.tokenizer.convert_ids_to_tokens(tokens[start + 1 : end])
+
+ # Scale back to original image size and multiply by 2
+ scale = scale_factor_to_fit(original_size)
+ top, left, bottom, right = [2 * int(float(c) / scale) for c in coords]
+
+ # Replace the IDs so they get detokenized right
+ replacement = f" {TEXT_REPR_BBOX_OPEN}{top}, {left}, {bottom}, {right}{TEXT_REPR_BBOX_CLOSE}"
+ replacement = self.tokenizer.tokenize(replacement)[1:]
+ replacement = self.tokenizer.convert_tokens_to_ids(replacement)
+ replacement = torch.tensor(replacement).to(tokens)
+
+ tokens = torch.cat([tokens[:start], replacement, tokens[end + 1 :]], 0)
+ return tokens
+
+ def tokens_to_points(tokens, original_size):
+ while (pair := find_delimiters_pair(tokens, TOKEN_POINT_OPEN_STRING, TOKEN_POINT_CLOSE_STRING)) != (
+ None,
+ None,
+ ):
+ start, end = pair
+ if end != start + 3:
+ continue
+
+ # Retrieve transformed coordinates from tokens
+ coords = self.tokenizer.convert_ids_to_tokens(tokens[start + 1 : end])
+
+ # Scale back to original image size and multiply by 2
+ scale = scale_factor_to_fit(original_size)
+ x, y = [2 * int(float(c) / scale) for c in coords]
+
+ # Replace the IDs so they get detokenized right
+ replacement = f" {TEXT_REPR_POINT_OPEN}{x}, {y}{TEXT_REPR_POINT_CLOSE}"
+ replacement = self.tokenizer.tokenize(replacement)[1:]
+ replacement = self.tokenizer.convert_tokens_to_ids(replacement)
+ replacement = torch.tensor(replacement).to(tokens)
+
+ tokens = torch.cat([tokens[:start], replacement, tokens[end + 1 :]], 0)
+ return tokens
+
+ if target_sizes is None:
+ target_sizes = ((self.image_processor.size["height"], self.image_processor.size["width"]),) * len(outputs)
+ elif target_sizes.shape[1] != 2:
+ raise ValueError("Each element of target_sizes must contain the size (h, w) of each image of the batch")
+
+ if len(outputs) != len(target_sizes):
+ raise ValueError("Make sure that you pass in as many target sizes as output sequences")
+
+ results = []
+ for seq, size in zip(outputs, target_sizes):
+ seq = tokens_to_boxes(seq, size)
+ seq = tokens_to_points(seq, size)
+ results.append(seq)
+
+ return results
+
+ def batch_decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
+ refer to the docstring of this method for more information.
+ """
+ return self.tokenizer.batch_decode(*args, **kwargs)
+
+ def decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
+ the docstring of this method for more information.
+ """
+ return self.tokenizer.decode(*args, **kwargs)
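Editor's note on the processor diff above: the `__call__` and `post_process_box_coordinates` methods are normally used together around `generate`. The sketch below is a minimal, hedged usage example only; the checkpoint name `adept/fuyu-8b`, the image file `bus.png`, the prompt text, and the 16-token generation budget are illustrative assumptions, not part of this commit.

```python
# Minimal usage sketch (assumptions: "adept/fuyu-8b" checkpoint, a local
# image "bus.png", and a short prompt are placeholders for illustration).
import torch
from PIL import Image
from transformers import FuyuForCausalLM, FuyuProcessor

processor = FuyuProcessor.from_pretrained("adept/fuyu-8b")
model = FuyuForCausalLM.from_pretrained("adept/fuyu-8b", torch_dtype=torch.bfloat16)

image = Image.open("bus.png")
prompt = "Answer the following question based on the image: What color is the bus?\n"

# __call__ tokenizes the prompt and patches the image, returning a FuyuBatchFeature
inputs = processor(text=prompt, images=image, return_tensors="pt").to(model.device)

# Generate, then strip the prompt tokens before decoding the answer
generated = model.generate(**inputs, max_new_tokens=16)
answer = processor.batch_decode(
    generated[:, inputs["input_ids"].shape[1]:], skip_special_tokens=True
)
print(answer)

# For box-style prompts, raw coordinate tokens can be mapped back to the
# original image size; PIL's image.size is (w, h), so reverse it to (h, w).
boxes = processor.post_process_box_coordinates(
    generated, target_sizes=torch.tensor([image.size[::-1]])
)
```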
venv/lib/python3.10/site-packages/transformers/models/hubert/__init__.py ADDED
@@ -0,0 +1,83 @@
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ from typing import TYPE_CHECKING
+
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
+
+
+ _import_structure = {"configuration_hubert": ["HUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "HubertConfig"]}
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ _import_structure["modeling_hubert"] = [
+ "HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "HubertForCTC",
+ "HubertForSequenceClassification",
+ "HubertModel",
+ "HubertPreTrainedModel",
+ ]
+
+
+ try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ _import_structure["modeling_tf_hubert"] = [
+ "TF_HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "TFHubertForCTC",
+ "TFHubertModel",
+ "TFHubertPreTrainedModel",
+ ]
+
+ if TYPE_CHECKING:
+ from .configuration_hubert import HUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, HubertConfig
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_hubert import (
+ HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
+ HubertForCTC,
+ HubertForSequenceClassification,
+ HubertModel,
+ HubertPreTrainedModel,
+ )
+
+ try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_tf_hubert import (
+ TF_HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
+ TFHubertForCTC,
+ TFHubertModel,
+ TFHubertPreTrainedModel,
+ )
+
+
+ else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
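Editor's note on the `__init__.py` above: the `_LazyModule` / `_import_structure` pattern defers importing the heavy torch and TensorFlow submodules until a symbol is actually accessed, and TF class names are simply not registered when TensorFlow is missing. The sketch below only illustrates that behaviour; it assumes a standard transformers install and uses the class names defined in this file.

```python
# Illustrative sketch of the lazy-import behaviour (not part of this commit).
from transformers.models import hubert

# First attribute access triggers the import of configuration_hubert.
config = hubert.HubertConfig()
print(config.hidden_size)

# TF classes are only resolvable when the TensorFlow backend is installed;
# otherwise the lazy module never registered them and raises AttributeError.
try:
    hubert.TFHubertModel
except (ImportError, AttributeError):
    print("TensorFlow backend not available; TF classes are not importable.")
```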
venv/lib/python3.10/site-packages/transformers/models/hubert/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.26 kB)
 
venv/lib/python3.10/site-packages/transformers/models/hubert/__pycache__/configuration_hubert.cpython-310.pyc ADDED
Binary file (12.9 kB)
 
venv/lib/python3.10/site-packages/transformers/models/hubert/__pycache__/convert_distilhubert_original_s3prl_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (5.91 kB)
 
venv/lib/python3.10/site-packages/transformers/models/hubert/__pycache__/convert_hubert_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (6.18 kB)