applied-ai-018 committed
Commit 0a5e9a0 · verified · 1 Parent(s): 4776532

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes; see the raw diff for the rest.

Files changed (50)
  1. ckpts/universal/global_step40/zero/16.input_layernorm.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step40/zero/16.input_layernorm.weight/exp_avg_sq.pt +3 -0
  3. ckpts/universal/global_step40/zero/16.input_layernorm.weight/fp32.pt +3 -0
  4. ckpts/universal/global_step40/zero/16.mlp.dense_h_to_4h.weight/exp_avg.pt +3 -0
  5. ckpts/universal/global_step40/zero/16.mlp.dense_h_to_4h.weight/exp_avg_sq.pt +3 -0
  6. ckpts/universal/global_step40/zero/16.mlp.dense_h_to_4h.weight/fp32.pt +3 -0
  7. ckpts/universal/global_step40/zero/26.mlp.dense_h_to_4h.weight/exp_avg_sq.pt +3 -0
  8. ckpts/universal/global_step40/zero/26.mlp.dense_h_to_4h.weight/fp32.pt +3 -0
  9. ckpts/universal/global_step40/zero/29.vocab_parallel_projection.weight/fp32.pt +3 -0
  10. ckpts/universal/global_step40/zero/3.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt +3 -0
  11. ckpts/universal/global_step40/zero/3.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt +3 -0
  12. ckpts/universal/global_step40/zero/3.mlp.dense_h_to_4h_swiglu.weight/fp32.pt +3 -0
  13. venv/lib/python3.10/site-packages/transformers/models/__pycache__/__init__.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/transformers/models/deta/__init__.py +73 -0
  15. venv/lib/python3.10/site-packages/transformers/models/deta/__pycache__/__init__.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/transformers/models/deta/__pycache__/configuration_deta.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/transformers/models/deta/__pycache__/convert_deta_resnet_to_pytorch.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/transformers/models/deta/__pycache__/convert_deta_swin_to_pytorch.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/transformers/models/deta/__pycache__/image_processing_deta.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/transformers/models/deta/__pycache__/modeling_deta.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/transformers/models/deta/configuration_deta.py +271 -0
  22. venv/lib/python3.10/site-packages/transformers/models/deta/convert_deta_resnet_to_pytorch.py +320 -0
  23. venv/lib/python3.10/site-packages/transformers/models/deta/convert_deta_swin_to_pytorch.py +327 -0
  24. venv/lib/python3.10/site-packages/transformers/models/deta/image_processing_deta.py +1174 -0
  25. venv/lib/python3.10/site-packages/transformers/models/deta/modeling_deta.py +0 -0
  26. venv/lib/python3.10/site-packages/transformers/models/graphormer/__init__.py +57 -0
  27. venv/lib/python3.10/site-packages/transformers/models/graphormer/__pycache__/__init__.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/transformers/models/graphormer/__pycache__/collating_graphormer.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/transformers/models/graphormer/__pycache__/configuration_graphormer.cpython-310.pyc +0 -0
  30. venv/lib/python3.10/site-packages/transformers/models/graphormer/__pycache__/modeling_graphormer.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/transformers/models/graphormer/algos_graphormer.pyx +107 -0
  32. venv/lib/python3.10/site-packages/transformers/models/graphormer/collating_graphormer.py +134 -0
  33. venv/lib/python3.10/site-packages/transformers/models/graphormer/configuration_graphormer.py +218 -0
  34. venv/lib/python3.10/site-packages/transformers/models/graphormer/modeling_graphormer.py +911 -0
  35. venv/lib/python3.10/site-packages/transformers/models/levit/__init__.py +73 -0
  36. venv/lib/python3.10/site-packages/transformers/models/levit/__pycache__/__init__.cpython-310.pyc +0 -0
  37. venv/lib/python3.10/site-packages/transformers/models/levit/__pycache__/configuration_levit.cpython-310.pyc +0 -0
  38. venv/lib/python3.10/site-packages/transformers/models/levit/__pycache__/convert_levit_timm_to_pytorch.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/transformers/models/levit/__pycache__/feature_extraction_levit.cpython-310.pyc +0 -0
  40. venv/lib/python3.10/site-packages/transformers/models/levit/__pycache__/image_processing_levit.cpython-310.pyc +0 -0
  41. venv/lib/python3.10/site-packages/transformers/models/levit/__pycache__/modeling_levit.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/transformers/models/levit/configuration_levit.py +144 -0
  43. venv/lib/python3.10/site-packages/transformers/models/levit/convert_levit_timm_to_pytorch.py +181 -0
  44. venv/lib/python3.10/site-packages/transformers/models/levit/feature_extraction_levit.py +33 -0
  45. venv/lib/python3.10/site-packages/transformers/models/levit/image_processing_levit.py +325 -0
  46. venv/lib/python3.10/site-packages/transformers/models/levit/modeling_levit.py +737 -0
  47. venv/lib/python3.10/site-packages/transformers/models/markuplm/__pycache__/__init__.cpython-310.pyc +0 -0
  48. venv/lib/python3.10/site-packages/transformers/models/markuplm/__pycache__/feature_extraction_markuplm.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/transformers/models/markuplm/__pycache__/modeling_markuplm.cpython-310.pyc +0 -0
  50. venv/lib/python3.10/site-packages/transformers/models/markuplm/__pycache__/processing_markuplm.cpython-310.pyc +0 -0
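The first twelve entries appear to be optimizer/parameter shards of a ZeRO-style universal checkpoint at global step 40 (Adam `exp_avg`/`exp_avg_sq` moments plus `fp32` master weights); the remainder vendor a `venv` copy of the transformers DETA, Graphormer, LeViT, and MarkupLM model code. As a hedged sketch only, one of the checkpoint shards could be inspected locally roughly like this, assuming the Git LFS objects have been fetched so the `.pt` file holds the real payload rather than the pointer text:

```python
import torch

# Hypothetical inspection of one uploaded shard (path taken from the file list above).
# Assumes the LFS blob has been pulled; the payload structure is not documented in this commit.
path = "ckpts/universal/global_step40/zero/16.input_layernorm.weight/exp_avg.pt"
obj = torch.load(path, map_location="cpu")
print(type(obj))  # a tensor or a dict, depending on how the shard was written
```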
ckpts/universal/global_step40/zero/16.input_layernorm.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9c10d335a38acb59903e64e6078df215e5947624c6c1a571c83d91b71127e43d
+ size 9372
ckpts/universal/global_step40/zero/16.input_layernorm.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:263acf41930a49424f558ddff587251406de889a4443012a0985793043230cd3
+ size 9387
ckpts/universal/global_step40/zero/16.input_layernorm.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d7d4d8f5dea359358a0e4f4ca0519806ab718601ad0d523317d9647eceb68580
+ size 9293
ckpts/universal/global_step40/zero/16.mlp.dense_h_to_4h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:164f2ed5a1fe3cc0a6a01588b23c43239055299d5400e77d322f96dd702fe6ec
+ size 33555612
ckpts/universal/global_step40/zero/16.mlp.dense_h_to_4h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0b27a94adcab62f72fb0029b4e99d573abb5ca5caa9cff90f070c8d81ae28738
+ size 33555627
ckpts/universal/global_step40/zero/16.mlp.dense_h_to_4h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cf7099cfc1bd578036ff58bd96abe0572a94a61f98d83090a2a311c6a8c6636a
+ size 33555533
ckpts/universal/global_step40/zero/26.mlp.dense_h_to_4h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fb05d52fc2c8957659a72f0115837c72c93720c0bf4b775d66a5b6fef32f133b
+ size 33555627
ckpts/universal/global_step40/zero/26.mlp.dense_h_to_4h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:696668db8686e91aaebfbb55fa18a4e596c52d1730c391db7a017f901cfb922a
+ size 33555533
ckpts/universal/global_step40/zero/29.vocab_parallel_projection.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b026997d76340f56cae397ecb317806fab777c1098cb17771762b4b19c59f170
+ size 415237197
ckpts/universal/global_step40/zero/3.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d09450710877116dd51888d1a724c0a58faf758554651d817d7f66c1d53c9323
+ size 33555612
ckpts/universal/global_step40/zero/3.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e246f71c62ab4c6d03eaa39f0518ba2c4e871a293f8ec77e239740c690e39a5d
+ size 33555627
ckpts/universal/global_step40/zero/3.mlp.dense_h_to_4h_swiglu.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4a4976cf6ec5f2bf5ff5f4cf4374849d4bfdde9620fda9788e3a5cf9f1dc552b
+ size 33555533
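The checkpoint entries above are Git LFS pointer files: the diff only records the `version`, `oid sha256:...`, and `size` fields, while the tensors themselves live in LFS storage. As a minimal illustrative sketch (not part of this commit; the path is hypothetical, the field names come from the pointers shown above), such a pointer can be parsed with a few lines of Python:

```python
# Sketch: parse a Git LFS pointer file into its key/value fields.
def parse_lfs_pointer(path):
    fields = {}
    with open(path, "r", encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            if key:
                fields[key] = value
    return fields  # e.g. {"version": "...", "oid": "sha256:...", "size": "9372"}


# Hypothetical usage on a checked-out pointer (before `git lfs pull` replaces it with the blob):
print(parse_lfs_pointer("ckpts/universal/global_step40/zero/16.input_layernorm.weight/exp_avg.pt"))
```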
venv/lib/python3.10/site-packages/transformers/models/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (5.11 kB).
 
venv/lib/python3.10/site-packages/transformers/models/deta/__init__.py ADDED
@@ -0,0 +1,73 @@
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from typing import TYPE_CHECKING
+
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
+
+
+ _import_structure = {
+     "configuration_deta": ["DETA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DetaConfig"],
+ }
+
+ try:
+     if not is_vision_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["image_processing_deta"] = ["DetaImageProcessor"]
+
+ try:
+     if not is_torch_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_deta"] = [
+         "DETA_PRETRAINED_MODEL_ARCHIVE_LIST",
+         "DetaForObjectDetection",
+         "DetaModel",
+         "DetaPreTrainedModel",
+     ]
+
+
+ if TYPE_CHECKING:
+     from .configuration_deta import DETA_PRETRAINED_CONFIG_ARCHIVE_MAP, DetaConfig
+
+     try:
+         if not is_vision_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .image_processing_deta import DetaImageProcessor
+
+     try:
+         if not is_torch_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_deta import (
+             DETA_PRETRAINED_MODEL_ARCHIVE_LIST,
+             DetaForObjectDetection,
+             DetaModel,
+             DetaPreTrainedModel,
+         )
+
+ else:
+     import sys
+
+     sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
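This `__init__.py` follows the usual transformers lazy-import pattern: `_import_structure` maps submodules to their public names and `_LazyModule` defers the actual imports until an attribute is first accessed. As an illustrative sketch only (assuming torch and vision support are installed in this snapshot of the library), the ordinary imports then work unchanged:

```python
# Illustrative only: with the lazy module in place, these names resolve on first access.
from transformers.models.deta import DetaConfig, DetaForObjectDetection, DetaImageProcessor

config = DetaConfig()          # defaults mirror the configuration file shown further below
print(type(config).__name__)   # -> "DetaConfig"
```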
venv/lib/python3.10/site-packages/transformers/models/deta/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.12 kB).
 
venv/lib/python3.10/site-packages/transformers/models/deta/__pycache__/configuration_deta.cpython-310.pyc ADDED
Binary file (11.8 kB).
 
venv/lib/python3.10/site-packages/transformers/models/deta/__pycache__/convert_deta_resnet_to_pytorch.cpython-310.pyc ADDED
Binary file (10.2 kB).
 
venv/lib/python3.10/site-packages/transformers/models/deta/__pycache__/convert_deta_swin_to_pytorch.cpython-310.pyc ADDED
Binary file (12.1 kB).
 
venv/lib/python3.10/site-packages/transformers/models/deta/__pycache__/image_processing_deta.cpython-310.pyc ADDED
Binary file (39.3 kB).
 
venv/lib/python3.10/site-packages/transformers/models/deta/__pycache__/modeling_deta.cpython-310.pyc ADDED
Binary file (100 kB).
 
venv/lib/python3.10/site-packages/transformers/models/deta/configuration_deta.py ADDED
@@ -0,0 +1,271 @@
+ # coding=utf-8
+ # Copyright 2022 SenseTime and The HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """ DETA model configuration"""
+
+
+ from ...configuration_utils import PretrainedConfig
+ from ...utils import logging
+ from ..auto import CONFIG_MAPPING
+
+
+ logger = logging.get_logger(__name__)
+
+
+ from ..deprecated._archive_maps import DETA_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402
+
+
+ class DetaConfig(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`DetaModel`]. It is used to instantiate a DETA
+     model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+     defaults will yield a similar configuration to that of the DETA
+     [SenseTime/deformable-detr](https://huggingface.co/SenseTime/deformable-detr) architecture.
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+     Args:
+         backbone_config (`PretrainedConfig` or `dict`, *optional*, defaults to `ResNetConfig()`):
+             The configuration of the backbone model.
+         backbone (`str`, *optional*):
+             Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this
+             will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone`
+             is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights.
+         use_pretrained_backbone (`bool`, *optional*, defaults to `False`):
+             Whether to use pretrained weights for the backbone.
+         use_timm_backbone (`bool`, *optional*, defaults to `False`):
+             Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers
+             library.
+         backbone_kwargs (`dict`, *optional*):
+             Keyword arguments to be passed to AutoBackbone when loading from a checkpoint
+             e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set.
+         num_queries (`int`, *optional*, defaults to 900):
+             Number of object queries, i.e. detection slots. This is the maximal number of objects [`DetaModel`] can
+             detect in a single image. In case `two_stage` is set to `True`, we use `two_stage_num_proposals` instead.
+         d_model (`int`, *optional*, defaults to 256):
+             Dimension of the layers.
+         encoder_layers (`int`, *optional*, defaults to 6):
+             Number of encoder layers.
+         decoder_layers (`int`, *optional*, defaults to 6):
+             Number of decoder layers.
+         encoder_attention_heads (`int`, *optional*, defaults to 8):
+             Number of attention heads for each attention layer in the Transformer encoder.
+         decoder_attention_heads (`int`, *optional*, defaults to 8):
+             Number of attention heads for each attention layer in the Transformer decoder.
+         decoder_ffn_dim (`int`, *optional*, defaults to 2048):
+             Dimension of the "intermediate" (often named feed-forward) layer in decoder.
+         encoder_ffn_dim (`int`, *optional*, defaults to 2048):
+             Dimension of the "intermediate" (often named feed-forward) layer in encoder.
+         activation_function (`str` or `function`, *optional*, defaults to `"relu"`):
+             The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+             `"relu"`, `"silu"` and `"gelu_new"` are supported.
+         dropout (`float`, *optional*, defaults to 0.1):
+             The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+         attention_dropout (`float`, *optional*, defaults to 0.0):
+             The dropout ratio for the attention probabilities.
+         activation_dropout (`float`, *optional*, defaults to 0.0):
+             The dropout ratio for activations inside the fully connected layer.
+         init_std (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         init_xavier_std (`float`, *optional*, defaults to 1):
+             The scaling factor used for the Xavier initialization gain in the HM Attention map module.
+         encoder_layerdrop (`float`, *optional*, defaults to 0.0):
+             The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
+             for more details.
+         auxiliary_loss (`bool`, *optional*, defaults to `False`):
+             Whether auxiliary decoding losses (loss at each decoder layer) are to be used.
+         position_embedding_type (`str`, *optional*, defaults to `"sine"`):
+             Type of position embeddings to be used on top of the image features. One of `"sine"` or `"learned"`.
+         class_cost (`float`, *optional*, defaults to 1):
+             Relative weight of the classification error in the Hungarian matching cost.
+         bbox_cost (`float`, *optional*, defaults to 5):
+             Relative weight of the L1 error of the bounding box coordinates in the Hungarian matching cost.
+         giou_cost (`float`, *optional*, defaults to 2):
+             Relative weight of the generalized IoU loss of the bounding box in the Hungarian matching cost.
+         mask_loss_coefficient (`float`, *optional*, defaults to 1):
+             Relative weight of the Focal loss in the panoptic segmentation loss.
+         dice_loss_coefficient (`float`, *optional*, defaults to 1):
+             Relative weight of the DICE/F-1 loss in the panoptic segmentation loss.
+         bbox_loss_coefficient (`float`, *optional*, defaults to 5):
+             Relative weight of the L1 bounding box loss in the object detection loss.
+         giou_loss_coefficient (`float`, *optional*, defaults to 2):
+             Relative weight of the generalized IoU loss in the object detection loss.
+         eos_coefficient (`float`, *optional*, defaults to 0.1):
+             Relative classification weight of the 'no-object' class in the object detection loss.
+         num_feature_levels (`int`, *optional*, defaults to 5):
+             The number of input feature levels.
+         encoder_n_points (`int`, *optional*, defaults to 4):
+             The number of sampled keys in each feature level for each attention head in the encoder.
+         decoder_n_points (`int`, *optional*, defaults to 4):
+             The number of sampled keys in each feature level for each attention head in the decoder.
+         two_stage (`bool`, *optional*, defaults to `True`):
+             Whether to apply a two-stage deformable DETR, where the region proposals are also generated by a variant of
+             DETA, which are further fed into the decoder for iterative bounding box refinement.
+         two_stage_num_proposals (`int`, *optional*, defaults to 300):
+             The number of region proposals to be generated, in case `two_stage` is set to `True`.
+         with_box_refine (`bool`, *optional*, defaults to `True`):
+             Whether to apply iterative bounding box refinement, where each decoder layer refines the bounding boxes
+             based on the predictions from the previous layer.
+         focal_alpha (`float`, *optional*, defaults to 0.25):
+             Alpha parameter in the focal loss.
+         assign_first_stage (`bool`, *optional*, defaults to `True`):
+             Whether to assign each prediction to the highest-overlapping ground truth object if the overlap is larger than a threshold of 0.7.
+         assign_second_stage (`bool`, *optional*, defaults to `True`):
+             Whether to apply a second assignment procedure in the second stage that closely follows the first-stage assignment procedure.
+         disable_custom_kernels (`bool`, *optional*, defaults to `True`):
+             Disable the use of custom CUDA and CPU kernels. This option is necessary for the ONNX export, as custom
+             kernels are not supported by PyTorch ONNX export.
+
+     Examples:
+
+     ```python
+     >>> from transformers import DetaConfig, DetaModel
+
+     >>> # Initializing a DETA SenseTime/deformable-detr style configuration
+     >>> configuration = DetaConfig()
+
+     >>> # Initializing a model (with random weights) from the SenseTime/deformable-detr style configuration
+     >>> model = DetaModel(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+
+     model_type = "deta"
+     attribute_map = {
+         "hidden_size": "d_model",
+         "num_attention_heads": "encoder_attention_heads",
+     }
+
+     def __init__(
+         self,
+         backbone_config=None,
+         backbone=None,
+         use_pretrained_backbone=False,
+         use_timm_backbone=False,
+         backbone_kwargs=None,
+         num_queries=900,
+         max_position_embeddings=2048,
+         encoder_layers=6,
+         encoder_ffn_dim=2048,
+         encoder_attention_heads=8,
+         decoder_layers=6,
+         decoder_ffn_dim=1024,
+         decoder_attention_heads=8,
+         encoder_layerdrop=0.0,
+         is_encoder_decoder=True,
+         activation_function="relu",
+         d_model=256,
+         dropout=0.1,
+         attention_dropout=0.0,
+         activation_dropout=0.0,
+         init_std=0.02,
+         init_xavier_std=1.0,
+         return_intermediate=True,
+         auxiliary_loss=False,
+         position_embedding_type="sine",
+         num_feature_levels=5,
+         encoder_n_points=4,
+         decoder_n_points=4,
+         two_stage=True,
+         two_stage_num_proposals=300,
+         with_box_refine=True,
+         assign_first_stage=True,
+         assign_second_stage=True,
+         class_cost=1,
+         bbox_cost=5,
+         giou_cost=2,
+         mask_loss_coefficient=1,
+         dice_loss_coefficient=1,
+         bbox_loss_coefficient=5,
+         giou_loss_coefficient=2,
+         eos_coefficient=0.1,
+         focal_alpha=0.25,
+         disable_custom_kernels=True,
+         **kwargs,
+     ):
+         if use_pretrained_backbone:
+             raise ValueError("Pretrained backbones are not supported yet.")
+
+         if backbone_config is not None and backbone is not None:
+             raise ValueError("You can't specify both `backbone` and `backbone_config`.")
+
+         if backbone_config is None and backbone is None:
+             logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
+             backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
+         else:
+             if isinstance(backbone_config, dict):
+                 backbone_model_type = backbone_config.pop("model_type")
+                 config_class = CONFIG_MAPPING[backbone_model_type]
+                 backbone_config = config_class.from_dict(backbone_config)
+
+         if backbone_kwargs is not None and backbone_kwargs and backbone_config is not None:
+             raise ValueError("You can't specify both `backbone_kwargs` and `backbone_config`.")
+
+         self.backbone_config = backbone_config
+         self.backbone = backbone
+         self.use_pretrained_backbone = use_pretrained_backbone
+         self.use_timm_backbone = use_timm_backbone
+         self.backbone_kwargs = backbone_kwargs
+         self.num_queries = num_queries
+         self.max_position_embeddings = max_position_embeddings
+         self.d_model = d_model
+         self.encoder_ffn_dim = encoder_ffn_dim
+         self.encoder_layers = encoder_layers
+         self.encoder_attention_heads = encoder_attention_heads
+         self.decoder_ffn_dim = decoder_ffn_dim
+         self.decoder_layers = decoder_layers
+         self.decoder_attention_heads = decoder_attention_heads
+         self.dropout = dropout
+         self.attention_dropout = attention_dropout
+         self.activation_dropout = activation_dropout
+         self.activation_function = activation_function
+         self.init_std = init_std
+         self.init_xavier_std = init_xavier_std
+         self.encoder_layerdrop = encoder_layerdrop
+         self.auxiliary_loss = auxiliary_loss
+         self.position_embedding_type = position_embedding_type
+         # deformable attributes
+         self.num_feature_levels = num_feature_levels
+         self.encoder_n_points = encoder_n_points
+         self.decoder_n_points = decoder_n_points
+         self.two_stage = two_stage
+         self.two_stage_num_proposals = two_stage_num_proposals
+         self.with_box_refine = with_box_refine
+         self.assign_first_stage = assign_first_stage
+         self.assign_second_stage = assign_second_stage
+         if two_stage is True and with_box_refine is False:
+             raise ValueError("If two_stage is True, with_box_refine must be True.")
+         # Hungarian matcher
+         self.class_cost = class_cost
+         self.bbox_cost = bbox_cost
+         self.giou_cost = giou_cost
+         # Loss coefficients
+         self.mask_loss_coefficient = mask_loss_coefficient
+         self.dice_loss_coefficient = dice_loss_coefficient
+         self.bbox_loss_coefficient = bbox_loss_coefficient
+         self.giou_loss_coefficient = giou_loss_coefficient
+         self.eos_coefficient = eos_coefficient
+         self.focal_alpha = focal_alpha
+         self.disable_custom_kernels = disable_custom_kernels
+         super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
+
+     @property
+     def num_attention_heads(self) -> int:
+         return self.encoder_attention_heads
+
+     @property
+     def hidden_size(self) -> int:
+         return self.d_model
+
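Two behaviors of the constructor above are worth noting: a `dict` passed as `backbone_config` is rehydrated through `CONFIG_MAPPING`, and `two_stage=True` requires `with_box_refine=True`. A minimal sketch mirroring exactly the code shown above (not part of this commit):

```python
# Illustrative sketch of DetaConfig constructor behavior as shown in the diff above.
from transformers import DetaConfig

# Default: a ResNet backbone config is created automatically when none is given.
config = DetaConfig()
print(config.backbone_config.model_type)  # -> "resnet"

# The constraint enforced in __init__ above:
try:
    DetaConfig(two_stage=True, with_box_refine=False)
except ValueError as err:
    print(err)  # "If two_stage is True, with_box_refine must be True."
```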
venv/lib/python3.10/site-packages/transformers/models/deta/convert_deta_resnet_to_pytorch.py ADDED
@@ -0,0 +1,320 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Inc. team.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Convert DETA checkpoints from the original repository.
+
+ URL: https://github.com/jozhang97/DETA/tree/master"""
+
+
+ import argparse
+ import json
+ from pathlib import Path
+
+ import requests
+ import torch
+ from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
+ from PIL import Image
+
+ from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor
+ from transformers.utils import logging
+
+
+ logging.set_verbosity_info()
+ logger = logging.get_logger(__name__)
+
+
+ def get_deta_config():
+     config = DetaConfig(
+         num_queries=900,
+         encoder_ffn_dim=2048,
+         decoder_ffn_dim=2048,
+         num_feature_levels=5,
+         assign_first_stage=True,
+         with_box_refine=True,
+         two_stage=True,
+     )
+
+     # set labels
+     config.num_labels = 91
+     repo_id = "huggingface/label-files"
+     filename = "coco-detection-id2label.json"
+     id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
+     id2label = {int(k): v for k, v in id2label.items()}
+     config.id2label = id2label
+     config.label2id = {v: k for k, v in id2label.items()}
+
+     return config
+
+
+ # here we list all keys to be renamed (original name on the left, our name on the right)
+ def create_rename_keys(config):
+     rename_keys = []
+
+     # stem
+     # fmt: off
+     rename_keys.append(("backbone.0.body.conv1.weight", "model.backbone.model.embedder.embedder.convolution.weight"))
+     rename_keys.append(("backbone.0.body.bn1.weight", "model.backbone.model.embedder.embedder.normalization.weight"))
+     rename_keys.append(("backbone.0.body.bn1.bias", "model.backbone.model.embedder.embedder.normalization.bias"))
+     rename_keys.append(("backbone.0.body.bn1.running_mean", "model.backbone.model.embedder.embedder.normalization.running_mean"))
+     rename_keys.append(("backbone.0.body.bn1.running_var", "model.backbone.model.embedder.embedder.normalization.running_var"))
+     # stages
+     for stage_idx in range(len(config.backbone_config.depths)):
+         for layer_idx in range(config.backbone_config.depths[stage_idx]):
+             # shortcut
+             if layer_idx == 0:
+                 rename_keys.append(
+                     (
+                         f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight",
+                         f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight",
+                     )
+                 )
+                 rename_keys.append(
+                     (
+                         f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight",
+                         f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight",
+                     )
+                 )
+                 rename_keys.append(
+                     (
+                         f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias",
+                         f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias",
+                     )
+                 )
+                 rename_keys.append(
+                     (
+                         f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean",
+                         f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean",
+                     )
+                 )
+                 rename_keys.append(
+                     (
+                         f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var",
+                         f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var",
+                     )
+                 )
+             # 3 convs
+             for i in range(3):
+                 rename_keys.append(
+                     (
+                         f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight",
+                         f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight",
+                     )
+                 )
+                 rename_keys.append(
+                     (
+                         f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight",
+                         f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight",
+                     )
+                 )
+                 rename_keys.append(
+                     (
+                         f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias",
+                         f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias",
+                     )
+                 )
+                 rename_keys.append(
+                     (
+                         f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean",
+                         f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean",
+                     )
+                 )
+                 rename_keys.append(
+                     (
+                         f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var",
+                         f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var",
+                     )
+                 )
+     # transformer encoder
+     for i in range(config.encoder_layers):
+         rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight", f"model.encoder.layers.{i}.self_attn.sampling_offsets.weight"))
+         rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias", f"model.encoder.layers.{i}.self_attn.sampling_offsets.bias"))
+         rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.attention_weights.weight", f"model.encoder.layers.{i}.self_attn.attention_weights.weight"))
+         rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.attention_weights.bias", f"model.encoder.layers.{i}.self_attn.attention_weights.bias"))
+         rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.value_proj.weight", f"model.encoder.layers.{i}.self_attn.value_proj.weight"))
+         rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.value_proj.bias", f"model.encoder.layers.{i}.self_attn.value_proj.bias"))
+         rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.output_proj.weight", f"model.encoder.layers.{i}.self_attn.output_proj.weight"))
+         rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.output_proj.bias", f"model.encoder.layers.{i}.self_attn.output_proj.bias"))
+         rename_keys.append((f"transformer.encoder.layers.{i}.norm1.weight", f"model.encoder.layers.{i}.self_attn_layer_norm.weight"))
+         rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"model.encoder.layers.{i}.self_attn_layer_norm.bias"))
+         rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"model.encoder.layers.{i}.fc1.weight"))
+         rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"model.encoder.layers.{i}.fc1.bias"))
+         rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"model.encoder.layers.{i}.fc2.weight"))
+         rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"model.encoder.layers.{i}.fc2.bias"))
+         rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"model.encoder.layers.{i}.final_layer_norm.weight"))
+         rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"model.encoder.layers.{i}.final_layer_norm.bias"))
+
+     # transformer decoder
+     for i in range(config.decoder_layers):
+         rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight", f"model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight"))
+         rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias", f"model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias"))
+         rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.attention_weights.weight", f"model.decoder.layers.{i}.encoder_attn.attention_weights.weight"))
+         rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.attention_weights.bias", f"model.decoder.layers.{i}.encoder_attn.attention_weights.bias"))
+         rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.value_proj.weight", f"model.decoder.layers.{i}.encoder_attn.value_proj.weight"))
+         rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.value_proj.bias", f"model.decoder.layers.{i}.encoder_attn.value_proj.bias"))
+         rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.output_proj.weight", f"model.decoder.layers.{i}.encoder_attn.output_proj.weight"))
+         rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.output_proj.bias", f"model.decoder.layers.{i}.encoder_attn.output_proj.bias"))
+         rename_keys.append((f"transformer.decoder.layers.{i}.norm1.weight", f"model.decoder.layers.{i}.encoder_attn_layer_norm.weight"))
+         rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"model.decoder.layers.{i}.encoder_attn_layer_norm.bias"))
+         rename_keys.append((f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"model.decoder.layers.{i}.self_attn.out_proj.weight"))
+         rename_keys.append((f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"model.decoder.layers.{i}.self_attn.out_proj.bias"))
+         rename_keys.append((f"transformer.decoder.layers.{i}.norm2.weight", f"model.decoder.layers.{i}.self_attn_layer_norm.weight"))
+         rename_keys.append((f"transformer.decoder.layers.{i}.norm2.bias", f"model.decoder.layers.{i}.self_attn_layer_norm.bias"))
+         rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"model.decoder.layers.{i}.fc1.weight"))
+         rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"model.decoder.layers.{i}.fc1.bias"))
+         rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"model.decoder.layers.{i}.fc2.weight"))
+         rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"model.decoder.layers.{i}.fc2.bias"))
+         rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"model.decoder.layers.{i}.final_layer_norm.weight"))
+         rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"model.decoder.layers.{i}.final_layer_norm.bias"))
+
+     # fmt: on
+
+     return rename_keys
+
+
+ def rename_key(dct, old, new):
+     val = dct.pop(old)
+     dct[new] = val
+
+
+ def read_in_decoder_q_k_v(state_dict, config):
+     # transformer decoder self-attention layers
+     hidden_size = config.d_model
+     for i in range(config.decoder_layers):
+         # read in weights + bias of input projection layer of self-attention
+         in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
+         in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
+         # next, add query, keys and values (in that order) to the state dict
+         state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
+         state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
+         state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[
+             hidden_size : hidden_size * 2, :
+         ]
+         state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
+         state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
+         state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
+
+
+ # We will verify our results on an image of cute cats
+ def prepare_img():
+     url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+     im = Image.open(requests.get(url, stream=True).raw)
+
+     return im
+
+
+ @torch.no_grad()
+ def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
+     """
+     Copy/paste/tweak model's weights to our DETA structure.
+     """
+
+     # load config
+     config = get_deta_config()
+
+     # load original state dict
+     if model_name == "deta-resnet-50":
+         filename = "adet_checkpoint0011.pth"
+     elif model_name == "deta-resnet-50-24-epochs":
+         filename = "adet_2x_checkpoint0023.pth"
+     else:
+         raise ValueError(f"Model name {model_name} not supported")
+     checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename=filename)
+     state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
+
+     # rename keys
+     rename_keys = create_rename_keys(config)
+     for src, dest in rename_keys:
+         rename_key(state_dict, src, dest)
+     read_in_decoder_q_k_v(state_dict, config)
+
+     # fix some prefixes
+     for key in state_dict.copy().keys():
+         if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
+             val = state_dict.pop(key)
+             state_dict[key.replace("transformer.decoder", "model.decoder")] = val
+         if "input_proj" in key:
+             val = state_dict.pop(key)
+             state_dict["model." + key] = val
+         if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
+             val = state_dict.pop(key)
+             state_dict[key.replace("transformer", "model")] = val
+
+     # finally, create HuggingFace model and load state dict
+     model = DetaForObjectDetection(config)
+     model.load_state_dict(state_dict)
+     model.eval()
+
+     device = "cuda" if torch.cuda.is_available() else "cpu"
+     model.to(device)
+
+     # load image processor
+     processor = DetaImageProcessor(format="coco_detection")
+
+     # verify our conversion on image
+     img = prepare_img()
+     encoding = processor(images=img, return_tensors="pt")
+     pixel_values = encoding["pixel_values"]
+     outputs = model(pixel_values.to(device))
+
+     # verify logits
+     if model_name == "deta-resnet-50":
+         expected_logits = torch.tensor(
+             [[-7.3978, -2.5406, -4.1668], [-8.2684, -3.9933, -3.8096], [-7.0515, -3.7973, -5.8516]]
+         )
+         expected_boxes = torch.tensor([[0.5043, 0.4973, 0.9998], [0.2542, 0.5489, 0.4748], [0.5490, 0.2765, 0.0570]])
+     elif model_name == "deta-resnet-50-24-epochs":
+         expected_logits = torch.tensor(
+             [[-7.1688, -2.4857, -4.8669], [-7.8630, -3.8154, -4.2674], [-7.2730, -4.1865, -5.5323]]
+         )
+         expected_boxes = torch.tensor([[0.5021, 0.4971, 0.9994], [0.2546, 0.5486, 0.4731], [0.1686, 0.1986, 0.2142]])
+
+     assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
+     assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
+     print("Everything ok!")
+
+     if pytorch_dump_folder_path:
+         # Save model and processor
+         logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
+         Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
+         model.save_pretrained(pytorch_dump_folder_path)
+         processor.save_pretrained(pytorch_dump_folder_path)
+
+     # Push to hub
+     if push_to_hub:
+         print("Pushing model and processor to hub...")
+         model.push_to_hub(f"jozhang97/{model_name}")
+         processor.push_to_hub(f"jozhang97/{model_name}")
+
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser()
+
+     parser.add_argument(
+         "--model_name",
+         type=str,
+         default="deta-resnet-50",
+         choices=["deta-resnet-50", "deta-resnet-50-24-epochs"],
+         help="Name of the model you'd like to convert.",
+     )
+     parser.add_argument(
+         "--pytorch_dump_folder_path",
+         default=None,
+         type=str,
+         help="Path to the folder to output PyTorch model.",
+     )
+     parser.add_argument(
+         "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
+     )
+     args = parser.parse_args()
+     convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
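The `read_in_decoder_q_k_v` helper in the script above splits each decoder layer's fused `in_proj_weight`/`in_proj_bias` into separate q/k/v projections by slicing along the first dimension. A minimal standalone illustration of that slicing, using the config default `d_model=256` as the assumed hidden size and random tensors in place of real checkpoint weights:

```python
import torch

# Sketch of the q/k/v split performed by read_in_decoder_q_k_v above.
hidden_size = 256  # DetaConfig default d_model
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)  # fused [q; k; v] projection
in_proj_bias = torch.randn(3 * hidden_size)

q_w, q_b = in_proj_weight[:hidden_size, :], in_proj_bias[:hidden_size]
k_w, k_b = in_proj_weight[hidden_size : hidden_size * 2, :], in_proj_bias[hidden_size : hidden_size * 2]
v_w, v_b = in_proj_weight[-hidden_size:, :], in_proj_bias[-hidden_size:]

assert q_w.shape == k_w.shape == v_w.shape == (hidden_size, hidden_size)
assert q_b.shape == k_b.shape == v_b.shape == (hidden_size,)
```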
venv/lib/python3.10/site-packages/transformers/models/deta/convert_deta_swin_to_pytorch.py ADDED
@@ -0,0 +1,327 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert DETA checkpoints from the original repository.
16
+
17
+ URL: https://github.com/jozhang97/DETA/tree/master"""
18
+
19
+
20
+ import argparse
21
+ import json
22
+ from pathlib import Path
23
+
24
+ import requests
25
+ import torch
26
+ from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
27
+ from PIL import Image
28
+
29
+ from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
30
+ from transformers.utils import logging
31
+
32
+
33
+ logging.set_verbosity_info()
34
+ logger = logging.get_logger(__name__)
35
+
36
+
37
+ def get_deta_config(model_name):
38
+ backbone_config = SwinConfig(
39
+ embed_dim=192,
40
+ depths=(2, 2, 18, 2),
41
+ num_heads=(6, 12, 24, 48),
42
+ window_size=12,
43
+ out_features=["stage2", "stage3", "stage4"],
44
+ )
45
+
46
+ config = DetaConfig(
47
+ backbone_config=backbone_config,
48
+ num_queries=900,
49
+ encoder_ffn_dim=2048,
50
+ decoder_ffn_dim=2048,
51
+ num_feature_levels=5,
52
+ assign_first_stage=True,
53
+ with_box_refine=True,
54
+ two_stage=True,
55
+ )
56
+
57
+ # set labels
58
+ repo_id = "huggingface/label-files"
59
+ if "o365" in model_name:
60
+ num_labels = 366
61
+ filename = "object365-id2label.json"
62
+ else:
63
+ num_labels = 91
64
+ filename = "coco-detection-id2label.json"
65
+
66
+ config.num_labels = num_labels
67
+ id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
68
+ id2label = {int(k): v for k, v in id2label.items()}
69
+ config.id2label = id2label
70
+ config.label2id = {v: k for k, v in id2label.items()}
71
+
72
+ return config
73
+
74
+
75
+ # here we list all keys to be renamed (original name on the left, our name on the right)
76
+ def create_rename_keys(config):
77
+ rename_keys = []
78
+
79
+ # stem
80
+ # fmt: off
81
+ rename_keys.append(("backbone.0.body.patch_embed.proj.weight", "model.backbone.model.embeddings.patch_embeddings.projection.weight"))
82
+ rename_keys.append(("backbone.0.body.patch_embed.proj.bias", "model.backbone.model.embeddings.patch_embeddings.projection.bias"))
83
+ rename_keys.append(("backbone.0.body.patch_embed.norm.weight", "model.backbone.model.embeddings.norm.weight"))
84
+ rename_keys.append(("backbone.0.body.patch_embed.norm.bias", "model.backbone.model.embeddings.norm.bias"))
85
+ # stages
86
+ for i in range(len(config.backbone_config.depths)):
87
+ for j in range(config.backbone_config.depths[i]):
88
+ rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm1.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight"))
89
+ rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm1.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias"))
90
+ rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table"))
91
+ rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index"))
92
+ rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight"))
93
+ rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias"))
94
+ rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm2.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight"))
95
+ rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm2.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias"))
96
+ rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight"))
97
+ rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias"))
98
+ rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight"))
99
+ rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias"))
100
+
101
+ if i < 3:
102
+ rename_keys.append((f"backbone.0.body.layers.{i}.downsample.reduction.weight", f"model.backbone.model.encoder.layers.{i}.downsample.reduction.weight"))
103
+ rename_keys.append((f"backbone.0.body.layers.{i}.downsample.norm.weight", f"model.backbone.model.encoder.layers.{i}.downsample.norm.weight"))
104
+ rename_keys.append((f"backbone.0.body.layers.{i}.downsample.norm.bias", f"model.backbone.model.encoder.layers.{i}.downsample.norm.bias"))
105
+
106
+ rename_keys.append(("backbone.0.body.norm1.weight", "model.backbone.model.hidden_states_norms.stage2.weight"))
107
+ rename_keys.append(("backbone.0.body.norm1.bias", "model.backbone.model.hidden_states_norms.stage2.bias"))
108
+ rename_keys.append(("backbone.0.body.norm2.weight", "model.backbone.model.hidden_states_norms.stage3.weight"))
109
+ rename_keys.append(("backbone.0.body.norm2.bias", "model.backbone.model.hidden_states_norms.stage3.bias"))
110
+ rename_keys.append(("backbone.0.body.norm3.weight", "model.backbone.model.hidden_states_norms.stage4.weight"))
111
+ rename_keys.append(("backbone.0.body.norm3.bias", "model.backbone.model.hidden_states_norms.stage4.bias"))
112
+
113
+ # transformer encoder
114
+ for i in range(config.encoder_layers):
115
+ rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight", f"model.encoder.layers.{i}.self_attn.sampling_offsets.weight"))
116
+ rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias", f"model.encoder.layers.{i}.self_attn.sampling_offsets.bias"))
117
+ rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.attention_weights.weight", f"model.encoder.layers.{i}.self_attn.attention_weights.weight"))
118
+ rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.attention_weights.bias", f"model.encoder.layers.{i}.self_attn.attention_weights.bias"))
119
+ rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.value_proj.weight", f"model.encoder.layers.{i}.self_attn.value_proj.weight"))
120
+ rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.value_proj.bias", f"model.encoder.layers.{i}.self_attn.value_proj.bias"))
121
+ rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.output_proj.weight", f"model.encoder.layers.{i}.self_attn.output_proj.weight"))
122
+ rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.output_proj.bias", f"model.encoder.layers.{i}.self_attn.output_proj.bias"))
123
+ rename_keys.append((f"transformer.encoder.layers.{i}.norm1.weight", f"model.encoder.layers.{i}.self_attn_layer_norm.weight"))
124
+ rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"model.encoder.layers.{i}.self_attn_layer_norm.bias"))
125
+ rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"model.encoder.layers.{i}.fc1.weight"))
126
+ rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"model.encoder.layers.{i}.fc1.bias"))
127
+ rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"model.encoder.layers.{i}.fc2.weight"))
128
+ rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"model.encoder.layers.{i}.fc2.bias"))
129
+ rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"model.encoder.layers.{i}.final_layer_norm.weight"))
130
+ rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"model.encoder.layers.{i}.final_layer_norm.bias"))
131
+
132
+ # transformer decoder
133
+ for i in range(config.decoder_layers):
134
+ rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight", f"model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight"))
135
+ rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias", f"model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias"))
136
+ rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.attention_weights.weight", f"model.decoder.layers.{i}.encoder_attn.attention_weights.weight"))
137
+ rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.attention_weights.bias", f"model.decoder.layers.{i}.encoder_attn.attention_weights.bias"))
138
+ rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.value_proj.weight", f"model.decoder.layers.{i}.encoder_attn.value_proj.weight"))
139
+ rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.value_proj.bias", f"model.decoder.layers.{i}.encoder_attn.value_proj.bias"))
140
+ rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.output_proj.weight", f"model.decoder.layers.{i}.encoder_attn.output_proj.weight"))
141
+ rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.output_proj.bias", f"model.decoder.layers.{i}.encoder_attn.output_proj.bias"))
142
+ rename_keys.append((f"transformer.decoder.layers.{i}.norm1.weight", f"model.decoder.layers.{i}.encoder_attn_layer_norm.weight"))
143
+ rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"model.decoder.layers.{i}.encoder_attn_layer_norm.bias"))
144
+ rename_keys.append((f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"model.decoder.layers.{i}.self_attn.out_proj.weight"))
145
+ rename_keys.append((f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"model.decoder.layers.{i}.self_attn.out_proj.bias"))
146
+ rename_keys.append((f"transformer.decoder.layers.{i}.norm2.weight", f"model.decoder.layers.{i}.self_attn_layer_norm.weight"))
147
+ rename_keys.append((f"transformer.decoder.layers.{i}.norm2.bias", f"model.decoder.layers.{i}.self_attn_layer_norm.bias"))
148
+ rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"model.decoder.layers.{i}.fc1.weight"))
149
+ rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"model.decoder.layers.{i}.fc1.bias"))
150
+ rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"model.decoder.layers.{i}.fc2.weight"))
151
+ rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"model.decoder.layers.{i}.fc2.bias"))
152
+ rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"model.decoder.layers.{i}.final_layer_norm.weight"))
153
+ rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"model.decoder.layers.{i}.final_layer_norm.bias"))
154
+
155
+ # fmt: on
156
+
157
+ return rename_keys
158
+
159
+
160
+ def rename_key(dct, old, new):
161
+ val = dct.pop(old)
162
+ dct[new] = val
163
+
164
+
165
+ # we split up the matrix of each encoder layer into queries, keys and values
166
+ def read_in_swin_q_k_v(state_dict, backbone_config):
167
+ num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
168
+ for i in range(len(backbone_config.depths)):
169
+ dim = num_features[i]
170
+ for j in range(backbone_config.depths[i]):
171
+ # fmt: off
172
+ # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
173
+ in_proj_weight = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
174
+ in_proj_bias = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
175
+ # next, add query, keys and values (in that order) to the state dict
176
+ state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
177
+ state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[: dim]
178
+ state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[
179
+ dim : dim * 2, :
180
+ ]
181
+ state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[
182
+ dim : dim * 2
183
+ ]
184
+ state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[
185
+ -dim :, :
186
+ ]
187
+ state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim :]
188
+ # fmt: on
189
+
190
+
191
+ def read_in_decoder_q_k_v(state_dict, config):
192
+ # transformer decoder self-attention layers
193
+ hidden_size = config.d_model
194
+ for i in range(config.decoder_layers):
195
+ # read in weights + bias of input projection layer of self-attention
196
+ in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
197
+ in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
198
+ # next, add query, keys and values (in that order) to the state dict
199
+ state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
200
+ state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
201
+ state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[
202
+ hidden_size : hidden_size * 2, :
203
+ ]
204
+ state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
205
+ state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
206
+ state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
207
+
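Both read-in helpers above rely on the same checkpoint convention: the original weights store the query, key and value projections as a single fused matrix of shape (3 * dim, dim) and a fused bias of shape (3 * dim,), stacked in q/k/v order. A minimal standalone sketch of that split, using an arbitrary toy dimension rather than a real checkpoint:

import torch

dim = 4  # toy dimension, purely illustrative
in_proj_weight = torch.randn(3 * dim, dim)  # fused q/k/v weight, stacked row-wise
in_proj_bias = torch.randn(3 * dim)         # fused q/k/v bias

# slice out the three projections exactly as the helpers above do
q_w, k_w, v_w = in_proj_weight[:dim, :], in_proj_weight[dim : 2 * dim, :], in_proj_weight[-dim:, :]
q_b, k_b, v_b = in_proj_bias[:dim], in_proj_bias[dim : 2 * dim], in_proj_bias[-dim:]

# stacking them back recovers the fused tensors
assert torch.equal(torch.cat([q_w, k_w, v_w], dim=0), in_proj_weight)
assert torch.equal(torch.cat([q_b, k_b, v_b], dim=0), in_proj_bias)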
208
+
209
+ # We will verify our results on an image of cute cats
210
+ def prepare_img():
211
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
212
+ im = Image.open(requests.get(url, stream=True).raw)
213
+
214
+ return im
215
+
216
+
217
+ @torch.no_grad()
218
+ def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
219
+ """
220
+ Copy/paste/tweak model's weights to our DETA structure.
221
+ """
222
+
223
+ # load config
224
+ config = get_deta_config(model_name)
225
+
226
+ # load original state dict
227
+ if model_name == "deta-swin-large":
228
+ checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
229
+ elif model_name == "deta-swin-large-o365":
230
+ checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
231
+ else:
232
+ raise ValueError(f"Model name {model_name} not supported")
233
+
234
+ state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
235
+
236
+ # original state dict
237
+ for name, param in state_dict.items():
238
+ print(name, param.shape)
239
+
240
+ # rename keys
241
+ rename_keys = create_rename_keys(config)
242
+ for src, dest in rename_keys:
243
+ rename_key(state_dict, src, dest)
244
+ read_in_swin_q_k_v(state_dict, config.backbone_config)
245
+ read_in_decoder_q_k_v(state_dict, config)
246
+
247
+ # fix some prefixes
248
+ for key in state_dict.copy().keys():
249
+ if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
250
+ val = state_dict.pop(key)
251
+ state_dict[key.replace("transformer.decoder", "model.decoder")] = val
252
+ if "input_proj" in key:
253
+ val = state_dict.pop(key)
254
+ state_dict["model." + key] = val
255
+ if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
256
+ val = state_dict.pop(key)
257
+ state_dict[key.replace("transformer", "model")] = val
258
+
259
+ # finally, create HuggingFace model and load state dict
260
+ model = DetaForObjectDetection(config)
261
+ model.load_state_dict(state_dict)
262
+ model.eval()
263
+
264
+ device = "cuda" if torch.cuda.is_available() else "cpu"
265
+ model.to(device)
266
+
267
+ # load image processor
268
+ processor = DetaImageProcessor(format="coco_detection")
269
+
270
+ # verify our conversion on image
271
+ img = prepare_img()
272
+ encoding = processor(images=img, return_tensors="pt")
273
+ pixel_values = encoding["pixel_values"]
274
+ outputs = model(pixel_values.to(device))
275
+
276
+ # verify logits
277
+ print("Logits:", outputs.logits[0, :3, :3])
278
+ print("Boxes:", outputs.pred_boxes[0, :3, :3])
279
+ if model_name == "deta-swin-large":
280
+ expected_logits = torch.tensor(
281
+ [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]]
282
+ )
283
+ expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
284
+ elif model_name == "deta-swin-large-o365":
285
+ expected_logits = torch.tensor(
286
+ [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]]
287
+ )
288
+ expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
289
+ assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
290
+ assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
291
+ print("Everything ok!")
292
+
293
+ if pytorch_dump_folder_path:
294
+ # Save model and processor
295
+ logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
296
+ Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
297
+ model.save_pretrained(pytorch_dump_folder_path)
298
+ processor.save_pretrained(pytorch_dump_folder_path)
299
+
300
+ # Push to hub
301
+ if push_to_hub:
302
+ print("Pushing model and processor to hub...")
303
+ model.push_to_hub(f"jozhang97/{model_name}")
304
+ processor.push_to_hub(f"jozhang97/{model_name}")
305
+
306
+
307
+ if __name__ == "__main__":
308
+ parser = argparse.ArgumentParser()
309
+
310
+ parser.add_argument(
311
+ "--model_name",
312
+ type=str,
313
+ default="deta-swin-large",
314
+ choices=["deta-swin-large", "deta-swin-large-o365"],
315
+ help="Name of the model you'd like to convert.",
316
+ )
317
+ parser.add_argument(
318
+ "--pytorch_dump_folder_path",
319
+ default=None,
320
+ type=str,
321
+ help="Path to the folder to output PyTorch model.",
322
+ )
323
+ parser.add_argument(
324
+ "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
325
+ )
326
+ args = parser.parse_args()
327
+ convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
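For reference, the script above can be driven either through its argparse interface or by calling the conversion function directly. A hedged sketch, assuming the script is importable from the installed transformers package (as it is in this venv layout) and using a placeholder output folder:

from transformers.models.deta.convert_deta_swin_to_pytorch import convert_deta_checkpoint

# "deta-swin-large" is one of the two names accepted by the --model_name choices above;
# the dump folder path is an illustrative placeholder.
convert_deta_checkpoint(
    model_name="deta-swin-large",
    pytorch_dump_folder_path="./deta-swin-large-converted",
    push_to_hub=False,
)

The equivalent command line passes the same values via --model_name and --pytorch_dump_folder_path.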
venv/lib/python3.10/site-packages/transformers/models/deta/image_processing_deta.py ADDED
@@ -0,0 +1,1174 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Image processor class for DETA."""
16
+
17
+ import pathlib
18
+ from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union
19
+
20
+ import numpy as np
21
+
22
+ from ...feature_extraction_utils import BatchFeature
23
+ from ...image_processing_utils import BaseImageProcessor, get_size_dict
24
+ from ...image_transforms import (
25
+ PaddingMode,
26
+ center_to_corners_format,
27
+ corners_to_center_format,
28
+ pad,
29
+ rescale,
30
+ resize,
31
+ rgb_to_id,
32
+ to_channel_dimension_format,
33
+ )
34
+ from ...image_utils import (
35
+ IMAGENET_DEFAULT_MEAN,
36
+ IMAGENET_DEFAULT_STD,
37
+ AnnotationFormat,
38
+ AnnotationType,
39
+ ChannelDimension,
40
+ ImageInput,
41
+ PILImageResampling,
42
+ get_image_size,
43
+ infer_channel_dimension_format,
44
+ is_batched,
45
+ is_scaled_image,
46
+ to_numpy_array,
47
+ valid_images,
48
+ validate_annotations,
49
+ validate_preprocess_arguments,
50
+ )
51
+ from ...utils import (
52
+ is_flax_available,
53
+ is_jax_tensor,
54
+ is_tf_available,
55
+ is_tf_tensor,
56
+ is_torch_available,
57
+ is_torch_tensor,
58
+ is_torchvision_available,
59
+ is_vision_available,
60
+ logging,
61
+ )
62
+ from ...utils.generic import TensorType
63
+
64
+
65
+ if is_torch_available():
66
+ import torch
67
+
68
+
69
+ if is_torchvision_available():
70
+ from torchvision.ops.boxes import batched_nms
71
+
72
+ if is_vision_available():
73
+ import PIL
74
+
75
+
76
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
77
+
78
+ SUPPORTED_ANNOTATION_FORMATS = (AnnotationFormat.COCO_DETECTION, AnnotationFormat.COCO_PANOPTIC)
79
+
80
+
81
+ # Copied from transformers.models.detr.image_processing_detr.get_size_with_aspect_ratio
82
+ def get_size_with_aspect_ratio(image_size, size, max_size=None) -> Tuple[int, int]:
83
+ """
84
+ Computes the output image size given the input image size and the desired output size.
85
+
86
+ Args:
87
+ image_size (`Tuple[int, int]`):
88
+ The input image size.
89
+ size (`int`):
90
+ The desired output size.
91
+ max_size (`int`, *optional*):
92
+ The maximum allowed output size.
93
+ """
94
+ height, width = image_size
95
+ if max_size is not None:
96
+ min_original_size = float(min((height, width)))
97
+ max_original_size = float(max((height, width)))
98
+ if max_original_size / min_original_size * size > max_size:
99
+ size = int(round(max_size * min_original_size / max_original_size))
100
+
101
+ if (height <= width and height == size) or (width <= height and width == size):
102
+ return height, width
103
+
104
+ if width < height:
105
+ ow = size
106
+ oh = int(size * height / width)
107
+ else:
108
+ oh = size
109
+ ow = int(size * width / height)
110
+ return (oh, ow)
111
+
112
+
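As a concrete check of the helper above (input sizes chosen arbitrarily): a 480x640 image with size=800 and max_size=1333 keeps its aspect ratio and becomes 800x1066, while a very wide 400x1000 image first has size shrunk so the long side stays under max_size. The import path below is the module added in this diff.

from transformers.models.deta.image_processing_deta import get_size_with_aspect_ratio

assert get_size_with_aspect_ratio((480, 640), size=800, max_size=1333) == (800, 1066)
# 1000 / 400 * 800 = 2000 > 1333, so size is first reduced to round(1333 * 400 / 1000) = 533
assert get_size_with_aspect_ratio((400, 1000), size=800, max_size=1333) == (533, 1332)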
113
+ # Copied from transformers.models.detr.image_processing_detr.get_resize_output_image_size
114
+ def get_resize_output_image_size(
115
+ input_image: np.ndarray,
116
+ size: Union[int, Tuple[int, int], List[int]],
117
+ max_size: Optional[int] = None,
118
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
119
+ ) -> Tuple[int, int]:
120
+ """
121
+ Computes the output image size given the input image size and the desired output size. If the desired output size
122
+ is a tuple or list, the output image size is returned as is. If the desired output size is an integer, the output
123
+ image size is computed by keeping the aspect ratio of the input image size.
124
+
125
+ Args:
126
+ input_image (`np.ndarray`):
127
+ The image to resize.
128
+ size (`int` or `Tuple[int, int]` or `List[int]`):
129
+ The desired output size.
130
+ max_size (`int`, *optional*):
131
+ The maximum allowed output size.
132
+ input_data_format (`ChannelDimension` or `str`, *optional*):
133
+ The channel dimension format of the input image. If not provided, it will be inferred from the input image.
134
+ """
135
+ image_size = get_image_size(input_image, input_data_format)
136
+ if isinstance(size, (list, tuple)):
137
+ return size
138
+
139
+ return get_size_with_aspect_ratio(image_size, size, max_size)
140
+
141
+
142
+ # Copied from transformers.models.detr.image_processing_detr.get_numpy_to_framework_fn
143
+ def get_numpy_to_framework_fn(arr) -> Callable:
144
+ """
145
+ Returns a function that converts a numpy array to the framework of the input array.
146
+
147
+ Args:
148
+ arr (`np.ndarray`): The array to convert.
149
+ """
150
+ if isinstance(arr, np.ndarray):
151
+ return np.array
152
+ if is_tf_available() and is_tf_tensor(arr):
153
+ import tensorflow as tf
154
+
155
+ return tf.convert_to_tensor
156
+ if is_torch_available() and is_torch_tensor(arr):
157
+ import torch
158
+
159
+ return torch.tensor
160
+ if is_flax_available() and is_jax_tensor(arr):
161
+ import jax.numpy as jnp
162
+
163
+ return jnp.array
164
+ raise ValueError(f"Cannot convert arrays of type {type(arr)}")
165
+
166
+
167
+ # Copied from transformers.models.detr.image_processing_detr.safe_squeeze
168
+ def safe_squeeze(arr: np.ndarray, axis: Optional[int] = None) -> np.ndarray:
169
+ """
170
+ Squeezes an array, but only if the axis specified has dim 1.
171
+ """
172
+ if axis is None:
173
+ return arr.squeeze()
174
+
175
+ try:
176
+ return arr.squeeze(axis=axis)
177
+ except ValueError:
178
+ return arr
179
+
180
+
181
+ # Copied from transformers.models.detr.image_processing_detr.normalize_annotation
182
+ def normalize_annotation(annotation: Dict, image_size: Tuple[int, int]) -> Dict:
183
+ image_height, image_width = image_size
184
+ norm_annotation = {}
185
+ for key, value in annotation.items():
186
+ if key == "boxes":
187
+ boxes = value
188
+ boxes = corners_to_center_format(boxes)
189
+ boxes /= np.asarray([image_width, image_height, image_width, image_height], dtype=np.float32)
190
+ norm_annotation[key] = boxes
191
+ else:
192
+ norm_annotation[key] = value
193
+ return norm_annotation
194
+
195
+
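The normalization above is just a corners-to-center conversion followed by division by the image size. A small numeric check with made-up values (plain numpy, no transformers code involved):

import numpy as np

# Hypothetical [x0, y0, x1, y1] box inside a 400 (height) x 600 (width) image.
box = np.array([10.0, 20.0, 110.0, 220.0])
center = np.array([(box[0] + box[2]) / 2, (box[1] + box[3]) / 2, box[2] - box[0], box[3] - box[1]])
image_height, image_width = 400, 600
normalized = center / np.array([image_width, image_height, image_width, image_height])
# -> approximately [0.1, 0.3, 0.1667, 0.5], i.e. (center_x, center_y, width, height) relative to the image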
196
+ # Copied from transformers.models.detr.image_processing_detr.max_across_indices
197
+ def max_across_indices(values: Iterable[Any]) -> List[Any]:
198
+ """
199
+ Return the maximum value across all indices of an iterable of values.
200
+ """
201
+ return [max(values_i) for values_i in zip(*values)]
202
+
203
+
204
+ # Copied from transformers.models.detr.image_processing_detr.get_max_height_width
205
+ def get_max_height_width(
206
+ images: List[np.ndarray], input_data_format: Optional[Union[str, ChannelDimension]] = None
207
+ ) -> List[int]:
208
+ """
209
+ Get the maximum height and width across all images in a batch.
210
+ """
211
+ if input_data_format is None:
212
+ input_data_format = infer_channel_dimension_format(images[0])
213
+
214
+ if input_data_format == ChannelDimension.FIRST:
215
+ _, max_height, max_width = max_across_indices([img.shape for img in images])
216
+ elif input_data_format == ChannelDimension.LAST:
217
+ max_height, max_width, _ = max_across_indices([img.shape for img in images])
218
+ else:
219
+ raise ValueError(f"Invalid channel dimension format: {input_data_format}")
220
+ return (max_height, max_width)
221
+
222
+
223
+ # Copied from transformers.models.detr.image_processing_detr.make_pixel_mask
224
+ def make_pixel_mask(
225
+ image: np.ndarray, output_size: Tuple[int, int], input_data_format: Optional[Union[str, ChannelDimension]] = None
226
+ ) -> np.ndarray:
227
+ """
228
+ Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding.
229
+
230
+ Args:
231
+ image (`np.ndarray`):
232
+ Image to make the pixel mask for.
233
+ output_size (`Tuple[int, int]`):
234
+ Output size of the mask.
235
+ """
236
+ input_height, input_width = get_image_size(image, channel_dim=input_data_format)
237
+ mask = np.zeros(output_size, dtype=np.int64)
238
+ mask[:input_height, :input_width] = 1
239
+ return mask
240
+
241
+
242
+ # Copied from transformers.models.detr.image_processing_detr.convert_coco_poly_to_mask
243
+ def convert_coco_poly_to_mask(segmentations, height: int, width: int) -> np.ndarray:
244
+ """
245
+ Convert a COCO polygon annotation to a mask.
246
+
247
+ Args:
248
+ segmentations (`List[List[float]]`):
249
+ List of polygons, each polygon represented by a list of x-y coordinates.
250
+ height (`int`):
251
+ Height of the mask.
252
+ width (`int`):
253
+ Width of the mask.
254
+ """
255
+ try:
256
+ from pycocotools import mask as coco_mask
257
+ except ImportError:
258
+ raise ImportError("Pycocotools is not installed in your environment.")
259
+
260
+ masks = []
261
+ for polygons in segmentations:
262
+ rles = coco_mask.frPyObjects(polygons, height, width)
263
+ mask = coco_mask.decode(rles)
264
+ if len(mask.shape) < 3:
265
+ mask = mask[..., None]
266
+ mask = np.asarray(mask, dtype=np.uint8)
267
+ mask = np.any(mask, axis=2)
268
+ masks.append(mask)
269
+ if masks:
270
+ masks = np.stack(masks, axis=0)
271
+ else:
272
+ masks = np.zeros((0, height, width), dtype=np.uint8)
273
+
274
+ return masks
275
+
276
+
277
+ # Copied from transformers.models.detr.image_processing_detr.prepare_coco_detection_annotation with DETR->DETA
278
+ def prepare_coco_detection_annotation(
279
+ image,
280
+ target,
281
+ return_segmentation_masks: bool = False,
282
+ input_data_format: Optional[Union[ChannelDimension, str]] = None,
283
+ ):
284
+ """
285
+ Convert the target in COCO format into the format expected by DETA.
286
+ """
287
+ image_height, image_width = get_image_size(image, channel_dim=input_data_format)
288
+
289
+ image_id = target["image_id"]
290
+ image_id = np.asarray([image_id], dtype=np.int64)
291
+
292
+ # Get all COCO annotations for the given image.
293
+ annotations = target["annotations"]
294
+ annotations = [obj for obj in annotations if "iscrowd" not in obj or obj["iscrowd"] == 0]
295
+
296
+ classes = [obj["category_id"] for obj in annotations]
297
+ classes = np.asarray(classes, dtype=np.int64)
298
+
299
+ # for conversion to coco api
300
+ area = np.asarray([obj["area"] for obj in annotations], dtype=np.float32)
301
+ iscrowd = np.asarray([obj["iscrowd"] if "iscrowd" in obj else 0 for obj in annotations], dtype=np.int64)
302
+
303
+ boxes = [obj["bbox"] for obj in annotations]
304
+ # guard against no boxes via resizing
305
+ boxes = np.asarray(boxes, dtype=np.float32).reshape(-1, 4)
306
+ boxes[:, 2:] += boxes[:, :2]
307
+ boxes[:, 0::2] = boxes[:, 0::2].clip(min=0, max=image_width)
308
+ boxes[:, 1::2] = boxes[:, 1::2].clip(min=0, max=image_height)
309
+
310
+ keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])
311
+
312
+ new_target = {}
313
+ new_target["image_id"] = image_id
314
+ new_target["class_labels"] = classes[keep]
315
+ new_target["boxes"] = boxes[keep]
316
+ new_target["area"] = area[keep]
317
+ new_target["iscrowd"] = iscrowd[keep]
318
+ new_target["orig_size"] = np.asarray([int(image_height), int(image_width)], dtype=np.int64)
319
+
320
+ if annotations and "keypoints" in annotations[0]:
321
+ keypoints = [obj["keypoints"] for obj in annotations]
322
+ # Converting the filtered keypoints list to a numpy array
323
+ keypoints = np.asarray(keypoints, dtype=np.float32)
324
+ # Apply the keep mask here to filter the relevant annotations
325
+ keypoints = keypoints[keep]
326
+ num_keypoints = keypoints.shape[0]
327
+ keypoints = keypoints.reshape((-1, 3)) if num_keypoints else keypoints
328
+ new_target["keypoints"] = keypoints
329
+
330
+ if return_segmentation_masks:
331
+ segmentation_masks = [obj["segmentation"] for obj in annotations]
332
+ masks = convert_coco_poly_to_mask(segmentation_masks, image_height, image_width)
333
+ new_target["masks"] = masks[keep]
334
+
335
+ return new_target
336
+
337
+
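The target expected by the function above follows the raw COCO detection layout: an image_id plus a list of per-object annotations. A minimal, entirely hypothetical example of that input structure:

# Placeholder ids and coordinates; bbox is COCO-style [x, y, width, height] in pixels.
target = {
    "image_id": 42,
    "annotations": [
        {"category_id": 17, "bbox": [100.0, 50.0, 80.0, 60.0], "area": 4800.0, "iscrowd": 0},
    ],
}
# prepare_coco_detection_annotation converts bbox to [x0, y0, x1, y1], clips it to the image,
# drops degenerate boxes, and returns numpy arrays under "class_labels", "boxes", "area",
# "iscrowd" and "orig_size".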
338
+ # Copied from transformers.models.detr.image_processing_detr.masks_to_boxes
339
+ def masks_to_boxes(masks: np.ndarray) -> np.ndarray:
340
+ """
341
+ Compute the bounding boxes around the provided panoptic segmentation masks.
342
+
343
+ Args:
344
+ masks: masks in format `[number_masks, height, width]`, where `number_masks` is the number of masks
345
+
346
+ Returns:
347
+ boxes: bounding boxes in format `[number_masks, 4]` in xyxy format
348
+ """
349
+ if masks.size == 0:
350
+ return np.zeros((0, 4))
351
+
352
+ h, w = masks.shape[-2:]
353
+ y = np.arange(0, h, dtype=np.float32)
354
+ x = np.arange(0, w, dtype=np.float32)
355
+ # see https://github.com/pytorch/pytorch/issues/50276
356
+ y, x = np.meshgrid(y, x, indexing="ij")
357
+
358
+ x_mask = masks * np.expand_dims(x, axis=0)
359
+ x_max = x_mask.reshape(x_mask.shape[0], -1).max(-1)
360
+ x = np.ma.array(x_mask, mask=~(np.array(masks, dtype=bool)))
361
+ x_min = x.filled(fill_value=1e8)
362
+ x_min = x_min.reshape(x_min.shape[0], -1).min(-1)
363
+
364
+ y_mask = masks * np.expand_dims(y, axis=0)
365
+ y_max = y_mask.reshape(x_mask.shape[0], -1).max(-1)
366
+ y = np.ma.array(y_mask, mask=~(np.array(masks, dtype=bool)))
367
+ y_min = y.filled(fill_value=1e8)
368
+ y_min = y_min.reshape(y_min.shape[0], -1).min(-1)
369
+
370
+ return np.stack([x_min, y_min, x_max, y_max], 1)
371
+
372
+
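A quick sanity check of masks_to_boxes with a single toy mask (the import path is the module added in this diff; the mask contents are arbitrary):

import numpy as np
from transformers.models.deta.image_processing_deta import masks_to_boxes

mask = np.zeros((1, 3, 4), dtype=np.uint8)  # one hypothetical 3x4 mask
mask[0, 1:3, 1:3] = 1                       # a 2x2 blob whose extreme pixels are (1, 1) and (2, 2)
print(masks_to_boxes(mask))                 # [[1. 1. 2. 2.]] in xyxy pixel indices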
373
+ # Copied from transformers.models.detr.image_processing_detr.prepare_coco_panoptic_annotation with DETR->DETA
374
+ def prepare_coco_panoptic_annotation(
375
+ image: np.ndarray,
376
+ target: Dict,
377
+ masks_path: Union[str, pathlib.Path],
378
+ return_masks: bool = True,
379
+ input_data_format: Union[ChannelDimension, str] = None,
380
+ ) -> Dict:
381
+ """
382
+ Prepare a coco panoptic annotation for DETA.
383
+ """
384
+ image_height, image_width = get_image_size(image, channel_dim=input_data_format)
385
+ annotation_path = pathlib.Path(masks_path) / target["file_name"]
386
+
387
+ new_target = {}
388
+ new_target["image_id"] = np.asarray([target["image_id"] if "image_id" in target else target["id"]], dtype=np.int64)
389
+ new_target["size"] = np.asarray([image_height, image_width], dtype=np.int64)
390
+ new_target["orig_size"] = np.asarray([image_height, image_width], dtype=np.int64)
391
+
392
+ if "segments_info" in target:
393
+ masks = np.asarray(PIL.Image.open(annotation_path), dtype=np.uint32)
394
+ masks = rgb_to_id(masks)
395
+
396
+ ids = np.array([segment_info["id"] for segment_info in target["segments_info"]])
397
+ masks = masks == ids[:, None, None]
398
+ masks = masks.astype(np.uint8)
399
+ if return_masks:
400
+ new_target["masks"] = masks
401
+ new_target["boxes"] = masks_to_boxes(masks)
402
+ new_target["class_labels"] = np.array(
403
+ [segment_info["category_id"] for segment_info in target["segments_info"]], dtype=np.int64
404
+ )
405
+ new_target["iscrowd"] = np.asarray(
406
+ [segment_info["iscrowd"] for segment_info in target["segments_info"]], dtype=np.int64
407
+ )
408
+ new_target["area"] = np.asarray(
409
+ [segment_info["area"] for segment_info in target["segments_info"]], dtype=np.float32
410
+ )
411
+
412
+ return new_target
413
+
414
+
415
+ # Copied from transformers.models.detr.image_processing_detr.resize_annotation
416
+ def resize_annotation(
417
+ annotation: Dict[str, Any],
418
+ orig_size: Tuple[int, int],
419
+ target_size: Tuple[int, int],
420
+ threshold: float = 0.5,
421
+ resample: PILImageResampling = PILImageResampling.NEAREST,
422
+ ):
423
+ """
424
+ Resizes an annotation to a target size.
425
+
426
+ Args:
427
+ annotation (`Dict[str, Any]`):
428
+ The annotation dictionary.
429
+ orig_size (`Tuple[int, int]`):
430
+ The original size of the input image.
431
+ target_size (`Tuple[int, int]`):
432
+ The target size of the image, as returned by the preprocessing `resize` step.
433
+ threshold (`float`, *optional*, defaults to 0.5):
434
+ The threshold used to binarize the segmentation masks.
435
+ resample (`PILImageResampling`, defaults to `PILImageResampling.NEAREST`):
436
+ The resampling filter to use when resizing the masks.
437
+ """
438
+ ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(target_size, orig_size))
439
+ ratio_height, ratio_width = ratios
440
+
441
+ new_annotation = {}
442
+ new_annotation["size"] = target_size
443
+
444
+ for key, value in annotation.items():
445
+ if key == "boxes":
446
+ boxes = value
447
+ scaled_boxes = boxes * np.asarray([ratio_width, ratio_height, ratio_width, ratio_height], dtype=np.float32)
448
+ new_annotation["boxes"] = scaled_boxes
449
+ elif key == "area":
450
+ area = value
451
+ scaled_area = area * (ratio_width * ratio_height)
452
+ new_annotation["area"] = scaled_area
453
+ elif key == "masks":
454
+ masks = value[:, None]
455
+ masks = np.array([resize(mask, target_size, resample=resample) for mask in masks])
456
+ masks = masks.astype(np.float32)
457
+ masks = masks[:, 0] > threshold
458
+ new_annotation["masks"] = masks
459
+ elif key == "size":
460
+ new_annotation["size"] = target_size
461
+ else:
462
+ new_annotation[key] = value
463
+
464
+ return new_annotation
465
+
466
+
467
+ class DetaImageProcessor(BaseImageProcessor):
468
+ r"""
469
+ Constructs a DETA image processor.
470
+
471
+ Args:
472
+ format (`str`, *optional*, defaults to `"coco_detection"`):
473
+ Data format of the annotations. One of "coco_detection" or "coco_panoptic".
474
+ do_resize (`bool`, *optional*, defaults to `True`):
475
+ Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be
476
+ overridden by the `do_resize` parameter in the `preprocess` method.
477
+ size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 800, "longest_edge": 1333}`):
478
+ Size of the image's (height, width) dimensions after resizing. Can be overridden by the `size` parameter in
479
+ the `preprocess` method.
480
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
481
+ Resampling filter to use if resizing the image.
482
+ do_rescale (`bool`, *optional*, defaults to `True`):
483
+ Controls whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the
484
+ `do_rescale` parameter in the `preprocess` method.
485
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
486
+ Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
487
+ `preprocess` method.
488
+ do_normalize:
489
+ Controls whether to normalize the image. Can be overridden by the `do_normalize` parameter in the
490
+ `preprocess` method.
491
+ image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`):
492
+ Mean values to use when normalizing the image. Can be a single value or a list of values, one for each
493
+ channel. Can be overridden by the `image_mean` parameter in the `preprocess` method.
494
+ image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`):
495
+ Standard deviation values to use when normalizing the image. Can be a single value or a list of values, one
496
+ for each channel. Can be overridden by the `image_std` parameter in the `preprocess` method.
497
+ do_convert_annotations (`bool`, *optional*, defaults to `True`):
498
+ Controls whether to convert the annotations to the format expected by the DETR model. Converts the
499
+ bounding boxes to the format `(center_x, center_y, width, height)` and in the range `[0, 1]`.
500
+ Can be overridden by the `do_convert_annotations` parameter in the `preprocess` method.
501
+ do_pad (`bool`, *optional*, defaults to `True`):
502
+ Controls whether to pad the image. Can be overridden by the `do_pad` parameter in the `preprocess`
503
+ method. If `True` will pad the images in the batch to the largest height and width in the batch.
504
+ Padding will be applied to the bottom and right of the image with zeros.
505
+ """
506
+
507
+ model_input_names = ["pixel_values", "pixel_mask"]
508
+
509
+ def __init__(
510
+ self,
511
+ format: Union[str, AnnotationFormat] = AnnotationFormat.COCO_DETECTION,
512
+ do_resize: bool = True,
513
+ size: Dict[str, int] = None,
514
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
515
+ do_rescale: bool = True,
516
+ rescale_factor: Union[int, float] = 1 / 255,
517
+ do_normalize: bool = True,
518
+ image_mean: Union[float, List[float]] = None,
519
+ image_std: Union[float, List[float]] = None,
520
+ do_convert_annotations: bool = True,
521
+ do_pad: bool = True,
522
+ **kwargs,
523
+ ) -> None:
524
+ if "pad_and_return_pixel_mask" in kwargs:
525
+ do_pad = kwargs.pop("pad_and_return_pixel_mask")
526
+
527
+ size = size if size is not None else {"shortest_edge": 800, "longest_edge": 1333}
528
+ size = get_size_dict(size, default_to_square=False)
529
+
530
+ if do_convert_annotations is None:
531
+ do_convert_annotations = do_normalize
532
+
533
+ super().__init__(**kwargs)
534
+ self.format = format
535
+ self.do_resize = do_resize
536
+ self.size = size
537
+ self.resample = resample
538
+ self.do_rescale = do_rescale
539
+ self.rescale_factor = rescale_factor
540
+ self.do_normalize = do_normalize
541
+ self.do_convert_annotations = do_convert_annotations
542
+ self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
543
+ self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
544
+ self.do_pad = do_pad
545
+
546
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_annotation with DETR->DETA
547
+ def prepare_annotation(
548
+ self,
549
+ image: np.ndarray,
550
+ target: Dict,
551
+ format: Optional[AnnotationFormat] = None,
552
+ return_segmentation_masks: bool = None,
553
+ masks_path: Optional[Union[str, pathlib.Path]] = None,
554
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
555
+ ) -> Dict:
556
+ """
557
+ Prepare an annotation for feeding into a DETA model.
558
+ """
559
+ format = format if format is not None else self.format
560
+
561
+ if format == AnnotationFormat.COCO_DETECTION:
562
+ return_segmentation_masks = False if return_segmentation_masks is None else return_segmentation_masks
563
+ target = prepare_coco_detection_annotation(
564
+ image, target, return_segmentation_masks, input_data_format=input_data_format
565
+ )
566
+ elif format == AnnotationFormat.COCO_PANOPTIC:
567
+ return_segmentation_masks = True if return_segmentation_masks is None else return_segmentation_masks
568
+ target = prepare_coco_panoptic_annotation(
569
+ image,
570
+ target,
571
+ masks_path=masks_path,
572
+ return_masks=return_segmentation_masks,
573
+ input_data_format=input_data_format,
574
+ )
575
+ else:
576
+ raise ValueError(f"Format {format} is not supported.")
577
+ return target
578
+
579
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare
580
+ def prepare(self, image, target, return_segmentation_masks=None, masks_path=None):
581
+ logger.warning_once(
582
+ "The `prepare` method is deprecated and will be removed in v4.33. "
583
+ "Please use `prepare_annotation` instead. Note: the `prepare_annotation` method "
584
+ "does not return the image anymore.",
585
+ )
586
+ target = self.prepare_annotation(image, target, return_segmentation_masks, masks_path, self.format)
587
+ return image, target
588
+
589
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.convert_coco_poly_to_mask
590
+ def convert_coco_poly_to_mask(self, *args, **kwargs):
591
+ logger.warning_once("The `convert_coco_poly_to_mask` method is deprecated and will be removed in v4.33. ")
592
+ return convert_coco_poly_to_mask(*args, **kwargs)
593
+
594
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_coco_detection
595
+ def prepare_coco_detection(self, *args, **kwargs):
596
+ logger.warning_once("The `prepare_coco_detection` method is deprecated and will be removed in v4.33. ")
597
+ return prepare_coco_detection_annotation(*args, **kwargs)
598
+
599
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_coco_panoptic
600
+ def prepare_coco_panoptic(self, *args, **kwargs):
601
+ logger.warning_once("The `prepare_coco_panoptic` method is deprecated and will be removed in v4.33. ")
602
+ return prepare_coco_panoptic_annotation(*args, **kwargs)
603
+
604
+ def resize(
605
+ self,
606
+ image: np.ndarray,
607
+ size: Dict[str, int],
608
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
609
+ data_format: Optional[ChannelDimension] = None,
610
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
611
+ **kwargs,
612
+ ) -> np.ndarray:
613
+ """
614
+ Resize the image to the given size. Size can be `min_size` (scalar) or `(height, width)` tuple. If size is an
615
+ int, smaller edge of the image will be matched to this number.
616
+
617
+ Args:
618
+ image (`np.ndarray`):
619
+ Image to resize.
620
+ size (`Dict[str, int]`):
621
+ The desired output size. Can contain keys `shortest_edge` and `longest_edge` or `height` and `width`.
622
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
623
+ Resampling filter to use if resizing the image.
624
+ data_format (`ChannelDimension`, *optional*):
625
+ The channel dimension format for the output image. If unset, the channel dimension format of the input
626
+ image is used.
627
+ input_data_format (`ChannelDimension` or `str`, *optional*):
628
+ The channel dimension format of the input image. If not provided, it will be inferred from the input
629
+ image.
630
+ """
631
+ size = get_size_dict(size, default_to_square=False)
632
+ if "shortest_edge" in size and "longest_edge" in size:
633
+ size = get_resize_output_image_size(
634
+ image, size["shortest_edge"], size["longest_edge"], input_data_format=input_data_format
635
+ )
636
+ elif "height" in size and "width" in size:
637
+ size = (size["height"], size["width"])
638
+ else:
639
+ raise ValueError(
640
+ "Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got"
641
+ f" {size.keys()}."
642
+ )
643
+ image = resize(
644
+ image, size=size, resample=resample, data_format=data_format, input_data_format=input_data_format
645
+ )
646
+ return image
647
+
648
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.resize_annotation
649
+ def resize_annotation(
650
+ self,
651
+ annotation,
652
+ orig_size,
653
+ size,
654
+ resample: PILImageResampling = PILImageResampling.NEAREST,
655
+ ) -> Dict:
656
+ """
657
+ Resize the annotation to match the resized image. If size is an int, smaller edge of the mask will be matched
658
+ to this number.
659
+ """
660
+ return resize_annotation(annotation, orig_size=orig_size, target_size=size, resample=resample)
661
+
662
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.rescale
663
+ def rescale(
664
+ self,
665
+ image: np.ndarray,
666
+ rescale_factor: float,
667
+ data_format: Optional[Union[str, ChannelDimension]] = None,
668
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
669
+ ) -> np.ndarray:
670
+ """
671
+ Rescale the image by the given factor. image = image * rescale_factor.
672
+
673
+ Args:
674
+ image (`np.ndarray`):
675
+ Image to rescale.
676
+ rescale_factor (`float`):
677
+ The value to use for rescaling.
678
+ data_format (`str` or `ChannelDimension`, *optional*):
679
+ The channel dimension format for the output image. If unset, the channel dimension format of the input
680
+ image is used. Can be one of:
681
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
682
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
683
+ input_data_format (`str` or `ChannelDimension`, *optional*):
684
+ The channel dimension format for the input image. If unset, is inferred from the input image. Can be
685
+ one of:
686
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
687
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
688
+ """
689
+ return rescale(image, rescale_factor, data_format=data_format, input_data_format=input_data_format)
690
+
691
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.normalize_annotation
692
+ def normalize_annotation(self, annotation: Dict, image_size: Tuple[int, int]) -> Dict:
693
+ """
694
+ Normalize the boxes in the annotation from `[top_left_x, top_left_y, bottom_right_x, bottom_right_y]` to
695
+ `[center_x, center_y, width, height]` format and from absolute to relative pixel values.
696
+ """
697
+ return normalize_annotation(annotation, image_size=image_size)
698
+
699
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor._update_annotation_for_padded_image
700
+ def _update_annotation_for_padded_image(
701
+ self,
702
+ annotation: Dict,
703
+ input_image_size: Tuple[int, int],
704
+ output_image_size: Tuple[int, int],
705
+ padding,
706
+ update_bboxes,
707
+ ) -> Dict:
708
+ """
709
+ Update the annotation for a padded image.
710
+ """
711
+ new_annotation = {}
712
+ new_annotation["size"] = output_image_size
713
+
714
+ for key, value in annotation.items():
715
+ if key == "masks":
716
+ masks = value
717
+ masks = pad(
718
+ masks,
719
+ padding,
720
+ mode=PaddingMode.CONSTANT,
721
+ constant_values=0,
722
+ input_data_format=ChannelDimension.FIRST,
723
+ )
724
+ masks = safe_squeeze(masks, 1)
725
+ new_annotation["masks"] = masks
726
+ elif key == "boxes" and update_bboxes:
727
+ boxes = value
728
+ boxes *= np.asarray(
729
+ [
730
+ input_image_size[1] / output_image_size[1],
731
+ input_image_size[0] / output_image_size[0],
732
+ input_image_size[1] / output_image_size[1],
733
+ input_image_size[0] / output_image_size[0],
734
+ ]
735
+ )
736
+ new_annotation["boxes"] = boxes
737
+ elif key == "size":
738
+ new_annotation["size"] = output_image_size
739
+ else:
740
+ new_annotation[key] = value
741
+ return new_annotation
742
+
743
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor._pad_image
744
+ def _pad_image(
745
+ self,
746
+ image: np.ndarray,
747
+ output_size: Tuple[int, int],
748
+ annotation: Optional[Dict[str, Any]] = None,
749
+ constant_values: Union[float, Iterable[float]] = 0,
750
+ data_format: Optional[ChannelDimension] = None,
751
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
752
+ update_bboxes: bool = True,
753
+ ) -> np.ndarray:
754
+ """
755
+ Pad an image with zeros to the given size.
756
+ """
757
+ input_height, input_width = get_image_size(image, channel_dim=input_data_format)
758
+ output_height, output_width = output_size
759
+
760
+ pad_bottom = output_height - input_height
761
+ pad_right = output_width - input_width
762
+ padding = ((0, pad_bottom), (0, pad_right))
763
+ padded_image = pad(
764
+ image,
765
+ padding,
766
+ mode=PaddingMode.CONSTANT,
767
+ constant_values=constant_values,
768
+ data_format=data_format,
769
+ input_data_format=input_data_format,
770
+ )
771
+ if annotation is not None:
772
+ annotation = self._update_annotation_for_padded_image(
773
+ annotation, (input_height, input_width), (output_height, output_width), padding, update_bboxes
774
+ )
775
+ return padded_image, annotation
776
+
777
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.pad
778
+ def pad(
779
+ self,
780
+ images: List[np.ndarray],
781
+ annotations: Optional[Union[AnnotationType, List[AnnotationType]]] = None,
782
+ constant_values: Union[float, Iterable[float]] = 0,
783
+ return_pixel_mask: bool = True,
784
+ return_tensors: Optional[Union[str, TensorType]] = None,
785
+ data_format: Optional[ChannelDimension] = None,
786
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
787
+ update_bboxes: bool = True,
788
+ ) -> BatchFeature:
789
+ """
790
+ Pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width
791
+ in the batch and optionally returns their corresponding pixel mask.
792
+
793
+ Args:
794
+ images (List[`np.ndarray`]):
795
+ Images to pad.
796
+ annotations (`AnnotationType` or `List[AnnotationType]`, *optional*):
797
+ Annotations to transform according to the padding that is applied to the images.
798
+ constant_values (`float` or `Iterable[float]`, *optional*):
799
+ The value to use for the padding if `mode` is `"constant"`.
800
+ return_pixel_mask (`bool`, *optional*, defaults to `True`):
801
+ Whether to return a pixel mask.
802
+ return_tensors (`str` or `TensorType`, *optional*):
803
+ The type of tensors to return. Can be one of:
804
+ - Unset: Return a list of `np.ndarray`.
805
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
806
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
807
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
808
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
809
+ data_format (`str` or `ChannelDimension`, *optional*):
810
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
811
+ input_data_format (`ChannelDimension` or `str`, *optional*):
812
+ The channel dimension format of the input image. If not provided, it will be inferred.
813
+ update_bboxes (`bool`, *optional*, defaults to `True`):
814
+ Whether to update the bounding boxes in the annotations to match the padded images. If the
815
+ bounding boxes have not been converted to relative coordinates and `(centre_x, centre_y, width, height)`
816
+ format, the bounding boxes will not be updated.
817
+ """
818
+ pad_size = get_max_height_width(images, input_data_format=input_data_format)
819
+
820
+ annotation_list = annotations if annotations is not None else [None] * len(images)
821
+ padded_images = []
822
+ padded_annotations = []
823
+ for image, annotation in zip(images, annotation_list):
824
+ padded_image, padded_annotation = self._pad_image(
825
+ image,
826
+ pad_size,
827
+ annotation,
828
+ constant_values=constant_values,
829
+ data_format=data_format,
830
+ input_data_format=input_data_format,
831
+ update_bboxes=update_bboxes,
832
+ )
833
+ padded_images.append(padded_image)
834
+ padded_annotations.append(padded_annotation)
835
+
836
+ data = {"pixel_values": padded_images}
837
+
838
+ if return_pixel_mask:
839
+ masks = [
840
+ make_pixel_mask(image=image, output_size=pad_size, input_data_format=input_data_format)
841
+ for image in images
842
+ ]
843
+ data["pixel_mask"] = masks
844
+
845
+ encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
846
+
847
+ if annotations is not None:
848
+ encoded_inputs["labels"] = [
849
+ BatchFeature(annotation, tensor_type=return_tensors) for annotation in padded_annotations
850
+ ]
851
+
852
+ return encoded_inputs
853
+
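The pad method can also be used on its own. A hedged sketch with two dummy channels-first images of different sizes (shapes are arbitrary):

import numpy as np
from transformers import DetaImageProcessor

processor = DetaImageProcessor()
images = [
    np.zeros((3, 100, 120), dtype=np.float32),
    np.zeros((3, 80, 140), dtype=np.float32),
]
batch = processor.pad(images, return_tensors="np")
print(batch["pixel_values"].shape)  # (2, 3, 100, 140): padded to the max height/width in the batch
print(batch["pixel_mask"].shape)    # (2, 100, 140): 1 on real pixels, 0 on padding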
854
+ def preprocess(
855
+ self,
856
+ images: ImageInput,
857
+ annotations: Optional[Union[List[Dict], List[List[Dict]]]] = None,
858
+ return_segmentation_masks: bool = None,
859
+ masks_path: Optional[Union[str, pathlib.Path]] = None,
860
+ do_resize: Optional[bool] = None,
861
+ size: Optional[Dict[str, int]] = None,
862
+ resample=None, # PILImageResampling
863
+ do_rescale: Optional[bool] = None,
864
+ rescale_factor: Optional[Union[int, float]] = None,
865
+ do_normalize: Optional[bool] = None,
866
+ image_mean: Optional[Union[float, List[float]]] = None,
867
+ image_std: Optional[Union[float, List[float]]] = None,
868
+ do_convert_annotations: Optional[bool] = None,
869
+ do_pad: Optional[bool] = None,
870
+ format: Optional[Union[str, AnnotationFormat]] = None,
871
+ return_tensors: Optional[Union[TensorType, str]] = None,
872
+ data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
873
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
874
+ **kwargs,
875
+ ) -> BatchFeature:
876
+ """
877
+ Preprocess an image or a batch of images so that it can be used by the model.
878
+
879
+ Args:
880
+ images (`ImageInput`):
881
+ Image or batch of images to preprocess. Expects a single or batch of images with pixel values ranging
882
+ from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`.
883
+ annotations (`List[Dict]` or `List[List[Dict]]`, *optional*):
884
+ List of annotations associated with the image or batch of images. If annotation is for object
885
+ detection, the annotations should be a dictionary with the following keys:
886
+ - "image_id" (`int`): The image id.
887
+ - "annotations" (`List[Dict]`): List of annotations for an image. Each annotation should be a
888
+ dictionary. An image can have no annotations, in which case the list should be empty.
889
+ If annotation is for segmentation, the annotations should be a dictionary with the following keys:
890
+ - "image_id" (`int`): The image id.
891
+ - "segments_info" (`List[Dict]`): List of segments for an image. Each segment should be a dictionary.
892
+ An image can have no segments, in which case the list should be empty.
893
+ - "file_name" (`str`): The file name of the image.
894
+ return_segmentation_masks (`bool`, *optional*, defaults to self.return_segmentation_masks):
895
+ Whether to return segmentation masks.
896
+ masks_path (`str` or `pathlib.Path`, *optional*):
897
+ Path to the directory containing the segmentation masks.
898
+ do_resize (`bool`, *optional*, defaults to self.do_resize):
899
+ Whether to resize the image.
900
+ size (`Dict[str, int]`, *optional*, defaults to self.size):
901
+ Size of the image after resizing.
902
+ resample (`PILImageResampling`, *optional*, defaults to self.resample):
903
+ Resampling filter to use when resizing the image.
904
+ do_rescale (`bool`, *optional*, defaults to self.do_rescale):
905
+ Whether to rescale the image.
906
+ rescale_factor (`float`, *optional*, defaults to self.rescale_factor):
907
+ Rescale factor to use when rescaling the image.
908
+ do_normalize (`bool`, *optional*, defaults to self.do_normalize):
909
+ Whether to normalize the image.
910
+ image_mean (`float` or `List[float]`, *optional*, defaults to self.image_mean):
911
+ Mean to use when normalizing the image.
912
+ image_std (`float` or `List[float]`, *optional*, defaults to self.image_std):
913
+ Standard deviation to use when normalizing the image.
914
+ do_convert_annotations (`bool`, *optional*, defaults to self.do_convert_annotations):
915
+ Whether to convert the annotations to the format expected by the model. Converts the bounding
916
+ boxes from the format `(top_left_x, top_left_y, width, height)` to `(center_x, center_y, width, height)`
917
+ and in relative coordinates.
918
+ do_pad (`bool`, *optional*, defaults to self.do_pad):
919
+ Whether to pad the image. If `True` will pad the images in the batch to the largest image in the batch
920
+ and create a pixel mask. Padding will be applied to the bottom and right of the image with zeros.
921
+ format (`str` or `AnnotationFormat`, *optional*, defaults to self.format):
922
+ Format of the annotations.
923
+ return_tensors (`str` or `TensorType`, *optional*, defaults to self.return_tensors):
924
+ Type of tensors to return. If `None`, will return the list of images.
925
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
926
+ The channel dimension format for the output image. Can be one of:
927
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
928
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
929
+ - Unset: Use the channel dimension format of the input image.
930
+ input_data_format (`ChannelDimension` or `str`, *optional*):
931
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
932
+ from the input image. Can be one of:
933
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
934
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
935
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
936
+ """
937
+ if "pad_and_return_pixel_mask" in kwargs:
938
+ logger.warning_once(
939
+ "The `pad_and_return_pixel_mask` argument is deprecated and will be removed in a future version, "
940
+ "use `do_pad` instead.",
941
+ )
942
+ do_pad = kwargs.pop("pad_and_return_pixel_mask")
943
+
944
+ do_resize = self.do_resize if do_resize is None else do_resize
945
+ size = self.size if size is None else size
946
+ size = get_size_dict(size=size, default_to_square=False)
947
+ resample = self.resample if resample is None else resample
948
+ do_rescale = self.do_rescale if do_rescale is None else do_rescale
949
+ rescale_factor = self.rescale_factor if rescale_factor is None else rescale_factor
950
+ do_normalize = self.do_normalize if do_normalize is None else do_normalize
951
+ image_mean = self.image_mean if image_mean is None else image_mean
952
+ image_std = self.image_std if image_std is None else image_std
953
+ do_convert_annotations = (
954
+ self.do_convert_annotations if do_convert_annotations is None else do_convert_annotations
955
+ )
956
+ do_pad = self.do_pad if do_pad is None else do_pad
957
+ format = self.format if format is None else format
958
+
959
+ # Here, the pad() method pads to the maximum of (width, height). It does not need to be validated.
960
+
961
+ validate_preprocess_arguments(
962
+ do_rescale=do_rescale,
963
+ rescale_factor=rescale_factor,
964
+ do_normalize=do_normalize,
965
+ image_mean=image_mean,
966
+ image_std=image_std,
967
+ do_resize=do_resize,
968
+ size=size,
969
+ resample=resample,
970
+ )
971
+
972
+ if not is_batched(images):
973
+ images = [images]
974
+ annotations = [annotations] if annotations is not None else None
975
+
976
+ if not valid_images(images):
977
+ raise ValueError(
978
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
979
+ "torch.Tensor, tf.Tensor or jax.ndarray."
980
+ )
981
+ if annotations is not None and len(images) != len(annotations):
982
+ raise ValueError(
983
+ f"The number of images ({len(images)}) and annotations ({len(annotations)}) do not match."
984
+ )
985
+
986
+ format = AnnotationFormat(format)
987
+ if annotations is not None:
988
+ validate_annotations(format, SUPPORTED_ANNOTATION_FORMATS, annotations)
989
+
990
+ if (
991
+ masks_path is not None
992
+ and format == AnnotationFormat.COCO_PANOPTIC
993
+ and not isinstance(masks_path, (pathlib.Path, str))
994
+ ):
995
+ raise ValueError(
996
+ "The path to the directory containing the mask PNG files should be provided as a"
997
+ f" `pathlib.Path` or string object, but is {type(masks_path)} instead."
998
+ )
999
+
1000
+ # All transformations expect numpy arrays
1001
+ images = [to_numpy_array(image) for image in images]
1002
+
1003
+ if is_scaled_image(images[0]) and do_rescale:
1004
+ logger.warning_once(
1005
+ "It looks like you are trying to rescale already rescaled images. If the input"
1006
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
1007
+ )
1008
+
1009
+ if input_data_format is None:
1010
+ # We assume that all images have the same channel dimension format.
1011
+ input_data_format = infer_channel_dimension_format(images[0])
1012
+
1013
+ # prepare (COCO annotations as a list of Dict -> DETR target as a single Dict per image)
1014
+ if annotations is not None:
1015
+ prepared_images = []
1016
+ prepared_annotations = []
1017
+ for image, target in zip(images, annotations):
1018
+ target = self.prepare_annotation(
1019
+ image,
1020
+ target,
1021
+ format,
1022
+ return_segmentation_masks=return_segmentation_masks,
1023
+ masks_path=masks_path,
1024
+ input_data_format=input_data_format,
1025
+ )
1026
+ prepared_images.append(image)
1027
+ prepared_annotations.append(target)
1028
+ images = prepared_images
1029
+ annotations = prepared_annotations
1030
+ del prepared_images, prepared_annotations
1031
+
1032
+ # transformations
1033
+ if do_resize:
1034
+ if annotations is not None:
1035
+ resized_images, resized_annotations = [], []
1036
+ for image, target in zip(images, annotations):
1037
+ orig_size = get_image_size(image, input_data_format)
1038
+ resized_image = self.resize(
1039
+ image, size=size, resample=resample, input_data_format=input_data_format
1040
+ )
1041
+ resized_annotation = self.resize_annotation(
1042
+ target, orig_size, get_image_size(resized_image, input_data_format)
1043
+ )
1044
+ resized_images.append(resized_image)
1045
+ resized_annotations.append(resized_annotation)
1046
+ images = resized_images
1047
+ annotations = resized_annotations
1048
+ del resized_images, resized_annotations
1049
+ else:
1050
+ images = [
1051
+ self.resize(image, size=size, resample=resample, input_data_format=input_data_format)
1052
+ for image in images
1053
+ ]
1054
+
1055
+ if do_rescale:
1056
+ images = [self.rescale(image, rescale_factor, input_data_format=input_data_format) for image in images]
1057
+
1058
+ if do_normalize:
1059
+ images = [
1060
+ self.normalize(image, image_mean, image_std, input_data_format=input_data_format) for image in images
1061
+ ]
1062
+
1063
+ if do_convert_annotations and annotations is not None:
1064
+ annotations = [
1065
+ self.normalize_annotation(annotation, get_image_size(image, input_data_format))
1066
+ for annotation, image in zip(annotations, images)
1067
+ ]
1068
+
1069
+ if do_pad:
1070
+ # Pads images and returns their mask: {'pixel_values': ..., 'pixel_mask': ...}
1071
+ encoded_inputs = self.pad(
1072
+ images,
1073
+ annotations=annotations,
1074
+ return_pixel_mask=True,
1075
+ data_format=data_format,
1076
+ input_data_format=input_data_format,
1077
+ return_tensors=return_tensors,
1078
+ update_bboxes=do_convert_annotations,
1079
+ )
1080
+ else:
1081
+ images = [
1082
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
1083
+ for image in images
1084
+ ]
1085
+ encoded_inputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
1086
+ if annotations is not None:
1087
+ encoded_inputs["labels"] = [
1088
+ BatchFeature(annotation, tensor_type=return_tensors) for annotation in annotations
1089
+ ]
1090
+
1091
+ return encoded_inputs
1092
+
1093
+ def post_process_object_detection(
1094
+ self,
1095
+ outputs,
1096
+ threshold: float = 0.5,
1097
+ target_sizes: Union[TensorType, List[Tuple]] = None,
1098
+ nms_threshold: float = 0.7,
1099
+ ):
1100
+ """
1101
+ Converts the output of [`DetaForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y,
1102
+ bottom_right_x, bottom_right_y) format. Only supports PyTorch.
1103
+
1104
+ Args:
1105
+ outputs ([`DetrObjectDetectionOutput`]):
1106
+ Raw outputs of the model.
1107
+ threshold (`float`, *optional*, defaults to 0.5):
1108
+ Score threshold to keep object detection predictions.
1109
+ target_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*):
1110
+ Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size
1111
+ (height, width) of each image in the batch. If left to None, predictions will not be resized.
1112
+ nms_threshold (`float`, *optional*, defaults to 0.7):
1113
+ NMS threshold.
1114
+
1115
+ Returns:
1116
+ `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image
1117
+ in the batch as predicted by the model.
1118
+ """
1119
+ out_logits, out_bbox = outputs.logits, outputs.pred_boxes
1120
+ batch_size, num_queries, num_labels = out_logits.shape
1121
+
1122
+ if target_sizes is not None:
1123
+ if len(out_logits) != len(target_sizes):
1124
+ raise ValueError(
1125
+ "Make sure that you pass in as many target sizes as the batch dimension of the logits"
1126
+ )
1127
+
1128
+ prob = out_logits.sigmoid()
1129
+
1130
+ all_scores = prob.view(batch_size, num_queries * num_labels).to(out_logits.device)
1131
+ all_indexes = torch.arange(num_queries * num_labels)[None].repeat(batch_size, 1).to(out_logits.device)
1132
+ all_boxes = torch.div(all_indexes, out_logits.shape[2], rounding_mode="floor")
1133
+ all_labels = all_indexes % out_logits.shape[2]
1134
+
1135
+ boxes = center_to_corners_format(out_bbox)
1136
+ boxes = torch.gather(boxes, 1, all_boxes.unsqueeze(-1).repeat(1, 1, 4))
1137
+
1138
+ # and from relative [0, 1] to absolute [0, height] coordinates
1139
+ if target_sizes is not None:
1140
+ if isinstance(target_sizes, List):
1141
+ img_h = torch.Tensor([i[0] for i in target_sizes])
1142
+ img_w = torch.Tensor([i[1] for i in target_sizes])
1143
+ else:
1144
+ img_h, img_w = target_sizes.unbind(1)
1145
+
1146
+ scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)
1147
+ boxes = boxes * scale_fct[:, None, :]
1148
+
1149
+ results = []
1150
+ for b in range(batch_size):
1151
+ box = boxes[b]
1152
+ score = all_scores[b]
1153
+ lbls = all_labels[b]
1154
+
1155
+ pre_topk = score.topk(min(10000, num_queries * num_labels)).indices
1156
+ box = box[pre_topk]
1157
+ score = score[pre_topk]
1158
+ lbls = lbls[pre_topk]
1159
+
1160
+ # apply NMS
1161
+ keep_inds = batched_nms(box, score, lbls, nms_threshold)[:100]
1162
+ score = score[keep_inds]
1163
+ lbls = lbls[keep_inds]
1164
+ box = box[keep_inds]
1165
+
1166
+ results.append(
1167
+ {
1168
+ "scores": score[score > threshold],
1169
+ "labels": lbls[score > threshold],
1170
+ "boxes": box[score > threshold],
1171
+ }
1172
+ )
1173
+
1174
+ return results
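A minimal usage sketch for the image processor and the `post_process_object_detection` method above. The checkpoint name and image path are illustrative assumptions, not taken from this diff; any DETA checkpoint with matching preprocessing should behave the same way. Note that the NMS step relies on torchvision's `batched_nms`.

```python
import torch
from PIL import Image
from transformers import AutoImageProcessor, DetaForObjectDetection

# Illustrative checkpoint and image; substitute your own.
checkpoint = "jozhang97/deta-swin-large"
image = Image.open("street_scene.jpg")

processor = AutoImageProcessor.from_pretrained(checkpoint)
model = DetaForObjectDetection.from_pretrained(checkpoint)

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Map normalized (cx, cy, w, h) predictions back to absolute corner boxes for the
# original image size, then filter by score and apply NMS as implemented above.
target_sizes = torch.tensor([image.size[::-1]])  # (height, width)
result = processor.post_process_object_detection(
    outputs, threshold=0.5, target_sizes=target_sizes, nms_threshold=0.7
)[0]

for score, label, box in zip(result["scores"], result["labels"], result["boxes"]):
    print(f"{model.config.id2label[label.item()]}: {score:.2f} -> {box.tolist()}")
```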
venv/lib/python3.10/site-packages/transformers/models/deta/modeling_deta.py ADDED
The diff for this file is too large to render. See raw diff
 
venv/lib/python3.10/site-packages/transformers/models/graphormer/__init__.py ADDED
@@ -0,0 +1,57 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
17
+
18
+
19
+ _import_structure = {
20
+ "configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
21
+ }
22
+
23
+ try:
24
+ if not is_torch_available():
25
+ raise OptionalDependencyNotAvailable()
26
+ except OptionalDependencyNotAvailable:
27
+ pass
28
+ else:
29
+ _import_structure["modeling_graphormer"] = [
30
+ "GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
31
+ "GraphormerForGraphClassification",
32
+ "GraphormerModel",
33
+ "GraphormerPreTrainedModel",
34
+ ]
35
+
36
+
37
+ if TYPE_CHECKING:
38
+ from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
39
+
40
+ try:
41
+ if not is_torch_available():
42
+ raise OptionalDependencyNotAvailable()
43
+ except OptionalDependencyNotAvailable:
44
+ pass
45
+ else:
46
+ from .modeling_graphormer import (
47
+ GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
48
+ GraphormerForGraphClassification,
49
+ GraphormerModel,
50
+ GraphormerPreTrainedModel,
51
+ )
52
+
53
+
54
+ else:
55
+ import sys
56
+
57
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
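Under the lazy-module scheme above, the configuration can be imported on its own, while the modeling classes are only resolved when PyTorch is available, mirroring the `is_torch_available()` guard. A small sketch; every hyperparameter value below is illustrative:

```python
from transformers import GraphormerConfig, GraphormerForGraphClassification

# A deliberately small Graphormer; requires torch, matching the guard in __init__.py.
config = GraphormerConfig(
    num_hidden_layers=4,
    embedding_dim=128,
    ffn_embedding_dim=128,
    num_attention_heads=8,
)
model = GraphormerForGraphClassification(config)  # randomly initialized, no download
print(f"{sum(p.numel() for p in model.parameters()):,} parameters")
```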
venv/lib/python3.10/site-packages/transformers/models/graphormer/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (991 Bytes). View file
 
venv/lib/python3.10/site-packages/transformers/models/graphormer/__pycache__/collating_graphormer.cpython-310.pyc ADDED
Binary file (4.74 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/graphormer/__pycache__/configuration_graphormer.cpython-310.pyc ADDED
Binary file (9.13 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/graphormer/__pycache__/modeling_graphormer.cpython-310.pyc ADDED
Binary file (25.4 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/graphormer/algos_graphormer.pyx ADDED
@@ -0,0 +1,107 @@
1
+ # Copyright (c) Microsoft Corporation and HuggingFace
2
+ # Licensed under the MIT License.
3
+
4
+ import cython
5
+
6
+ cimport numpy
7
+ from cython.parallel cimport parallel, prange
8
+
9
+ import numpy as np
10
+
11
+
12
+ # Reduce this number if matrices are too big for large graphs
13
+ UNREACHABLE_NODE_DISTANCE = 510
14
+
15
+ def floyd_warshall(adjacency_matrix):
16
+ """
17
+ Applies the Floyd-Warshall algorithm to the adjacency matrix, to compute the
18
+ shortest paths distance between all nodes, up to UNREACHABLE_NODE_DISTANCE.
19
+ """
20
+ (nrows, ncols) = adjacency_matrix.shape
21
+ assert nrows == ncols
22
+ cdef unsigned int n = nrows
23
+
24
+ adj_mat_copy = adjacency_matrix.astype(np.int32, order='C', casting='safe', copy=True)
25
+ assert adj_mat_copy.flags['C_CONTIGUOUS']
26
+ cdef numpy.ndarray[numpy.int32_t, ndim=2, mode='c'] M = adj_mat_copy
27
+ cdef numpy.ndarray[numpy.int32_t, ndim=2, mode='c'] path = -1 * np.ones([n, n], dtype=np.int32)
28
+
29
+ cdef unsigned int i, j, k
30
+ cdef numpy.int32_t M_ij, M_ik, cost_ikkj
31
+ cdef numpy.int32_t* M_ptr = &M[0,0]
32
+ cdef numpy.int32_t* M_i_ptr
33
+ cdef numpy.int32_t* M_k_ptr
34
+
35
+ # set unreachable nodes distance to UNREACHABLE_NODE_DISTANCE
36
+ for i in range(n):
37
+ for j in range(n):
38
+ if i == j:
39
+ M[i][j] = 0
40
+ elif M[i][j] == 0:
41
+ M[i][j] = UNREACHABLE_NODE_DISTANCE
42
+
43
+ # floyed algo
44
+ for k in range(n):
45
+ M_k_ptr = M_ptr + n*k
46
+ for i in range(n):
47
+ M_i_ptr = M_ptr + n*i
48
+ M_ik = M_i_ptr[k]
49
+ for j in range(n):
50
+ cost_ikkj = M_ik + M_k_ptr[j]
51
+ M_ij = M_i_ptr[j]
52
+ if M_ij > cost_ikkj:
53
+ M_i_ptr[j] = cost_ikkj
54
+ path[i][j] = k
55
+
56
+ # set unreachable path to UNREACHABLE_NODE_DISTANCE
57
+ for i in range(n):
58
+ for j in range(n):
59
+ if M[i][j] >= UNREACHABLE_NODE_DISTANCE:
60
+ path[i][j] = UNREACHABLE_NODE_DISTANCE
61
+ M[i][j] = UNREACHABLE_NODE_DISTANCE
62
+
63
+ return M, path
64
+
65
+
66
+ def get_all_edges(path, i, j):
67
+ """
68
+ Recursive function to compute all possible paths between two nodes from the graph adjacency matrix.
69
+ """
70
+ cdef int k = path[i][j]
71
+ if k == -1:
72
+ return []
73
+ else:
74
+ return get_all_edges(path, i, k) + [k] + get_all_edges(path, k, j)
75
+
76
+
77
+ def gen_edge_input(max_dist, path, edge_feat):
78
+ """
79
+ Generates the full edge feature and adjacency matrix.
80
+ Shape: num_nodes * num_nodes * max_distance_between_nodes * num_edge_features
81
+ Dim 1 is the input node, dim 2 the output node of the edge, dim 3 the depth of the edge, dim 4 the feature
82
+ """
83
+ (nrows, ncols) = path.shape
84
+ assert nrows == ncols
85
+ cdef unsigned int n = nrows
86
+ cdef unsigned int max_dist_copy = max_dist
87
+
88
+ path_copy = path.astype(long, order='C', casting='safe', copy=True)
89
+ edge_feat_copy = edge_feat.astype(long, order='C', casting='safe', copy=True)
90
+ assert path_copy.flags['C_CONTIGUOUS']
91
+ assert edge_feat_copy.flags['C_CONTIGUOUS']
92
+
93
+ cdef numpy.ndarray[numpy.int32_t, ndim=4, mode='c'] edge_fea_all = -1 * np.ones([n, n, max_dist_copy, edge_feat.shape[-1]], dtype=np.int32)
94
+ cdef unsigned int i, j, k, num_path, cur
95
+
96
+ for i in range(n):
97
+ for j in range(n):
98
+ if i == j:
99
+ continue
100
+ if path_copy[i][j] == UNREACHABLE_NODE_DISTANCE:
101
+ continue
102
+ path = [i] + get_all_edges(path_copy, i, j) + [j]
103
+ num_path = len(path) - 1
104
+ for k in range(num_path):
105
+ edge_fea_all[i, j, k, :] = edge_feat_copy[path[k], path[k+1], :]
106
+
107
+ return edge_fea_all
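The Cython kernel above is compiled on the fly by the collator through `pyximport`. For readers without a Cython toolchain, the same distance convention can be sketched in plain NumPy. This is a reference sketch only, not the kernel the library compiles; the name `floyd_warshall_reference` is illustrative.

```python
import numpy as np

UNREACHABLE_NODE_DISTANCE = 510  # same sentinel as in algos_graphormer.pyx


def floyd_warshall_reference(adjacency_matrix):
    """Pure-NumPy sketch of the convention used by floyd_warshall() above:
    0 on the diagonal, UNREACHABLE_NODE_DISTANCE for unreachable pairs, and a
    `path` matrix holding one intermediate node (or -1) per reachable pair."""
    n = adjacency_matrix.shape[0]
    dist = adjacency_matrix.astype(np.int32)  # astype returns a writable copy
    path = -1 * np.ones((n, n), dtype=np.int32)

    dist[dist == 0] = UNREACHABLE_NODE_DISTANCE
    np.fill_diagonal(dist, 0)

    for k in range(n):
        # candidate distance going through node k, applied to all (i, j) pairs at once
        through_k = dist[:, k, None] + dist[None, k, :]
        improved = through_k < dist
        dist = np.where(improved, through_k, dist)
        path = np.where(improved, k, path)

    unreachable = dist >= UNREACHABLE_NODE_DISTANCE
    dist[unreachable] = UNREACHABLE_NODE_DISTANCE
    path[unreachable] = UNREACHABLE_NODE_DISTANCE
    return dist, path


# 3-node path graph 0 - 1 - 2, with edges written in both directions
adj = np.array([[0, 1, 0],
                [1, 0, 1],
                [0, 1, 0]])
dist, path = floyd_warshall_reference(adj)
print(dist[0, 2], path[0, 2])  # 2 hops from node 0 to node 2, via intermediate node 1
```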
venv/lib/python3.10/site-packages/transformers/models/graphormer/collating_graphormer.py ADDED
@@ -0,0 +1,134 @@
1
+ # Copyright (c) Microsoft Corporation and HuggingFace
2
+ # Licensed under the MIT License.
3
+
4
+ from typing import Any, Dict, List, Mapping
5
+
6
+ import numpy as np
7
+ import torch
8
+
9
+ from ...utils import is_cython_available, requires_backends
10
+
11
+
12
+ if is_cython_available():
13
+ import pyximport
14
+
15
+ pyximport.install(setup_args={"include_dirs": np.get_include()})
16
+ from . import algos_graphormer # noqa E402
17
+
18
+
19
+ def convert_to_single_emb(x, offset: int = 512):
20
+ feature_num = x.shape[1] if len(x.shape) > 1 else 1
21
+ feature_offset = 1 + np.arange(0, feature_num * offset, offset, dtype=np.int64)
22
+ x = x + feature_offset
23
+ return x
24
+
25
+
26
+ def preprocess_item(item, keep_features=True):
27
+ requires_backends(preprocess_item, ["cython"])
28
+
29
+ if keep_features and "edge_attr" in item.keys(): # edge_attr
30
+ edge_attr = np.asarray(item["edge_attr"], dtype=np.int64)
31
+ else:
32
+ edge_attr = np.ones((len(item["edge_index"][0]), 1), dtype=np.int64) # same embedding for all
33
+
34
+ if keep_features and "node_feat" in item.keys(): # input_nodes
35
+ node_feature = np.asarray(item["node_feat"], dtype=np.int64)
36
+ else:
37
+ node_feature = np.ones((item["num_nodes"], 1), dtype=np.int64) # same embedding for all
38
+
39
+ edge_index = np.asarray(item["edge_index"], dtype=np.int64)
40
+
41
+ input_nodes = convert_to_single_emb(node_feature) + 1
42
+ num_nodes = item["num_nodes"]
43
+
44
+ if len(edge_attr.shape) == 1:
45
+ edge_attr = edge_attr[:, None]
46
+ attn_edge_type = np.zeros([num_nodes, num_nodes, edge_attr.shape[-1]], dtype=np.int64)
47
+ attn_edge_type[edge_index[0], edge_index[1]] = convert_to_single_emb(edge_attr) + 1
48
+
49
+ # node adj matrix [num_nodes, num_nodes] bool
50
+ adj = np.zeros([num_nodes, num_nodes], dtype=bool)
51
+ adj[edge_index[0], edge_index[1]] = True
52
+
53
+ shortest_path_result, path = algos_graphormer.floyd_warshall(adj)
54
+ max_dist = np.amax(shortest_path_result)
55
+
56
+ input_edges = algos_graphormer.gen_edge_input(max_dist, path, attn_edge_type)
57
+ attn_bias = np.zeros([num_nodes + 1, num_nodes + 1], dtype=np.single) # with graph token
58
+
59
+ # combine
60
+ item["input_nodes"] = input_nodes + 1 # we shift all indices by one for padding
61
+ item["attn_bias"] = attn_bias
62
+ item["attn_edge_type"] = attn_edge_type
63
+ item["spatial_pos"] = shortest_path_result.astype(np.int64) + 1 # we shift all indices by one for padding
64
+ item["in_degree"] = np.sum(adj, axis=1).reshape(-1) + 1 # we shift all indices by one for padding
65
+ item["out_degree"] = item["in_degree"] # for undirected graph
66
+ item["input_edges"] = input_edges + 1 # we shift all indices by one for padding
67
+ if "labels" not in item:
68
+ item["labels"] = item["y"]
69
+
70
+ return item
71
+
72
+
73
+ class GraphormerDataCollator:
74
+ def __init__(self, spatial_pos_max=20, on_the_fly_processing=False):
75
+ if not is_cython_available():
76
+ raise ImportError("Graphormer preprocessing needs Cython (pyximport)")
77
+
78
+ self.spatial_pos_max = spatial_pos_max
79
+ self.on_the_fly_processing = on_the_fly_processing
80
+
81
+ def __call__(self, features: List[dict]) -> Dict[str, Any]:
82
+ if self.on_the_fly_processing:
83
+ features = [preprocess_item(i) for i in features]
84
+
85
+ if not isinstance(features[0], Mapping):
86
+ features = [vars(f) for f in features]
87
+ batch = {}
88
+
89
+ max_node_num = max(len(i["input_nodes"]) for i in features)
90
+ node_feat_size = len(features[0]["input_nodes"][0])
91
+ edge_feat_size = len(features[0]["attn_edge_type"][0][0])
92
+ max_dist = max(len(i["input_edges"][0][0]) for i in features)
93
+ edge_input_size = len(features[0]["input_edges"][0][0][0])
94
+ batch_size = len(features)
95
+
96
+ batch["attn_bias"] = torch.zeros(batch_size, max_node_num + 1, max_node_num + 1, dtype=torch.float)
97
+ batch["attn_edge_type"] = torch.zeros(batch_size, max_node_num, max_node_num, edge_feat_size, dtype=torch.long)
98
+ batch["spatial_pos"] = torch.zeros(batch_size, max_node_num, max_node_num, dtype=torch.long)
99
+ batch["in_degree"] = torch.zeros(batch_size, max_node_num, dtype=torch.long)
100
+ batch["input_nodes"] = torch.zeros(batch_size, max_node_num, node_feat_size, dtype=torch.long)
101
+ batch["input_edges"] = torch.zeros(
102
+ batch_size, max_node_num, max_node_num, max_dist, edge_input_size, dtype=torch.long
103
+ )
104
+
105
+ for ix, f in enumerate(features):
106
+ for k in ["attn_bias", "attn_edge_type", "spatial_pos", "in_degree", "input_nodes", "input_edges"]:
107
+ f[k] = torch.tensor(f[k])
108
+
109
+ if len(f["attn_bias"][1:, 1:][f["spatial_pos"] >= self.spatial_pos_max]) > 0:
110
+ f["attn_bias"][1:, 1:][f["spatial_pos"] >= self.spatial_pos_max] = float("-inf")
111
+
112
+ batch["attn_bias"][ix, : f["attn_bias"].shape[0], : f["attn_bias"].shape[1]] = f["attn_bias"]
113
+ batch["attn_edge_type"][ix, : f["attn_edge_type"].shape[0], : f["attn_edge_type"].shape[1], :] = f[
114
+ "attn_edge_type"
115
+ ]
116
+ batch["spatial_pos"][ix, : f["spatial_pos"].shape[0], : f["spatial_pos"].shape[1]] = f["spatial_pos"]
117
+ batch["in_degree"][ix, : f["in_degree"].shape[0]] = f["in_degree"]
118
+ batch["input_nodes"][ix, : f["input_nodes"].shape[0], :] = f["input_nodes"]
119
+ batch["input_edges"][
120
+ ix, : f["input_edges"].shape[0], : f["input_edges"].shape[1], : f["input_edges"].shape[2], :
121
+ ] = f["input_edges"]
122
+
123
+ batch["out_degree"] = batch["in_degree"]
124
+
125
+ sample = features[0]["labels"]
126
+ if len(sample) == 1: # one task
127
+ if isinstance(sample[0], float): # regression
128
+ batch["labels"] = torch.from_numpy(np.concatenate([i["labels"] for i in features]))
129
+ else: # binary classification
130
+ batch["labels"] = torch.from_numpy(np.concatenate([i["labels"] for i in features]))
131
+ else: # multi task classification, left to float to keep the NaNs
132
+ batch["labels"] = torch.from_numpy(np.stack([i["labels"] for i in features], axis=0))
133
+
134
+ return batch
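A hypothetical end-to-end sketch of the preprocessing and collation above, using two tiny hand-written graphs. The dictionaries follow the OGB-style fields that `preprocess_item` reads (`edge_index`, `edge_attr`, `node_feat`, `num_nodes`, `y`); the values are made up for illustration, and Cython must be installed so that `algos_graphormer.pyx` can be compiled via `pyximport`.

```python
from transformers.models.graphormer.collating_graphormer import (
    GraphormerDataCollator,
    preprocess_item,
)

# Undirected triangle 0-1-2, edges listed in both directions (COO layout).
triangle = {
    "edge_index": [[0, 1, 1, 2, 2, 0], [1, 0, 2, 1, 0, 2]],
    "edge_attr": [[1]] * 6,
    "node_feat": [[1], [2], [3]],
    "num_nodes": 3,
    "y": [0],
}
# Single undirected edge 0-1.
pair = {
    "edge_index": [[0, 1], [1, 0]],
    "edge_attr": [[1], [1]],
    "node_feat": [[1], [1]],
    "num_nodes": 2,
    "y": [1],
}

collator = GraphormerDataCollator()  # on_the_fly_processing=False: items are pre-processed here
batch = collator([preprocess_item(dict(triangle)), preprocess_item(dict(pair))])

print(batch["input_nodes"].shape)  # (2, max_node_num, node_feature_size) -> torch.Size([2, 3, 1])
print(batch["spatial_pos"].shape)  # (2, max_node_num, max_node_num)      -> torch.Size([2, 3, 3])
print(batch["labels"])             # tensor([0, 1])
```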
venv/lib/python3.10/site-packages/transformers/models/graphormer/configuration_graphormer.py ADDED
@@ -0,0 +1,218 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Microsoft, clefourrier and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Graphormer model configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...utils import logging
19
+
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+
24
+ from ..deprecated._archive_maps import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
25
+
26
+
27
+ class GraphormerConfig(PretrainedConfig):
28
+ r"""
29
+ This is the configuration class to store the configuration of a [`~GraphormerModel`]. It is used to instantiate an
30
+ Graphormer model according to the specified arguments, defining the model architecture. Instantiating a
31
+ configuration with the defaults will yield a similar configuration to that of the Graphormer
32
+ [graphormer-base-pcqm4mv1](https://huggingface.co/graphormer-base-pcqm4mv1) architecture.
33
+
34
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
35
+ documentation from [`PretrainedConfig`] for more information.
36
+
37
+
38
+ Args:
39
+ num_classes (`int`, *optional*, defaults to 1):
40
+ Number of target classes or labels, set to n for binary classification of n tasks.
41
+ num_atoms (`int`, *optional*, defaults to 512*9):
42
+ Number of node types in the graphs.
43
+ num_edges (`int`, *optional*, defaults to 512*3):
44
+ Number of edges types in the graph.
45
+ num_in_degree (`int`, *optional*, defaults to 512):
46
+ Number of in degrees types in the input graphs.
47
+ num_out_degree (`int`, *optional*, defaults to 512):
48
+ Number of out degrees types in the input graphs.
49
+ num_edge_dis (`int`, *optional*, defaults to 128):
50
+ Number of edge distance types in the input graphs.
51
+ multi_hop_max_dist (`int`, *optional*, defaults to 20):
52
+ Maximum distance of multi hop edges between two nodes.
53
+ spatial_pos_max (`int`, *optional*, defaults to 1024):
54
+ Maximum distance between nodes in the graph attention bias matrices, used during preprocessing and
55
+ collation.
56
+ edge_type (`str`, *optional*, defaults to `"multi_hop"`):
57
+ Type of edge relation chosen.
58
+ max_nodes (`int`, *optional*, defaults to 512):
59
+ Maximum number of nodes which can be parsed for the input graphs.
60
+ share_input_output_embed (`bool`, *optional*, defaults to `False`):
61
+ Shares the embedding layer between encoder and decoder - careful, True is not implemented.
62
+ num_hidden_layers (`int`, *optional*, defaults to 12):
63
+ Number of layers.
64
+ embedding_dim (`int`, *optional*, defaults to 768):
65
+ Dimension of the embedding layer in encoder.
66
+ ffn_embedding_dim (`int`, *optional*, defaults to 768):
67
+ Dimension of the "intermediate" (often named feed-forward) layer in encoder.
68
+ num_attention_heads (`int`, *optional*, defaults to 32):
69
+ Number of attention heads in the encoder.
70
+ self_attention (`bool`, *optional*, defaults to `True`):
71
+ Model is self attentive (False not implemented).
72
+ activation_fn (`str` or `function`, *optional*, defaults to `"gelu"`):
73
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
74
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
75
+ dropout (`float`, *optional*, defaults to 0.1):
76
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
77
+ attention_dropout (`float`, *optional*, defaults to 0.1):
78
+ The dropout probability for the attention weights.
79
+ activation_dropout (`float`, *optional*, defaults to 0.1):
80
+ The dropout probability for the activation of the linear transformer layer.
81
+ layerdrop (`float`, *optional*, defaults to 0.0):
82
+ The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
83
+ for more details.
84
+ bias (`bool`, *optional*, defaults to `True`):
85
+ Uses bias in the attention module - unsupported at the moment.
86
+ embed_scale (`float`, *optional*, defaults to None):
87
+ Scaling factor for the node embeddings.
88
+ num_trans_layers_to_freeze (`int`, *optional*, defaults to 0):
89
+ Number of transformer layers to freeze.
90
+ encoder_normalize_before (`bool`, *optional*, defaults to `False`):
91
+ Normalize features before encoding the graph, i.e. apply the layer norm before each encoder block.
92
+ pre_layernorm (`bool`, *optional*, defaults to `False`):
93
+ Apply layernorm before self attention and the feed forward network. Without this, post layernorm will be
94
+ used.
95
+ apply_graphormer_init (`bool`, *optional*, defaults to `False`):
96
+ Apply a custom graphormer initialisation to the model before training.
97
+ freeze_embeddings (`bool`, *optional*, defaults to `False`):
98
+ Freeze the embedding layer, or train it along the model.
99
101
+ q_noise (`float`, *optional*, defaults to 0.0):
102
+ Amount of quantization noise (see "Training with Quantization Noise for Extreme Model Compression"). (For
103
+ more detail, see fairseq's documentation on quant_noise).
104
+ qn_block_size (`int`, *optional*, defaults to 8):
105
+ Size of the blocks for subsequent quantization with iPQ (see q_noise).
106
+ kdim (`int`, *optional*, defaults to None):
107
+ Dimension of the key in the attention, if different from the other values.
108
+ vdim (`int`, *optional*, defaults to None):
109
+ Dimension of the value in the attention, if different from the other values.
110
+ use_cache (`bool`, *optional*, defaults to `True`):
111
+ Whether or not the model should return the last key/values attentions (not used by all models).
112
+ traceable (`bool`, *optional*, defaults to `False`):
113
+ Changes return value of the encoder's inner_state to stacked tensors.
114
+
115
+ Example:
116
+ ```python
117
+ >>> from transformers import GraphormerForGraphClassification, GraphormerConfig
118
+
119
+ >>> # Initializing a Graphormer graphormer-base-pcqm4mv2 style configuration
120
+ >>> configuration = GraphormerConfig()
121
+
122
+ >>> # Initializing a model from the graphormer-base-pcqm4mv1 style configuration
123
+ >>> model = GraphormerForGraphClassification(configuration)
124
+
125
+ >>> # Accessing the model configuration
126
+ >>> configuration = model.config
127
+ ```
128
+ """
129
+
130
+ model_type = "graphormer"
131
+ keys_to_ignore_at_inference = ["past_key_values"]
132
+
133
+ def __init__(
134
+ self,
135
+ num_classes: int = 1,
136
+ num_atoms: int = 512 * 9,
137
+ num_edges: int = 512 * 3,
138
+ num_in_degree: int = 512,
139
+ num_out_degree: int = 512,
140
+ num_spatial: int = 512,
141
+ num_edge_dis: int = 128,
142
+ multi_hop_max_dist: int = 5, # sometimes is 20
143
+ spatial_pos_max: int = 1024,
144
+ edge_type: str = "multi_hop",
145
+ max_nodes: int = 512,
146
+ share_input_output_embed: bool = False,
147
+ num_hidden_layers: int = 12,
148
+ embedding_dim: int = 768,
149
+ ffn_embedding_dim: int = 768,
150
+ num_attention_heads: int = 32,
151
+ dropout: float = 0.1,
152
+ attention_dropout: float = 0.1,
153
+ activation_dropout: float = 0.1,
154
+ layerdrop: float = 0.0,
155
+ encoder_normalize_before: bool = False,
156
+ pre_layernorm: bool = False,
157
+ apply_graphormer_init: bool = False,
158
+ activation_fn: str = "gelu",
159
+ embed_scale: float = None,
160
+ freeze_embeddings: bool = False,
161
+ num_trans_layers_to_freeze: int = 0,
162
+ traceable: bool = False,
163
+ q_noise: float = 0.0,
164
+ qn_block_size: int = 8,
165
+ kdim: int = None,
166
+ vdim: int = None,
167
+ bias: bool = True,
168
+ self_attention: bool = True,
169
+ pad_token_id=0,
170
+ bos_token_id=1,
171
+ eos_token_id=2,
172
+ **kwargs,
173
+ ):
174
+ self.num_classes = num_classes
175
+ self.num_atoms = num_atoms
176
+ self.num_in_degree = num_in_degree
177
+ self.num_out_degree = num_out_degree
178
+ self.num_edges = num_edges
179
+ self.num_spatial = num_spatial
180
+ self.num_edge_dis = num_edge_dis
181
+ self.edge_type = edge_type
182
+ self.multi_hop_max_dist = multi_hop_max_dist
183
+ self.spatial_pos_max = spatial_pos_max
184
+ self.max_nodes = max_nodes
185
+ self.num_hidden_layers = num_hidden_layers
186
+ self.embedding_dim = embedding_dim
187
+ self.hidden_size = embedding_dim
188
+ self.ffn_embedding_dim = ffn_embedding_dim
189
+ self.num_attention_heads = num_attention_heads
190
+ self.dropout = dropout
191
+ self.attention_dropout = attention_dropout
192
+ self.activation_dropout = activation_dropout
193
+ self.layerdrop = layerdrop
194
+ self.encoder_normalize_before = encoder_normalize_before
195
+ self.pre_layernorm = pre_layernorm
196
+ self.apply_graphormer_init = apply_graphormer_init
197
+ self.activation_fn = activation_fn
198
+ self.embed_scale = embed_scale
199
+ self.freeze_embeddings = freeze_embeddings
200
+ self.num_trans_layers_to_freeze = num_trans_layers_to_freeze
201
+ self.share_input_output_embed = share_input_output_embed
202
+ self.traceable = traceable
203
+ self.q_noise = q_noise
204
+ self.qn_block_size = qn_block_size
205
+
206
+ # These parameters are here for future extensions
207
+ # atm, the model only supports self attention
208
+ self.kdim = kdim
209
+ self.vdim = vdim
210
+ self.self_attention = self_attention
211
+ self.bias = bias
212
+
213
+ super().__init__(
214
+ pad_token_id=pad_token_id,
215
+ bos_token_id=bos_token_id,
216
+ eos_token_id=eos_token_id,
217
+ **kwargs,
218
+ )
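Beyond the docstring example above, a common pattern is to shrink the defaults and round-trip the configuration to disk. A short illustrative sketch; the values and the output directory are assumptions:

```python
from transformers import GraphormerConfig

# A smaller Graphormer than the pcqm4m defaults; every value here is illustrative.
config = GraphormerConfig(
    num_classes=1,            # single classification/regression target
    num_hidden_layers=6,
    embedding_dim=256,
    ffn_embedding_dim=256,
    num_attention_heads=8,    # 256 / 8 = 32-dimensional heads
    dropout=0.1,
)
config.save_pretrained("./graphormer-small")       # writes config.json
reloaded = GraphormerConfig.from_pretrained("./graphormer-small")
assert reloaded.embedding_dim == 256
```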
venv/lib/python3.10/site-packages/transformers/models/graphormer/modeling_graphormer.py ADDED
@@ -0,0 +1,911 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Microsoft, clefourrier The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch Graphormer model."""
16
+
17
+ import math
18
+ from typing import Iterable, Iterator, List, Optional, Tuple, Union
19
+
20
+ import torch
21
+ import torch.nn as nn
22
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
23
+
24
+ from ...activations import ACT2FN
25
+ from ...modeling_outputs import (
26
+ BaseModelOutputWithNoAttention,
27
+ SequenceClassifierOutput,
28
+ )
29
+ from ...modeling_utils import PreTrainedModel
30
+ from ...utils import logging
31
+ from .configuration_graphormer import GraphormerConfig
32
+
33
+
34
+ logger = logging.get_logger(__name__)
35
+
36
+ _CHECKPOINT_FOR_DOC = "graphormer-base-pcqm4mv1"
37
+ _CONFIG_FOR_DOC = "GraphormerConfig"
38
+
39
+
40
+ from ..deprecated._archive_maps import GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
41
+
42
+
43
+ def quant_noise(module: nn.Module, p: float, block_size: int):
44
+ """
45
+ From:
46
+ https://github.com/facebookresearch/fairseq/blob/dd0079bde7f678b0cd0715cbd0ae68d661b7226d/fairseq/modules/quant_noise.py
47
+
48
+ Wraps modules and applies quantization noise to the weights for subsequent quantization with Iterative Product
49
+ Quantization as described in "Training with Quantization Noise for Extreme Model Compression"
50
+
51
+ Args:
52
+ - module: nn.Module
53
+ - p: amount of Quantization Noise
54
+ - block_size: size of the blocks for subsequent quantization with iPQ
55
+
56
+ Remarks:
57
+ - Module weights must have the right sizes wrt the block size
58
+ - Only Linear, Embedding and Conv2d modules are supported for the moment
59
+ - For more detail on how to quantize by blocks with convolutional weights, see "And the Bit Goes Down:
60
+ Revisiting the Quantization of Neural Networks"
61
+ - We implement the simplest form of noise here as stated in the paper which consists in randomly dropping
62
+ blocks
63
+ """
64
+
65
+ # if no quantization noise, don't register hook
66
+ if p <= 0:
67
+ return module
68
+
69
+ # supported modules
70
+ if not isinstance(module, (nn.Linear, nn.Embedding, nn.Conv2d)):
71
+ raise NotImplementedError("Module unsupported for quant_noise.")
72
+
73
+ # test whether module.weight has the right sizes wrt block_size
74
+ is_conv = module.weight.ndim == 4
75
+
76
+ # 2D matrix
77
+ if not is_conv:
78
+ if module.weight.size(1) % block_size != 0:
79
+ raise AssertionError("Input features must be a multiple of block sizes")
80
+
81
+ # 4D matrix
82
+ else:
83
+ # 1x1 convolutions
84
+ if module.kernel_size == (1, 1):
85
+ if module.in_channels % block_size != 0:
86
+ raise AssertionError("Input channels must be a multiple of block sizes")
87
+ # regular convolutions
88
+ else:
89
+ k = module.kernel_size[0] * module.kernel_size[1]
90
+ if k % block_size != 0:
91
+ raise AssertionError("Kernel size must be a multiple of block size")
92
+
93
+ def _forward_pre_hook(mod, input):
94
+ # no noise for evaluation
95
+ if mod.training:
96
+ if not is_conv:
97
+ # gather weight and sizes
98
+ weight = mod.weight
99
+ in_features = weight.size(1)
100
+ out_features = weight.size(0)
101
+
102
+ # split weight matrix into blocks and randomly drop selected blocks
103
+ mask = torch.zeros(in_features // block_size * out_features, device=weight.device)
104
+ mask.bernoulli_(p)
105
+ mask = mask.repeat_interleave(block_size, -1).view(-1, in_features)
106
+
107
+ else:
108
+ # gather weight and sizes
109
+ weight = mod.weight
110
+ in_channels = mod.in_channels
111
+ out_channels = mod.out_channels
112
+
113
+ # split weight matrix into blocks and randomly drop selected blocks
114
+ if mod.kernel_size == (1, 1):
115
+ mask = torch.zeros(
116
+ int(in_channels // block_size * out_channels),
117
+ device=weight.device,
118
+ )
119
+ mask.bernoulli_(p)
120
+ mask = mask.repeat_interleave(block_size, -1).view(-1, in_channels)
121
+ else:
122
+ mask = torch.zeros(weight.size(0), weight.size(1), device=weight.device)
123
+ mask.bernoulli_(p)
124
+ mask = mask.unsqueeze(2).unsqueeze(3).repeat(1, 1, mod.kernel_size[0], mod.kernel_size[1])
125
+
126
+ # scale weights and apply mask
127
+ mask = mask.to(torch.bool) # x.bool() is not currently supported in TorchScript
128
+ s = 1 / (1 - p)
129
+ mod.weight.data = s * weight.masked_fill(mask, 0)
130
+
131
+ module.register_forward_pre_hook(_forward_pre_hook)
132
+ return module
133
+
134
+
135
+ class LayerDropModuleList(nn.ModuleList):
136
+ """
137
+ From:
138
+ https://github.com/facebookresearch/fairseq/blob/dd0079bde7f678b0cd0715cbd0ae68d661b7226d/fairseq/modules/layer_drop.py
139
+ A LayerDrop implementation based on [`torch.nn.ModuleList`]. LayerDrop as described in
140
+ https://arxiv.org/abs/1909.11556.
141
+
142
+ We refresh the choice of which layers to drop every time we iterate over the LayerDropModuleList instance. During
143
+ evaluation we always iterate over all layers.
144
+
145
+ Usage:
146
+
147
+ ```python
148
+ layers = LayerDropList(p=0.5, modules=[layer1, layer2, layer3])
149
+ for layer in layers: # this might iterate over layers 1 and 3
150
+ x = layer(x)
151
+ for layer in layers: # this might iterate over all layers
152
+ x = layer(x)
153
+ for layer in layers: # this might not iterate over any layers
154
+ x = layer(x)
155
+ ```
156
+
157
+ Args:
158
+ p (float): probability of dropping out each layer
159
+ modules (iterable, optional): an iterable of modules to add
160
+ """
161
+
162
+ def __init__(self, p: float, modules: Optional[Iterable[nn.Module]] = None):
163
+ super().__init__(modules)
164
+ self.p = p
165
+
166
+ def __iter__(self) -> Iterator[nn.Module]:
167
+ dropout_probs = torch.empty(len(self)).uniform_()
168
+ for i, m in enumerate(super().__iter__()):
169
+ if not self.training or (dropout_probs[i] > self.p):
170
+ yield m
171
+
172
+
173
+ class GraphormerGraphNodeFeature(nn.Module):
174
+ """
175
+ Compute node features for each node in the graph.
176
+ """
177
+
178
+ def __init__(self, config: GraphormerConfig):
179
+ super().__init__()
180
+ self.num_heads = config.num_attention_heads
181
+ self.num_atoms = config.num_atoms
182
+
183
+ self.atom_encoder = nn.Embedding(config.num_atoms + 1, config.hidden_size, padding_idx=config.pad_token_id)
184
+ self.in_degree_encoder = nn.Embedding(
185
+ config.num_in_degree, config.hidden_size, padding_idx=config.pad_token_id
186
+ )
187
+ self.out_degree_encoder = nn.Embedding(
188
+ config.num_out_degree, config.hidden_size, padding_idx=config.pad_token_id
189
+ )
190
+
191
+ self.graph_token = nn.Embedding(1, config.hidden_size)
192
+
193
+ def forward(
194
+ self,
195
+ input_nodes: torch.LongTensor,
196
+ in_degree: torch.LongTensor,
197
+ out_degree: torch.LongTensor,
198
+ ) -> torch.Tensor:
199
+ n_graph, n_node = input_nodes.size()[:2]
200
+
201
+ node_feature = ( # node feature + graph token
202
+ self.atom_encoder(input_nodes).sum(dim=-2) # [n_graph, n_node, n_hidden]
203
+ + self.in_degree_encoder(in_degree)
204
+ + self.out_degree_encoder(out_degree)
205
+ )
206
+
207
+ graph_token_feature = self.graph_token.weight.unsqueeze(0).repeat(n_graph, 1, 1)
208
+
209
+ graph_node_feature = torch.cat([graph_token_feature, node_feature], dim=1)
210
+
211
+ return graph_node_feature
212
+
213
+
214
+ class GraphormerGraphAttnBias(nn.Module):
215
+ """
216
+ Compute attention bias for each head.
217
+ """
218
+
219
+ def __init__(self, config: GraphormerConfig):
220
+ super().__init__()
221
+ self.num_heads = config.num_attention_heads
222
+ self.multi_hop_max_dist = config.multi_hop_max_dist
223
+
224
+ # We do not change edge feature embedding learning, as edge embeddings are represented as a combination of the original features
225
+ # + shortest path
226
+ self.edge_encoder = nn.Embedding(config.num_edges + 1, config.num_attention_heads, padding_idx=0)
227
+
228
+ self.edge_type = config.edge_type
229
+ if self.edge_type == "multi_hop":
230
+ self.edge_dis_encoder = nn.Embedding(
231
+ config.num_edge_dis * config.num_attention_heads * config.num_attention_heads,
232
+ 1,
233
+ )
234
+
235
+ self.spatial_pos_encoder = nn.Embedding(config.num_spatial, config.num_attention_heads, padding_idx=0)
236
+
237
+ self.graph_token_virtual_distance = nn.Embedding(1, config.num_attention_heads)
238
+
239
+ def forward(
240
+ self,
241
+ input_nodes: torch.LongTensor,
242
+ attn_bias: torch.Tensor,
243
+ spatial_pos: torch.LongTensor,
244
+ input_edges: torch.LongTensor,
245
+ attn_edge_type: torch.LongTensor,
246
+ ) -> torch.Tensor:
247
+ n_graph, n_node = input_nodes.size()[:2]
248
+ graph_attn_bias = attn_bias.clone()
249
+ graph_attn_bias = graph_attn_bias.unsqueeze(1).repeat(
250
+ 1, self.num_heads, 1, 1
251
+ ) # [n_graph, n_head, n_node+1, n_node+1]
252
+
253
+ # spatial pos
254
+ # [n_graph, n_node, n_node, n_head] -> [n_graph, n_head, n_node, n_node]
255
+ spatial_pos_bias = self.spatial_pos_encoder(spatial_pos).permute(0, 3, 1, 2)
256
+ graph_attn_bias[:, :, 1:, 1:] = graph_attn_bias[:, :, 1:, 1:] + spatial_pos_bias
257
+
258
+ # reset spatial pos here
259
+ t = self.graph_token_virtual_distance.weight.view(1, self.num_heads, 1)
260
+ graph_attn_bias[:, :, 1:, 0] = graph_attn_bias[:, :, 1:, 0] + t
261
+ graph_attn_bias[:, :, 0, :] = graph_attn_bias[:, :, 0, :] + t
262
+
263
+ # edge feature
264
+ if self.edge_type == "multi_hop":
265
+ spatial_pos_ = spatial_pos.clone()
266
+
267
+ spatial_pos_[spatial_pos_ == 0] = 1 # set pad to 1
268
+ # set 1 to 1, input_nodes > 1 to input_nodes - 1
269
+ spatial_pos_ = torch.where(spatial_pos_ > 1, spatial_pos_ - 1, spatial_pos_)
270
+ if self.multi_hop_max_dist > 0:
271
+ spatial_pos_ = spatial_pos_.clamp(0, self.multi_hop_max_dist)
272
+ input_edges = input_edges[:, :, :, : self.multi_hop_max_dist, :]
273
+ # [n_graph, n_node, n_node, max_dist, n_head]
274
+
275
+ input_edges = self.edge_encoder(input_edges).mean(-2)
276
+ max_dist = input_edges.size(-2)
277
+ edge_input_flat = input_edges.permute(3, 0, 1, 2, 4).reshape(max_dist, -1, self.num_heads)
278
+ edge_input_flat = torch.bmm(
279
+ edge_input_flat,
280
+ self.edge_dis_encoder.weight.reshape(-1, self.num_heads, self.num_heads)[:max_dist, :, :],
281
+ )
282
+ input_edges = edge_input_flat.reshape(max_dist, n_graph, n_node, n_node, self.num_heads).permute(
283
+ 1, 2, 3, 0, 4
284
+ )
285
+ input_edges = (input_edges.sum(-2) / (spatial_pos_.float().unsqueeze(-1))).permute(0, 3, 1, 2)
286
+ else:
287
+ # [n_graph, n_node, n_node, n_head] -> [n_graph, n_head, n_node, n_node]
288
+ input_edges = self.edge_encoder(attn_edge_type).mean(-2).permute(0, 3, 1, 2)
289
+
290
+ graph_attn_bias[:, :, 1:, 1:] = graph_attn_bias[:, :, 1:, 1:] + input_edges
291
+ graph_attn_bias = graph_attn_bias + attn_bias.unsqueeze(1) # reset
292
+
293
+ return graph_attn_bias
294
+
295
+
296
+ class GraphormerMultiheadAttention(nn.Module):
297
+ """Multi-headed attention.
298
+
299
+ See "Attention Is All You Need" for more details.
300
+ """
301
+
302
+ def __init__(self, config: GraphormerConfig):
303
+ super().__init__()
304
+ self.embedding_dim = config.embedding_dim
305
+ self.kdim = config.kdim if config.kdim is not None else config.embedding_dim
306
+ self.vdim = config.vdim if config.vdim is not None else config.embedding_dim
307
+ self.qkv_same_dim = self.kdim == config.embedding_dim and self.vdim == config.embedding_dim
308
+
309
+ self.num_heads = config.num_attention_heads
310
+ self.attention_dropout_module = torch.nn.Dropout(p=config.attention_dropout, inplace=False)
311
+
312
+ self.head_dim = config.embedding_dim // config.num_attention_heads
313
+ if not (self.head_dim * config.num_attention_heads == self.embedding_dim):
314
+ raise AssertionError("The embedding_dim must be divisible by num_heads.")
315
+ self.scaling = self.head_dim**-0.5
316
+
317
+ self.self_attention = True # config.self_attention
318
+ if not (self.self_attention):
319
+ raise NotImplementedError("The Graphormer model only supports self attention for now.")
320
+ if self.self_attention and not self.qkv_same_dim:
321
+ raise AssertionError("Self-attention requires query, key and value to be of the same size.")
322
+
323
+ self.k_proj = quant_noise(
324
+ nn.Linear(self.kdim, config.embedding_dim, bias=config.bias),
325
+ config.q_noise,
326
+ config.qn_block_size,
327
+ )
328
+ self.v_proj = quant_noise(
329
+ nn.Linear(self.vdim, config.embedding_dim, bias=config.bias),
330
+ config.q_noise,
331
+ config.qn_block_size,
332
+ )
333
+ self.q_proj = quant_noise(
334
+ nn.Linear(config.embedding_dim, config.embedding_dim, bias=config.bias),
335
+ config.q_noise,
336
+ config.qn_block_size,
337
+ )
338
+
339
+ self.out_proj = quant_noise(
340
+ nn.Linear(config.embedding_dim, config.embedding_dim, bias=config.bias),
341
+ config.q_noise,
342
+ config.qn_block_size,
343
+ )
344
+
345
+ self.onnx_trace = False
346
+
347
+ def reset_parameters(self):
348
+ if self.qkv_same_dim:
349
+ # Empirically observed the convergence to be much better with
350
+ # the scaled initialization
351
+ nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))
352
+ nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))
353
+ nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))
354
+ else:
355
+ nn.init.xavier_uniform_(self.k_proj.weight)
356
+ nn.init.xavier_uniform_(self.v_proj.weight)
357
+ nn.init.xavier_uniform_(self.q_proj.weight)
358
+
359
+ nn.init.xavier_uniform_(self.out_proj.weight)
360
+ if self.out_proj.bias is not None:
361
+ nn.init.constant_(self.out_proj.bias, 0.0)
362
+
363
+ def forward(
364
+ self,
365
+ query: torch.LongTensor,
366
+ key: Optional[torch.Tensor],
367
+ value: Optional[torch.Tensor],
368
+ attn_bias: Optional[torch.Tensor],
369
+ key_padding_mask: Optional[torch.Tensor] = None,
370
+ need_weights: bool = True,
371
+ attn_mask: Optional[torch.Tensor] = None,
372
+ before_softmax: bool = False,
373
+ need_head_weights: bool = False,
374
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
375
+ """
376
+ Args:
377
+ key_padding_mask (torch.ByteTensor, optional): mask to exclude
378
+ keys that are pads, of shape `(batch, src_len)`, where padding elements are indicated by 1s.
379
+ need_weights (bool, optional): return the attention weights,
380
+ averaged over heads (default: False).
381
+ attn_mask (torch.ByteTensor, optional): typically used to
382
+ implement causal attention, where the mask prevents the attention from looking forward in time
383
+ (default: None).
384
+ before_softmax (bool, optional): return the raw attention
385
+ weights and values before the attention softmax.
386
+ need_head_weights (bool, optional): return the attention
387
+ weights for each head. Implies *need_weights*. Default: return the average attention weights over all
388
+ heads.
389
+ """
390
+ if need_head_weights:
391
+ need_weights = True
392
+
393
+ tgt_len, bsz, embedding_dim = query.size()
394
+ src_len = tgt_len
395
+ if not (embedding_dim == self.embedding_dim):
396
+ raise AssertionError(
397
+ f"The query embedding dimension {embedding_dim} is not equal to the expected embedding_dim"
398
+ f" {self.embedding_dim}."
399
+ )
400
+ if not (list(query.size()) == [tgt_len, bsz, embedding_dim]):
401
+ raise AssertionError("Query size incorrect in Graphormer, compared to model dimensions.")
402
+
403
+ if key is not None:
404
+ src_len, key_bsz, _ = key.size()
405
+ if not torch.jit.is_scripting():
406
+ if (key_bsz != bsz) or (value is None) or value.shape[:2] != (src_len, bsz):
407
+ raise AssertionError(
408
+ "The batch shape does not match the key or value shapes provided to the attention."
409
+ )
410
+
411
+ q = self.q_proj(query)
412
+ k = self.k_proj(query)
413
+ v = self.v_proj(query)
414
+
415
+ q *= self.scaling
416
+
417
+ q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim).transpose(0, 1)
418
+ if k is not None:
419
+ k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)
420
+ if v is not None:
421
+ v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)
422
+
423
+ if (k is None) or not (k.size(1) == src_len):
424
+ raise AssertionError("The shape of the key generated in the attention is incorrect")
425
+
426
+ # This is part of a workaround to get around fork/join parallelism
427
+ # not supporting Optional types.
428
+ if key_padding_mask is not None and key_padding_mask.dim() == 0:
429
+ key_padding_mask = None
430
+
431
+ if key_padding_mask is not None:
432
+ if key_padding_mask.size(0) != bsz or key_padding_mask.size(1) != src_len:
433
+ raise AssertionError(
434
+ "The shape of the generated padding mask for the key does not match expected dimensions."
435
+ )
436
+ attn_weights = torch.bmm(q, k.transpose(1, 2))
437
+ attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz)
438
+
439
+ if list(attn_weights.size()) != [bsz * self.num_heads, tgt_len, src_len]:
440
+ raise AssertionError("The attention weights generated do not match the expected dimensions.")
441
+
442
+ if attn_bias is not None:
443
+ attn_weights += attn_bias.view(bsz * self.num_heads, tgt_len, src_len)
444
+
445
+ if attn_mask is not None:
446
+ attn_mask = attn_mask.unsqueeze(0)
447
+ attn_weights += attn_mask
448
+
449
+ if key_padding_mask is not None:
450
+ # don't attend to padding symbols
451
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
452
+ attn_weights = attn_weights.masked_fill(
453
+ key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool), float("-inf")
454
+ )
455
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
456
+
457
+ if before_softmax:
458
+ return attn_weights, v
459
+
460
+ attn_weights_float = torch.nn.functional.softmax(attn_weights, dim=-1)
461
+ attn_weights = attn_weights_float.type_as(attn_weights)
462
+ attn_probs = self.attention_dropout_module(attn_weights)
463
+
464
+ if v is None:
465
+ raise AssertionError("No value generated")
466
+ attn = torch.bmm(attn_probs, v)
467
+ if list(attn.size()) != [bsz * self.num_heads, tgt_len, self.head_dim]:
468
+ raise AssertionError("The attention generated does not match the expected dimensions.")
469
+
470
+ attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embedding_dim)
471
+ attn: torch.Tensor = self.out_proj(attn)
472
+
473
+ attn_weights = None
474
+ if need_weights:
475
+ attn_weights = attn_weights_float.contiguous().view(bsz, self.num_heads, tgt_len, src_len).transpose(1, 0)
476
+ if not need_head_weights:
477
+ # average attention weights over heads
478
+ attn_weights = attn_weights.mean(dim=0)
479
+
480
+ return attn, attn_weights
481
+
482
+ def apply_sparse_mask(self, attn_weights: torch.Tensor, tgt_len: int, src_len: int, bsz: int) -> torch.Tensor:
483
+ return attn_weights
484
+
485
+
486
+ class GraphormerGraphEncoderLayer(nn.Module):
487
+ def __init__(self, config: GraphormerConfig) -> None:
488
+ super().__init__()
489
+
490
+ # Initialize parameters
491
+ self.embedding_dim = config.embedding_dim
492
+ self.num_attention_heads = config.num_attention_heads
493
+ self.q_noise = config.q_noise
494
+ self.qn_block_size = config.qn_block_size
495
+ self.pre_layernorm = config.pre_layernorm
496
+
497
+ self.dropout_module = torch.nn.Dropout(p=config.dropout, inplace=False)
498
+
499
+ self.activation_dropout_module = torch.nn.Dropout(p=config.activation_dropout, inplace=False)
500
+
501
+ # Initialize blocks
502
+ self.activation_fn = ACT2FN[config.activation_fn]
503
+ self.self_attn = GraphormerMultiheadAttention(config)
504
+
505
+ # layer norm associated with the self attention layer
506
+ self.self_attn_layer_norm = nn.LayerNorm(self.embedding_dim)
507
+
508
+ self.fc1 = self.build_fc(
509
+ self.embedding_dim,
510
+ config.ffn_embedding_dim,
511
+ q_noise=config.q_noise,
512
+ qn_block_size=config.qn_block_size,
513
+ )
514
+ self.fc2 = self.build_fc(
515
+ config.ffn_embedding_dim,
516
+ self.embedding_dim,
517
+ q_noise=config.q_noise,
518
+ qn_block_size=config.qn_block_size,
519
+ )
520
+
521
+ # layer norm associated with the position wise feed-forward NN
522
+ self.final_layer_norm = nn.LayerNorm(self.embedding_dim)
523
+
524
+ def build_fc(
525
+ self, input_dim: int, output_dim: int, q_noise: float, qn_block_size: int
526
+ ) -> Union[nn.Module, nn.Linear, nn.Embedding, nn.Conv2d]:
527
+ return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size)
528
+
529
+ def forward(
530
+ self,
531
+ input_nodes: torch.Tensor,
532
+ self_attn_bias: Optional[torch.Tensor] = None,
533
+ self_attn_mask: Optional[torch.Tensor] = None,
534
+ self_attn_padding_mask: Optional[torch.Tensor] = None,
535
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
536
+ """
537
+ nn.LayerNorm is applied either before or after the self-attention/ffn modules similar to the original
538
+ Transformer implementation.
539
+ """
540
+ residual = input_nodes
541
+ if self.pre_layernorm:
542
+ input_nodes = self.self_attn_layer_norm(input_nodes)
543
+
544
+ input_nodes, attn = self.self_attn(
545
+ query=input_nodes,
546
+ key=input_nodes,
547
+ value=input_nodes,
548
+ attn_bias=self_attn_bias,
549
+ key_padding_mask=self_attn_padding_mask,
550
+ need_weights=False,
551
+ attn_mask=self_attn_mask,
552
+ )
553
+ input_nodes = self.dropout_module(input_nodes)
554
+ input_nodes = residual + input_nodes
555
+ if not self.pre_layernorm:
556
+ input_nodes = self.self_attn_layer_norm(input_nodes)
557
+
558
+ residual = input_nodes
559
+ if self.pre_layernorm:
560
+ input_nodes = self.final_layer_norm(input_nodes)
561
+ input_nodes = self.activation_fn(self.fc1(input_nodes))
562
+ input_nodes = self.activation_dropout_module(input_nodes)
563
+ input_nodes = self.fc2(input_nodes)
564
+ input_nodes = self.dropout_module(input_nodes)
565
+ input_nodes = residual + input_nodes
566
+ if not self.pre_layernorm:
567
+ input_nodes = self.final_layer_norm(input_nodes)
568
+
569
+ return input_nodes, attn
570
+
571
+
572
+ class GraphormerGraphEncoder(nn.Module):
573
+ def __init__(self, config: GraphormerConfig):
574
+ super().__init__()
575
+
576
+ self.dropout_module = torch.nn.Dropout(p=config.dropout, inplace=False)
577
+ self.layerdrop = config.layerdrop
578
+ self.embedding_dim = config.embedding_dim
579
+ self.apply_graphormer_init = config.apply_graphormer_init
580
+ self.traceable = config.traceable
581
+
582
+ self.graph_node_feature = GraphormerGraphNodeFeature(config)
583
+ self.graph_attn_bias = GraphormerGraphAttnBias(config)
584
+
585
+ self.embed_scale = config.embed_scale
586
+
587
+ if config.q_noise > 0:
588
+ self.quant_noise = quant_noise(
589
+ nn.Linear(self.embedding_dim, self.embedding_dim, bias=False),
590
+ config.q_noise,
591
+ config.qn_block_size,
592
+ )
593
+ else:
594
+ self.quant_noise = None
595
+
596
+ if config.encoder_normalize_before:
597
+ self.emb_layer_norm = nn.LayerNorm(self.embedding_dim)
598
+ else:
599
+ self.emb_layer_norm = None
600
+
601
+ if config.pre_layernorm:
602
+ self.final_layer_norm = nn.LayerNorm(self.embedding_dim)
603
+
604
+ if self.layerdrop > 0.0:
605
+ self.layers = LayerDropModuleList(p=self.layerdrop)
606
+ else:
607
+ self.layers = nn.ModuleList([])
608
+ self.layers.extend([GraphormerGraphEncoderLayer(config) for _ in range(config.num_hidden_layers)])
609
+
610
+ # Apply initialization of model params after building the model
611
+ if config.freeze_embeddings:
612
+ raise NotImplementedError("Freezing embeddings is not implemented yet.")
613
+
614
+ for layer in range(config.num_trans_layers_to_freeze):
615
+ m = self.layers[layer]
616
+ if m is not None:
617
+ for p in m.parameters():
618
+ p.requires_grad = False
619
+
620
+ def forward(
621
+ self,
622
+ input_nodes: torch.LongTensor,
623
+ input_edges: torch.LongTensor,
624
+ attn_bias: torch.Tensor,
625
+ in_degree: torch.LongTensor,
626
+ out_degree: torch.LongTensor,
627
+ spatial_pos: torch.LongTensor,
628
+ attn_edge_type: torch.LongTensor,
629
+ perturb=None,
630
+ last_state_only: bool = False,
631
+ token_embeddings: Optional[torch.Tensor] = None,
632
+ attn_mask: Optional[torch.Tensor] = None,
633
+ ) -> Tuple[Union[torch.Tensor, List[torch.LongTensor]], torch.Tensor]:
634
+ # compute padding mask. This is needed for multi-head attention
635
+ data_x = input_nodes
636
+ n_graph, n_node = data_x.size()[:2]
637
+ padding_mask = (data_x[:, :, 0]).eq(0)
638
+ padding_mask_cls = torch.zeros(n_graph, 1, device=padding_mask.device, dtype=padding_mask.dtype)
639
+ padding_mask = torch.cat((padding_mask_cls, padding_mask), dim=1)
640
+
641
+ attn_bias = self.graph_attn_bias(input_nodes, attn_bias, spatial_pos, input_edges, attn_edge_type)
642
+
643
+ if token_embeddings is not None:
644
+ input_nodes = token_embeddings
645
+ else:
646
+ input_nodes = self.graph_node_feature(input_nodes, in_degree, out_degree)
647
+
648
+ if perturb is not None:
649
+ input_nodes[:, 1:, :] += perturb
650
+
651
+ if self.embed_scale is not None:
652
+ input_nodes = input_nodes * self.embed_scale
653
+
654
+ if self.quant_noise is not None:
655
+ input_nodes = self.quant_noise(input_nodes)
656
+
657
+ if self.emb_layer_norm is not None:
658
+ input_nodes = self.emb_layer_norm(input_nodes)
659
+
660
+ input_nodes = self.dropout_module(input_nodes)
661
+
662
+ input_nodes = input_nodes.transpose(0, 1)
663
+
664
+ inner_states = []
665
+ if not last_state_only:
666
+ inner_states.append(input_nodes)
667
+
668
+ for layer in self.layers:
669
+ input_nodes, _ = layer(
670
+ input_nodes,
671
+ self_attn_padding_mask=padding_mask,
672
+ self_attn_mask=attn_mask,
673
+ self_attn_bias=attn_bias,
674
+ )
675
+ if not last_state_only:
676
+ inner_states.append(input_nodes)
677
+
678
+ graph_rep = input_nodes[0, :, :]
679
+
680
+ if last_state_only:
681
+ inner_states = [input_nodes]
682
+
683
+ if self.traceable:
684
+ return torch.stack(inner_states), graph_rep
685
+ else:
686
+ return inner_states, graph_rep
687
+
688
+
689
+ class GraphormerDecoderHead(nn.Module):
690
+ def __init__(self, embedding_dim: int, num_classes: int):
691
+ super().__init__()
692
+ """num_classes should be 1 for regression, or the number of classes for classification"""
693
+ self.lm_output_learned_bias = nn.Parameter(torch.zeros(1))
694
+ self.classifier = nn.Linear(embedding_dim, num_classes, bias=False)
695
+ self.num_classes = num_classes
696
+
697
+ def forward(self, input_nodes: torch.Tensor, **unused) -> torch.Tensor:
698
+ input_nodes = self.classifier(input_nodes)
699
+ input_nodes = input_nodes + self.lm_output_learned_bias
700
+ return input_nodes
701
+
702
+
703
+ class GraphormerPreTrainedModel(PreTrainedModel):
704
+ """
705
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
706
+ models.
707
+ """
708
+
709
+ config_class = GraphormerConfig
710
+ base_model_prefix = "graphormer"
711
+ main_input_name_nodes = "input_nodes"
712
+ main_input_name_edges = "input_edges"
713
+
714
+ def normal_(self, data: torch.Tensor):
715
+ # with FSDP, module params will be on CUDA, so we cast them back to CPU
716
+ # so that the RNG is consistent with and without FSDP
717
+ data.copy_(data.cpu().normal_(mean=0.0, std=0.02).to(data.device))
718
+
719
+ def init_graphormer_params(self, module: Union[nn.Linear, nn.Embedding, GraphormerMultiheadAttention]):
720
+ """
721
+ Initialize the weights specific to the Graphormer Model.
722
+ """
723
+ if isinstance(module, nn.Linear):
724
+ self.normal_(module.weight.data)
725
+ if module.bias is not None:
726
+ module.bias.data.zero_()
727
+ if isinstance(module, nn.Embedding):
728
+ self.normal_(module.weight.data)
729
+ if module.padding_idx is not None:
730
+ module.weight.data[module.padding_idx].zero_()
731
+ if isinstance(module, GraphormerMultiheadAttention):
732
+ self.normal_(module.q_proj.weight.data)
733
+ self.normal_(module.k_proj.weight.data)
734
+ self.normal_(module.v_proj.weight.data)
735
+
736
+ def _init_weights(
737
+ self,
738
+ module: Union[
739
+ nn.Linear, nn.Conv2d, nn.Embedding, nn.LayerNorm, GraphormerMultiheadAttention, GraphormerGraphEncoder
740
+ ],
741
+ ):
742
+ """
743
+ Initialize the weights
744
+ """
745
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
746
+ # We might be missing part of the Linear init, dependent on the layer num
747
+ module.weight.data.normal_(mean=0.0, std=0.02)
748
+ if module.bias is not None:
749
+ module.bias.data.zero_()
750
+ elif isinstance(module, nn.Embedding):
751
+ module.weight.data.normal_(mean=0.0, std=0.02)
752
+ if module.padding_idx is not None:
753
+ module.weight.data[module.padding_idx].zero_()
754
+ elif isinstance(module, GraphormerMultiheadAttention):
755
+ module.q_proj.weight.data.normal_(mean=0.0, std=0.02)
756
+ module.k_proj.weight.data.normal_(mean=0.0, std=0.02)
757
+ module.v_proj.weight.data.normal_(mean=0.0, std=0.02)
758
+ module.reset_parameters()
759
+ elif isinstance(module, nn.LayerNorm):
760
+ module.bias.data.zero_()
761
+ module.weight.data.fill_(1.0)
762
+ elif isinstance(module, GraphormerGraphEncoder):
763
+ if module.apply_graphormer_init:
764
+ module.apply(self.init_graphormer_params)
765
+
766
+ elif isinstance(module, nn.LayerNorm):
767
+ module.bias.data.zero_()
768
+ module.weight.data.fill_(1.0)
769
+
770
+
771
+ class GraphormerModel(GraphormerPreTrainedModel):
772
+ """The Graphormer model is a graph-encoder model.
773
+
774
+ It goes from a graph to its representation. If you want to use the model for a downstream classification task, use
775
+ GraphormerForGraphClassification instead. For any other downstream task, feel free to add a new class, or combine
776
+ this model with a downstream model of your choice, following the example in GraphormerForGraphClassification.
777
+ """
778
+
779
+ def __init__(self, config: GraphormerConfig):
780
+ super().__init__(config)
781
+ self.max_nodes = config.max_nodes
782
+
783
+ self.graph_encoder = GraphormerGraphEncoder(config)
784
+
785
+ self.share_input_output_embed = config.share_input_output_embed
786
+ self.lm_output_learned_bias = None
787
+
788
+ # Remove head is set to true during fine-tuning
789
+ self.load_softmax = not getattr(config, "remove_head", False)
790
+
791
+ self.lm_head_transform_weight = nn.Linear(config.embedding_dim, config.embedding_dim)
792
+ self.activation_fn = ACT2FN[config.activation_fn]
793
+ self.layer_norm = nn.LayerNorm(config.embedding_dim)
794
+
795
+ self.post_init()
796
+
797
+ def reset_output_layer_parameters(self):
798
+ self.lm_output_learned_bias = nn.Parameter(torch.zeros(1))
799
+
800
+ def forward(
801
+ self,
802
+ input_nodes: torch.LongTensor,
803
+ input_edges: torch.LongTensor,
804
+ attn_bias: torch.Tensor,
805
+ in_degree: torch.LongTensor,
806
+ out_degree: torch.LongTensor,
807
+ spatial_pos: torch.LongTensor,
808
+ attn_edge_type: torch.LongTensor,
809
+ perturb: Optional[torch.FloatTensor] = None,
810
+ masked_tokens: None = None,
811
+ return_dict: Optional[bool] = None,
812
+ **unused,
813
+ ) -> Union[Tuple[torch.LongTensor], BaseModelOutputWithNoAttention]:
814
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
815
+
816
+ inner_states, graph_rep = self.graph_encoder(
817
+ input_nodes, input_edges, attn_bias, in_degree, out_degree, spatial_pos, attn_edge_type, perturb=perturb
818
+ )
819
+
820
+ # last inner state, then revert Batch and Graph len
821
+ input_nodes = inner_states[-1].transpose(0, 1)
822
+
823
+ # project masked tokens only
824
+ if masked_tokens is not None:
825
+ raise NotImplementedError
826
+
827
+ input_nodes = self.layer_norm(self.activation_fn(self.lm_head_transform_weight(input_nodes)))
828
+
829
+ # project back to size of vocabulary
830
+ if self.share_input_output_embed and hasattr(self.graph_encoder.embed_tokens, "weight"):
831
+ input_nodes = torch.nn.functional.linear(input_nodes, self.graph_encoder.embed_tokens.weight)
832
+
833
+ if not return_dict:
834
+ return tuple(x for x in [input_nodes, inner_states] if x is not None)
835
+ return BaseModelOutputWithNoAttention(last_hidden_state=input_nodes, hidden_states=inner_states)
836
+
837
+ def max_nodes(self):
838
+ """Maximum output length supported by the encoder."""
839
+ return self.max_nodes
840
+
841
+
842
+ class GraphormerForGraphClassification(GraphormerPreTrainedModel):
843
+ """
844
+ This model can be used for graph-level classification or regression tasks.
845
+
846
+ It can be trained on
847
+ - regression (by setting config.num_classes to 1); there should be one float-type label per graph
848
+ - one task classification (by setting config.num_classes to the number of classes); there should be one integer
849
+ label per graph
850
+ - binary multi-task classification (by setting config.num_classes to the number of labels); there should be a list
851
+ of integer labels for each graph.
852
+ """
853
+
854
+ def __init__(self, config: GraphormerConfig):
855
+ super().__init__(config)
856
+ self.encoder = GraphormerModel(config)
857
+ self.embedding_dim = config.embedding_dim
858
+ self.num_classes = config.num_classes
859
+ self.classifier = GraphormerDecoderHead(self.embedding_dim, self.num_classes)
860
+ self.is_encoder_decoder = True
861
+
862
+ # Initialize weights and apply final processing
863
+ self.post_init()
864
+
865
+ def forward(
866
+ self,
867
+ input_nodes: torch.LongTensor,
868
+ input_edges: torch.LongTensor,
869
+ attn_bias: torch.Tensor,
870
+ in_degree: torch.LongTensor,
871
+ out_degree: torch.LongTensor,
872
+ spatial_pos: torch.LongTensor,
873
+ attn_edge_type: torch.LongTensor,
874
+ labels: Optional[torch.LongTensor] = None,
875
+ return_dict: Optional[bool] = None,
876
+ **unused,
877
+ ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
878
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
879
+
880
+ encoder_outputs = self.encoder(
881
+ input_nodes,
882
+ input_edges,
883
+ attn_bias,
884
+ in_degree,
885
+ out_degree,
886
+ spatial_pos,
887
+ attn_edge_type,
888
+ return_dict=True,
889
+ )
890
+ outputs, hidden_states = encoder_outputs["last_hidden_state"], encoder_outputs["hidden_states"]
891
+
892
+ head_outputs = self.classifier(outputs)
893
+ logits = head_outputs[:, 0, :].contiguous()
894
+
895
+ loss = None
896
+ if labels is not None:
897
+ mask = ~torch.isnan(labels)
898
+
899
+ if self.num_classes == 1: # regression
900
+ loss_fct = MSELoss()
901
+ loss = loss_fct(logits[mask].squeeze(), labels[mask].squeeze().float())
902
+ elif self.num_classes > 1 and len(labels.shape) == 1: # One task classification
903
+ loss_fct = CrossEntropyLoss()
904
+ loss = loss_fct(logits[mask].view(-1, self.num_classes), labels[mask].view(-1))
905
+ else: # Binary multi-task classification
906
+ loss_fct = BCEWithLogitsLoss(reduction="sum")
907
+ loss = loss_fct(logits[mask], labels[mask])
908
+
909
+ if not return_dict:
910
+ return tuple(x for x in [loss, logits, hidden_states] if x is not None)
911
+ return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=hidden_states, attentions=None)
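
A minimal sketch of how GraphormerForGraphClassification selects its loss, mirroring the branches in the forward pass above. The tensors here are hand-made dummies rather than real collated graph batches, and pick_loss is a hypothetical helper for illustration, not part of the library.

import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

def pick_loss(logits: torch.Tensor, labels: torch.Tensor, num_classes: int) -> torch.Tensor:
    # Same label handling as GraphormerForGraphClassification.forward:
    # NaN labels are masked out before the loss is computed.
    mask = ~torch.isnan(labels)
    if num_classes == 1:  # regression: one float label per graph
        return MSELoss()(logits[mask].squeeze(), labels[mask].squeeze().float())
    if num_classes > 1 and labels.dim() == 1:  # single-task classification
        return CrossEntropyLoss()(logits[mask].view(-1, num_classes), labels[mask].view(-1))
    # binary multi-task classification: one 0/1 label per task and per graph
    return BCEWithLogitsLoss(reduction="sum")(logits[mask], labels[mask])

# Regression example: 4 graphs, one of them with a missing (NaN) label.
logits = torch.randn(4, 1)
labels = torch.tensor([0.3, float("nan"), -1.2, 0.8])
print(pick_loss(logits, labels, num_classes=1))
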
venv/lib/python3.10/site-packages/transformers/models/levit/__init__.py ADDED
@@ -0,0 +1,73 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
17
+
18
+
19
+ _import_structure = {"configuration_levit": ["LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LevitConfig", "LevitOnnxConfig"]}
20
+
21
+ try:
22
+ if not is_vision_available():
23
+ raise OptionalDependencyNotAvailable()
24
+ except OptionalDependencyNotAvailable:
25
+ pass
26
+ else:
27
+ _import_structure["feature_extraction_levit"] = ["LevitFeatureExtractor"]
28
+ _import_structure["image_processing_levit"] = ["LevitImageProcessor"]
29
+
30
+ try:
31
+ if not is_torch_available():
32
+ raise OptionalDependencyNotAvailable()
33
+ except OptionalDependencyNotAvailable:
34
+ pass
35
+ else:
36
+ _import_structure["modeling_levit"] = [
37
+ "LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
38
+ "LevitForImageClassification",
39
+ "LevitForImageClassificationWithTeacher",
40
+ "LevitModel",
41
+ "LevitPreTrainedModel",
42
+ ]
43
+
44
+
45
+ if TYPE_CHECKING:
46
+ from .configuration_levit import LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, LevitConfig, LevitOnnxConfig
47
+
48
+ try:
49
+ if not is_vision_available():
50
+ raise OptionalDependencyNotAvailable()
51
+ except OptionalDependencyNotAvailable:
52
+ pass
53
+ else:
54
+ from .feature_extraction_levit import LevitFeatureExtractor
55
+ from .image_processing_levit import LevitImageProcessor
56
+
57
+ try:
58
+ if not is_torch_available():
59
+ raise OptionalDependencyNotAvailable()
60
+ except OptionalDependencyNotAvailable:
61
+ pass
62
+ else:
63
+ from .modeling_levit import (
64
+ LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
65
+ LevitForImageClassification,
66
+ LevitForImageClassificationWithTeacher,
67
+ LevitModel,
68
+ LevitPreTrainedModel,
69
+ )
70
+ else:
71
+ import sys
72
+
73
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
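
For orientation, the _LazyModule registration above means the Levit submodules are only imported when their names are first accessed. A short sketch of the user-facing effect, assuming torch and the vision extras are installed:

from transformers import LevitConfig, LevitImageProcessor, LevitModel

# `import transformers` alone does not load these submodules; they are
# resolved lazily on first attribute access (which a from-import triggers).
config = LevitConfig()
image_processor = LevitImageProcessor()
model = LevitModel(config)  # randomly initialized weights, no checkpoint download
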
venv/lib/python3.10/site-packages/transformers/models/levit/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.29 kB).
venv/lib/python3.10/site-packages/transformers/models/levit/__pycache__/configuration_levit.cpython-310.pyc ADDED
Binary file (5.38 kB).
venv/lib/python3.10/site-packages/transformers/models/levit/__pycache__/convert_levit_timm_to_pytorch.cpython-310.pyc ADDED
Binary file (4.33 kB).
venv/lib/python3.10/site-packages/transformers/models/levit/__pycache__/feature_extraction_levit.cpython-310.pyc ADDED
Binary file (1.01 kB).
venv/lib/python3.10/site-packages/transformers/models/levit/__pycache__/image_processing_levit.cpython-310.pyc ADDED
Binary file (14.1 kB).
venv/lib/python3.10/site-packages/transformers/models/levit/__pycache__/modeling_levit.cpython-310.pyc ADDED
Binary file (21.5 kB).
venv/lib/python3.10/site-packages/transformers/models/levit/configuration_levit.py ADDED
@@ -0,0 +1,144 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ LeViT model configuration"""
16
+
17
+ from collections import OrderedDict
18
+ from typing import Mapping
19
+
20
+ from packaging import version
21
+
22
+ from ...configuration_utils import PretrainedConfig
23
+ from ...onnx import OnnxConfig
24
+ from ...utils import logging
25
+
26
+
27
+ logger = logging.get_logger(__name__)
28
+
29
+
30
+ from ..deprecated._archive_maps import LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
31
+
32
+
33
+ class LevitConfig(PretrainedConfig):
34
+ r"""
35
+ This is the configuration class to store the configuration of a [`LevitModel`]. It is used to instantiate a LeViT
36
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
37
+ defaults will yield a similar configuration to that of the LeViT
38
+ [facebook/levit-128S](https://huggingface.co/facebook/levit-128S) architecture.
39
+
40
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
41
+ documentation from [`PretrainedConfig`] for more information.
42
+
43
+ Args:
44
+ image_size (`int`, *optional*, defaults to 224):
45
+ The size of the input image.
46
+ num_channels (`int`, *optional*, defaults to 3):
47
+ Number of channels in the input image.
48
+ kernel_size (`int`, *optional*, defaults to 3):
49
+ The kernel size for the initial convolution layers of patch embedding.
50
+ stride (`int`, *optional*, defaults to 2):
51
+ The stride size for the initial convolution layers of patch embedding.
52
+ padding (`int`, *optional*, defaults to 1):
53
+ The padding size for the initial convolution layers of patch embedding.
54
+ patch_size (`int`, *optional*, defaults to 16):
55
+ The patch size for embeddings.
56
+ hidden_sizes (`List[int]`, *optional*, defaults to `[128, 256, 384]`):
57
+ Dimension of each of the encoder blocks.
58
+ num_attention_heads (`List[int]`, *optional*, defaults to `[4, 8, 12]`):
59
+ Number of attention heads for each attention layer in each block of the Transformer encoder.
60
+ depths (`List[int]`, *optional*, defaults to `[4, 4, 4]`):
61
+ The number of layers in each encoder block.
62
+ key_dim (`List[int]`, *optional*, defaults to `[16, 16, 16]`):
63
+ The size of key in each of the encoder blocks.
64
+ drop_path_rate (`int`, *optional*, defaults to 0):
65
+ The dropout probability for stochastic depths, used in the blocks of the Transformer encoder.
66
+ mlp_ratios (`List[int]`, *optional*, defaults to `[2, 2, 2]`):
67
+ Ratio of the size of the hidden layer compared to the size of the input layer of the Mix FFNs in the
68
+ encoder blocks.
69
+ attention_ratios (`List[int]`, *optional*, defaults to `[2, 2, 2]`):
70
+ Ratio of the size of the output dimension compared to input dimension of attention layers.
71
+ initializer_range (`float`, *optional*, defaults to 0.02):
72
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
73
+
74
+ Example:
75
+
76
+ ```python
77
+ >>> from transformers import LevitConfig, LevitModel
78
+
79
+ >>> # Initializing a LeViT levit-128S style configuration
80
+ >>> configuration = LevitConfig()
81
+
82
+ >>> # Initializing a model (with random weights) from the levit-128S style configuration
83
+ >>> model = LevitModel(configuration)
84
+
85
+ >>> # Accessing the model configuration
86
+ >>> configuration = model.config
87
+ ```"""
88
+
89
+ model_type = "levit"
90
+
91
+ def __init__(
92
+ self,
93
+ image_size=224,
94
+ num_channels=3,
95
+ kernel_size=3,
96
+ stride=2,
97
+ padding=1,
98
+ patch_size=16,
99
+ hidden_sizes=[128, 256, 384],
100
+ num_attention_heads=[4, 8, 12],
101
+ depths=[4, 4, 4],
102
+ key_dim=[16, 16, 16],
103
+ drop_path_rate=0,
104
+ mlp_ratio=[2, 2, 2],
105
+ attention_ratio=[2, 2, 2],
106
+ initializer_range=0.02,
107
+ **kwargs,
108
+ ):
109
+ super().__init__(**kwargs)
110
+ self.image_size = image_size
111
+ self.num_channels = num_channels
112
+ self.kernel_size = kernel_size
113
+ self.stride = stride
114
+ self.padding = padding
115
+ self.hidden_sizes = hidden_sizes
116
+ self.num_attention_heads = num_attention_heads
117
+ self.depths = depths
118
+ self.key_dim = key_dim
119
+ self.drop_path_rate = drop_path_rate
120
+ self.patch_size = patch_size
121
+ self.attention_ratio = attention_ratio
122
+ self.mlp_ratio = mlp_ratio
123
+ self.initializer_range = initializer_range
124
+ self.down_ops = [
125
+ ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
126
+ ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
127
+ ]
128
+
129
+
130
+ # Copied from transformers.models.vit.configuration_vit.ViTOnnxConfig
131
+ class LevitOnnxConfig(OnnxConfig):
132
+ torch_onnx_minimum_version = version.parse("1.11")
133
+
134
+ @property
135
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
136
+ return OrderedDict(
137
+ [
138
+ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
139
+ ]
140
+ )
141
+
142
+ @property
143
+ def atol_for_validation(self) -> float:
144
+ return 1e-4
venv/lib/python3.10/site-packages/transformers/models/levit/convert_levit_timm_to_pytorch.py ADDED
@@ -0,0 +1,181 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert LeViT checkpoints from timm."""
16
+
17
+
18
+ import argparse
19
+ import json
20
+ from collections import OrderedDict
21
+ from functools import partial
22
+ from pathlib import Path
23
+
24
+ import timm
25
+ import torch
26
+ from huggingface_hub import hf_hub_download
27
+
28
+ from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
29
+ from transformers.utils import logging
30
+
31
+
32
+ logging.set_verbosity_info()
33
+ logger = logging.get_logger()
34
+
35
+
36
+ def convert_weight_and_push(
37
+ hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True
38
+ ):
39
+ print(f"Converting {name}...")
40
+
41
+ with torch.no_grad():
42
+ if hidden_sizes == 128:
43
+ if name[-1] == "S":
44
+ from_model = timm.create_model("levit_128s", pretrained=True)
45
+ else:
46
+ from_model = timm.create_model("levit_128", pretrained=True)
47
+ if hidden_sizes == 192:
48
+ from_model = timm.create_model("levit_192", pretrained=True)
49
+ if hidden_sizes == 256:
50
+ from_model = timm.create_model("levit_256", pretrained=True)
51
+ if hidden_sizes == 384:
52
+ from_model = timm.create_model("levit_384", pretrained=True)
53
+
54
+ from_model.eval()
55
+ our_model = LevitForImageClassificationWithTeacher(config).eval()
56
+ huggingface_weights = OrderedDict()
57
+
58
+ weights = from_model.state_dict()
59
+ og_keys = list(from_model.state_dict().keys())
60
+ new_keys = list(our_model.state_dict().keys())
61
+ print(len(og_keys), len(new_keys))
62
+ for i in range(len(og_keys)):
63
+ huggingface_weights[new_keys[i]] = weights[og_keys[i]]
64
+ our_model.load_state_dict(huggingface_weights)
65
+
66
+ x = torch.randn((2, 3, 224, 224))
67
+ out1 = from_model(x)
68
+ out2 = our_model(x).logits
69
+
70
+ assert torch.allclose(out1, out2), "The model logits don't match the original one."
71
+
72
+ checkpoint_name = name
73
+ print(checkpoint_name)
74
+
75
+ if push_to_hub:
76
+ our_model.save_pretrained(save_directory / checkpoint_name)
77
+ image_processor = LevitImageProcessor()
78
+ image_processor.save_pretrained(save_directory / checkpoint_name)
79
+
80
+ print(f"Pushed {checkpoint_name}")
81
+
82
+
83
+ def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
84
+ filename = "imagenet-1k-id2label.json"
85
+ num_labels = 1000
86
+ expected_shape = (1, num_labels)
87
+
88
+ repo_id = "huggingface/label-files"
89
+ num_labels = num_labels
90
+ id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
91
+ id2label = {int(k): v for k, v in id2label.items()}
92
+
93
+ id2label = id2label
94
+ label2id = {v: k for k, v in id2label.items()}
95
+
96
+ ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
97
+
98
+ names_to_hidden_sizes = {
99
+ "levit-128S": 128,
100
+ "levit-128": 128,
101
+ "levit-192": 192,
102
+ "levit-256": 256,
103
+ "levit-384": 384,
104
+ }
105
+
106
+ names_to_config = {
107
+ "levit-128S": ImageNetPreTrainedConfig(
108
+ hidden_sizes=[128, 256, 384],
109
+ num_attention_heads=[4, 6, 8],
110
+ depths=[2, 3, 4],
111
+ key_dim=[16, 16, 16],
112
+ drop_path_rate=0,
113
+ ),
114
+ "levit-128": ImageNetPreTrainedConfig(
115
+ hidden_sizes=[128, 256, 384],
116
+ num_attention_heads=[4, 8, 12],
117
+ depths=[4, 4, 4],
118
+ key_dim=[16, 16, 16],
119
+ drop_path_rate=0,
120
+ ),
121
+ "levit-192": ImageNetPreTrainedConfig(
122
+ hidden_sizes=[192, 288, 384],
123
+ num_attention_heads=[3, 5, 6],
124
+ depths=[4, 4, 4],
125
+ key_dim=[32, 32, 32],
126
+ drop_path_rate=0,
127
+ ),
128
+ "levit-256": ImageNetPreTrainedConfig(
129
+ hidden_sizes=[256, 384, 512],
130
+ num_attention_heads=[4, 6, 8],
131
+ depths=[4, 4, 4],
132
+ key_dim=[32, 32, 32],
133
+ drop_path_rate=0,
134
+ ),
135
+ "levit-384": ImageNetPreTrainedConfig(
136
+ hidden_sizes=[384, 512, 768],
137
+ num_attention_heads=[6, 9, 12],
138
+ depths=[4, 4, 4],
139
+ key_dim=[32, 32, 32],
140
+ drop_path_rate=0.1,
141
+ ),
142
+ }
143
+
144
+ if model_name:
145
+ convert_weight_and_push(
146
+ names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub
147
+ )
148
+ else:
149
+ for model_name, config in names_to_config.items():
150
+ convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
151
+ return config, expected_shape
152
+
153
+
154
+ if __name__ == "__main__":
155
+ parser = argparse.ArgumentParser()
156
+ # Required parameters
157
+ parser.add_argument(
158
+ "--model_name",
159
+ default=None,
160
+ type=str,
161
+ help="The name of the model you wish to convert; it must be one of the supported Levit* architectures.",
162
+ )
163
+ parser.add_argument(
164
+ "--pytorch_dump_folder_path",
165
+ default="levit-dump-folder/",
166
+ type=Path,
167
+ required=False,
168
+ help="Path to the output PyTorch model directory.",
169
+ )
170
+ parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
171
+ parser.add_argument(
172
+ "--no-push_to_hub",
173
+ dest="push_to_hub",
174
+ action="store_false",
175
+ help="Do not push model and image processor to the hub",
176
+ )
177
+
178
+ args = parser.parse_args()
179
+ pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
180
+ pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
181
+ convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
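
A small usage sketch for the script above, calling convert_weights_and_push directly instead of going through argparse (requires timm; the output directory name is a placeholder):

from pathlib import Path

dump_dir = Path("levit-dump-folder")  # placeholder output directory
dump_dir.mkdir(exist_ok=True, parents=True)

# Converts the timm levit_128s weights and checks the logits match; with
# push_to_hub=False nothing is saved or uploaded, the run is just a sanity check.
convert_weights_and_push(dump_dir, model_name="levit-128S", push_to_hub=False)
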
venv/lib/python3.10/site-packages/transformers/models/levit/feature_extraction_levit.py ADDED
@@ -0,0 +1,33 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Feature extractor class for LeViT."""
16
+
17
+ import warnings
18
+
19
+ from ...utils import logging
20
+ from .image_processing_levit import LevitImageProcessor
21
+
22
+
23
+ logger = logging.get_logger(__name__)
24
+
25
+
26
+ class LevitFeatureExtractor(LevitImageProcessor):
27
+ def __init__(self, *args, **kwargs) -> None:
28
+ warnings.warn(
29
+ "The class LevitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
30
+ " use LevitImageProcessor instead.",
31
+ FutureWarning,
32
+ )
33
+ super().__init__(*args, **kwargs)
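
The shim above only exists for backwards compatibility; instantiating it warns and hands back a regular image processor. A tiny sketch of that behaviour:

import warnings

from transformers import LevitFeatureExtractor, LevitImageProcessor

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    extractor = LevitFeatureExtractor()

assert any(issubclass(w.category, FutureWarning) for w in caught)
assert isinstance(extractor, LevitImageProcessor)  # subclass with no behaviour of its own
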
venv/lib/python3.10/site-packages/transformers/models/levit/image_processing_levit.py ADDED
@@ -0,0 +1,325 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Image processor class for LeViT."""
16
+
17
+ from typing import Dict, Iterable, Optional, Union
18
+
19
+ import numpy as np
20
+
21
+ from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
22
+ from ...image_transforms import (
23
+ get_resize_output_image_size,
24
+ resize,
25
+ to_channel_dimension_format,
26
+ )
27
+ from ...image_utils import (
28
+ IMAGENET_DEFAULT_MEAN,
29
+ IMAGENET_DEFAULT_STD,
30
+ ChannelDimension,
31
+ ImageInput,
32
+ PILImageResampling,
33
+ infer_channel_dimension_format,
34
+ is_scaled_image,
35
+ make_list_of_images,
36
+ to_numpy_array,
37
+ valid_images,
38
+ validate_kwargs,
39
+ validate_preprocess_arguments,
40
+ )
41
+ from ...utils import TensorType, logging
42
+
43
+
44
+ logger = logging.get_logger(__name__)
45
+
46
+
47
+ class LevitImageProcessor(BaseImageProcessor):
48
+ r"""
49
+ Constructs a LeViT image processor.
50
+
51
+ Args:
52
+ do_resize (`bool`, *optional*, defaults to `True`):
53
+ Whether to resize the shortest edge of the input to int(256/224 * `size`). Can be overridden by the
54
+ `do_resize` parameter in the `preprocess` method.
55
+ size (`Dict[str, int]`, *optional*, defaults to `{"shortest_edge": 224}`):
56
+ Size of the output image after resizing. If size is a dict with keys "width" and "height", the image will
57
+ be resized to `(size["height"], size["width"])`. If size is a dict with key "shortest_edge", the shortest
58
+ edge value `c` is rescaled to `int(c * (256/224))`. The smaller edge of the image will be matched to this
59
+ value i.e., if height > width, then image will be rescaled to `(size["shortest_edge"] * height / width,
60
+ size["shortest_edge"])`. Can be overridden by the `size` parameter in the `preprocess` method.
61
+ resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
62
+ Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the
63
+ `preprocess` method.
64
+ do_center_crop (`bool`, *optional*, defaults to `True`):
65
+ Whether or not to center crop the input to `(crop_size["height"], crop_size["width"])`. Can be overridden
66
+ by the `do_center_crop` parameter in the `preprocess` method.
67
+ crop_size (`Dict`, *optional*, defaults to `{"height": 224, "width": 224}`):
68
+ Desired image size after `center_crop`. Can be overridden by the `crop_size` parameter in the `preprocess`
69
+ method.
70
+ do_rescale (`bool`, *optional*, defaults to `True`):
71
+ Controls whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the
72
+ `do_rescale` parameter in the `preprocess` method.
73
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
74
+ Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
75
+ `preprocess` method.
76
+ do_normalize (`bool`, *optional*, defaults to `True`):
77
+ Controls whether to normalize the image. Can be overridden by the `do_normalize` parameter in the
78
+ `preprocess` method.
79
+ image_mean (`List[float]`, *optional*, defaults to `[0.485, 0.456, 0.406]`):
80
+ Mean to use if normalizing the image. This is a float or list of floats the length of the number of
81
+ channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
82
+ image_std (`List[float]`, *optional*, defaults to `[0.229, 0.224, 0.225]`):
83
+ Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
84
+ number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
85
+ """
86
+
87
+ model_input_names = ["pixel_values"]
88
+
89
+ def __init__(
90
+ self,
91
+ do_resize: bool = True,
92
+ size: Dict[str, int] = None,
93
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
94
+ do_center_crop: bool = True,
95
+ crop_size: Dict[str, int] = None,
96
+ do_rescale: bool = True,
97
+ rescale_factor: Union[int, float] = 1 / 255,
98
+ do_normalize: bool = True,
99
+ image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
100
+ image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
101
+ **kwargs,
102
+ ) -> None:
103
+ super().__init__(**kwargs)
104
+ size = size if size is not None else {"shortest_edge": 224}
105
+ size = get_size_dict(size, default_to_square=False)
106
+ crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
107
+ crop_size = get_size_dict(crop_size, param_name="crop_size")
108
+
109
+ self.do_resize = do_resize
110
+ self.size = size
111
+ self.resample = resample
112
+ self.do_center_crop = do_center_crop
113
+ self.crop_size = crop_size
114
+ self.do_rescale = do_rescale
115
+ self.rescale_factor = rescale_factor
116
+ self.do_normalize = do_normalize
117
+ self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
118
+ self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
119
+ self._valid_processor_keys = [
120
+ "images",
121
+ "do_resize",
122
+ "size",
123
+ "resample",
124
+ "do_center_crop",
125
+ "crop_size",
126
+ "do_rescale",
127
+ "rescale_factor",
128
+ "do_normalize",
129
+ "image_mean",
130
+ "image_std",
131
+ "return_tensors",
132
+ "data_format",
133
+ "input_data_format",
134
+ ]
135
+
136
+ def resize(
137
+ self,
138
+ image: np.ndarray,
139
+ size: Dict[str, int],
140
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
141
+ data_format: Optional[Union[str, ChannelDimension]] = None,
142
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
143
+ **kwargs,
144
+ ) -> np.ndarray:
145
+ """
146
+ Resize an image.
147
+
148
+ If size is a dict with keys "width" and "height", the image will be resized to `(size["height"],
149
+ size["width"])`.
150
+
151
+ If size is a dict with key "shortest_edge", the shortest edge value `c` is rescaled to `int(c * (256/224))`.
152
+ The smaller edge of the image will be matched to this value, i.e., if height > width, then the image will be rescaled
153
+ to `(size["shortest_edge"] * height / width, size["shortest_edge"])`.
154
+
155
+ Args:
156
+ image (`np.ndarray`):
157
+ Image to resize.
158
+ size (`Dict[str, int]`):
159
+ Size of the output image after resizing. If size is a dict with keys "width" and "height", the image
160
+ will be resized to (height, width). If size is a dict with key "shortest_edge", the shortest edge value
161
+ `c` is rescaled to int(`c` * (256/224)). The smaller edge of the image will be matched to this value
162
+ i.e., if height > width, then the image will be rescaled to (size * height / width, size).
163
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
164
+ Resampling filter to use when resizing the image.
165
+ data_format (`str` or `ChannelDimension`, *optional*):
166
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
167
+ input_data_format (`ChannelDimension` or `str`, *optional*):
168
+ The channel dimension format of the input image. If not provided, it will be inferred.
169
+ """
170
+ size_dict = get_size_dict(size, default_to_square=False)
171
+ # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
172
+ if "shortest_edge" in size:
173
+ shortest_edge = int((256 / 224) * size["shortest_edge"])
174
+ output_size = get_resize_output_image_size(
175
+ image, size=shortest_edge, default_to_square=False, input_data_format=input_data_format
176
+ )
177
+ size_dict = {"height": output_size[0], "width": output_size[1]}
178
+ if "height" not in size_dict or "width" not in size_dict:
179
+ raise ValueError(
180
+ f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
181
+ )
182
+ return resize(
183
+ image,
184
+ size=(size_dict["height"], size_dict["width"]),
185
+ resample=resample,
186
+ data_format=data_format,
187
+ input_data_format=input_data_format,
188
+ **kwargs,
189
+ )
190
+
191
+ def preprocess(
192
+ self,
193
+ images: ImageInput,
194
+ do_resize: Optional[bool] = None,
195
+ size: Optional[Dict[str, int]] = None,
196
+ resample: PILImageResampling = None,
197
+ do_center_crop: Optional[bool] = None,
198
+ crop_size: Optional[Dict[str, int]] = None,
199
+ do_rescale: Optional[bool] = None,
200
+ rescale_factor: Optional[float] = None,
201
+ do_normalize: Optional[bool] = None,
202
+ image_mean: Optional[Union[float, Iterable[float]]] = None,
203
+ image_std: Optional[Union[float, Iterable[float]]] = None,
204
+ return_tensors: Optional[TensorType] = None,
205
+ data_format: ChannelDimension = ChannelDimension.FIRST,
206
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
207
+ **kwargs,
208
+ ) -> BatchFeature:
209
+ """
210
+ Preprocess an image or batch of images to be used as input to a LeViT model.
211
+
212
+ Args:
213
+ images (`ImageInput`):
214
+ Image or batch of images to preprocess. Expects a single or batch of images with pixel values ranging
215
+ from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`.
216
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
217
+ Whether to resize the image.
218
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
219
+ Size of the output image after resizing. If size is a dict with keys "width" and "height", the image
220
+ will be resized to (height, width). If size is a dict with key "shortest_edge", the shortest edge value
221
+ `c` is rescaled to int(`c` * (256/224)). The smaller edge of the image will be matched to this value
222
+ i.e., if height > width, then the image will be rescaled to (size * height / width, size).
223
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
224
+ Resampling filter to use when resizing the image.
225
+ do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
226
+ Whether to center crop the image.
227
+ crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
228
+ Size of the output image after center cropping. Crops images to (crop_size["height"],
229
+ crop_size["width"]).
230
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
231
+ Whether to rescale the image pixel values by `rescale_factor`, typically to values between 0 and 1.
232
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
233
+ Factor to rescale the image pixel values by.
234
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
235
+ Whether to normalize the image pixel values by `image_mean` and `image_std`.
236
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
237
+ Mean to normalize the image pixel values by.
238
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
239
+ Standard deviation to normalize the image pixel values by.
240
+ return_tensors (`str` or `TensorType`, *optional*):
241
+ The type of tensors to return. Can be one of:
242
+ - Unset: Return a list of `np.ndarray`.
243
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
244
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
245
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
246
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
247
+ data_format (`str` or `ChannelDimension`, *optional*, defaults to `ChannelDimension.FIRST`):
248
+ The channel dimension format for the output image. If unset, the channel dimension format of the input
249
+ image is used. Can be one of:
250
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
251
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
252
+ input_data_format (`ChannelDimension` or `str`, *optional*):
253
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
254
+ from the input image. Can be one of:
255
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
256
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
257
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
258
+ """
259
+ do_resize = do_resize if do_resize is not None else self.do_resize
260
+ resample = resample if resample is not None else self.resample
261
+ do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
262
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
263
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
264
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
265
+ image_mean = image_mean if image_mean is not None else self.image_mean
266
+ image_std = image_std if image_std is not None else self.image_std
267
+
268
+ size = size if size is not None else self.size
269
+ size = get_size_dict(size, default_to_square=False)
270
+ crop_size = crop_size if crop_size is not None else self.crop_size
271
+ crop_size = get_size_dict(crop_size, param_name="crop_size")
272
+ images = make_list_of_images(images)
273
+
274
+ validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
275
+
276
+ if not valid_images(images):
277
+ raise ValueError(
278
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
279
+ "torch.Tensor, tf.Tensor or jax.ndarray."
280
+ )
281
+ validate_preprocess_arguments(
282
+ do_rescale=do_rescale,
283
+ rescale_factor=rescale_factor,
284
+ do_normalize=do_normalize,
285
+ image_mean=image_mean,
286
+ image_std=image_std,
287
+ do_center_crop=do_center_crop,
288
+ crop_size=crop_size,
289
+ do_resize=do_resize,
290
+ size=size,
291
+ resample=resample,
292
+ )
293
+ # All transformations expect numpy arrays.
294
+ images = [to_numpy_array(image) for image in images]
295
+
296
+ if is_scaled_image(images[0]) and do_rescale:
297
+ logger.warning_once(
298
+ "It looks like you are trying to rescale already rescaled images. If the input"
299
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
300
+ )
301
+
302
+ if input_data_format is None:
303
+ # We assume that all images have the same channel dimension format.
304
+ input_data_format = infer_channel_dimension_format(images[0])
305
+
306
+ if do_resize:
307
+ images = [self.resize(image, size, resample, input_data_format=input_data_format) for image in images]
308
+
309
+ if do_center_crop:
310
+ images = [self.center_crop(image, crop_size, input_data_format=input_data_format) for image in images]
311
+
312
+ if do_rescale:
313
+ images = [self.rescale(image, rescale_factor, input_data_format=input_data_format) for image in images]
314
+
315
+ if do_normalize:
316
+ images = [
317
+ self.normalize(image, image_mean, image_std, input_data_format=input_data_format) for image in images
318
+ ]
319
+
320
+ images = [
321
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
322
+ ]
323
+
324
+ data = {"pixel_values": images}
325
+ return BatchFeature(data=data, tensor_type=return_tensors)
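
To round off the image processor, a short end-to-end sketch of preprocess via __call__ (the random array stands in for a real image; any PIL image, numpy array, or torch tensor with values in 0-255 works):

import numpy as np

from transformers import LevitImageProcessor

image_processor = LevitImageProcessor()  # resize shortest edge to 256, center crop to 224x224
image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)  # dummy HxWxC image

inputs = image_processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])
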
venv/lib/python3.10/site-packages/transformers/models/levit/modeling_levit.py ADDED
@@ -0,0 +1,737 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch LeViT model."""
16
+
17
+ import itertools
18
+ from dataclasses import dataclass
19
+ from typing import Optional, Tuple, Union
20
+
21
+ import torch
22
+ import torch.utils.checkpoint
23
+ from torch import nn
24
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
25
+
26
+ from ...modeling_outputs import (
27
+ BaseModelOutputWithNoAttention,
28
+ BaseModelOutputWithPoolingAndNoAttention,
29
+ ImageClassifierOutputWithNoAttention,
30
+ ModelOutput,
31
+ )
32
+ from ...modeling_utils import PreTrainedModel
33
+ from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
34
+ from .configuration_levit import LevitConfig
35
+
36
+
37
+ logger = logging.get_logger(__name__)
38
+
39
+ # General docstring
40
+ _CONFIG_FOR_DOC = "LevitConfig"
41
+
42
+ # Base docstring
43
+ _CHECKPOINT_FOR_DOC = "facebook/levit-128S"
44
+ _EXPECTED_OUTPUT_SHAPE = [1, 16, 384]
45
+
46
+ # Image classification docstring
47
+ _IMAGE_CLASS_CHECKPOINT = "facebook/levit-128S"
48
+ _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
49
+
50
+
51
+ from ..deprecated._archive_maps import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
52
+
53
+
54
+ @dataclass
55
+ class LevitForImageClassificationWithTeacherOutput(ModelOutput):
56
+ """
57
+ Output type of [`LevitForImageClassificationWithTeacher`].
58
+
59
+ Args:
60
+ logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
61
+ Prediction scores as the average of the `cls_logits` and `distillation_logits`.
62
+ cls_logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
63
+ Prediction scores of the classification head (i.e. the linear layer on top of the final hidden state of the
64
+ class token).
65
+ distillation_logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
66
+ Prediction scores of the distillation head (i.e. the linear layer on top of the final hidden state of the
67
+ distillation token).
68
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
69
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
70
+ shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
71
+ plus the initial embedding outputs.
72
+ """
73
+
74
+ logits: torch.FloatTensor = None
75
+ cls_logits: torch.FloatTensor = None
76
+ distillation_logits: torch.FloatTensor = None
77
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
78
+
79
+
80
+ class LevitConvEmbeddings(nn.Module):
81
+ """
82
+ LeViT Conv Embeddings with Batch Norm, used in the initial patch embedding layer.
83
+ """
84
+
85
+ def __init__(
86
+ self, in_channels, out_channels, kernel_size, stride, padding, dilation=1, groups=1, bn_weight_init=1
87
+ ):
88
+ super().__init__()
89
+ self.convolution = nn.Conv2d(
90
+ in_channels, out_channels, kernel_size, stride, padding, dilation=dilation, groups=groups, bias=False
91
+ )
92
+ self.batch_norm = nn.BatchNorm2d(out_channels)
93
+
94
+ def forward(self, embeddings):
95
+ embeddings = self.convolution(embeddings)
96
+ embeddings = self.batch_norm(embeddings)
97
+ return embeddings
98
+
99
+
100
+ class LevitPatchEmbeddings(nn.Module):
101
+ """
102
+ LeViT patch embeddings, for final embeddings to be passed to transformer blocks. It consists of multiple
103
+ `LevitConvEmbeddings`.
104
+ """
105
+
106
+ def __init__(self, config):
107
+ super().__init__()
108
+ self.embedding_layer_1 = LevitConvEmbeddings(
109
+ config.num_channels, config.hidden_sizes[0] // 8, config.kernel_size, config.stride, config.padding
110
+ )
111
+ self.activation_layer_1 = nn.Hardswish()
112
+
113
+ self.embedding_layer_2 = LevitConvEmbeddings(
114
+ config.hidden_sizes[0] // 8, config.hidden_sizes[0] // 4, config.kernel_size, config.stride, config.padding
115
+ )
116
+ self.activation_layer_2 = nn.Hardswish()
117
+
118
+ self.embedding_layer_3 = LevitConvEmbeddings(
119
+ config.hidden_sizes[0] // 4, config.hidden_sizes[0] // 2, config.kernel_size, config.stride, config.padding
120
+ )
121
+ self.activation_layer_3 = nn.Hardswish()
122
+
123
+ self.embedding_layer_4 = LevitConvEmbeddings(
124
+ config.hidden_sizes[0] // 2, config.hidden_sizes[0], config.kernel_size, config.stride, config.padding
125
+ )
126
+ self.num_channels = config.num_channels
127
+
128
+ def forward(self, pixel_values):
129
+ num_channels = pixel_values.shape[1]
130
+ if num_channels != self.num_channels:
131
+ raise ValueError(
132
+ "Make sure that the channel dimension of the pixel values matches the one set in the configuration."
133
+ )
134
+ embeddings = self.embedding_layer_1(pixel_values)
135
+ embeddings = self.activation_layer_1(embeddings)
136
+ embeddings = self.embedding_layer_2(embeddings)
137
+ embeddings = self.activation_layer_2(embeddings)
138
+ embeddings = self.embedding_layer_3(embeddings)
139
+ embeddings = self.activation_layer_3(embeddings)
140
+ embeddings = self.embedding_layer_4(embeddings)
141
+ return embeddings.flatten(2).transpose(1, 2)
142
+
143
+
144
+ class MLPLayerWithBN(nn.Module):
145
+ def __init__(self, input_dim, output_dim, bn_weight_init=1):
146
+ super().__init__()
147
+ self.linear = nn.Linear(in_features=input_dim, out_features=output_dim, bias=False)
148
+ self.batch_norm = nn.BatchNorm1d(output_dim)
149
+
150
+ def forward(self, hidden_state):
151
+ hidden_state = self.linear(hidden_state)
152
+ hidden_state = self.batch_norm(hidden_state.flatten(0, 1)).reshape_as(hidden_state)
153
+ return hidden_state
154
+
155
+
156
+ class LevitSubsample(nn.Module):
157
+ def __init__(self, stride, resolution):
158
+ super().__init__()
159
+ self.stride = stride
160
+ self.resolution = resolution
161
+
162
+ def forward(self, hidden_state):
163
+ batch_size, _, channels = hidden_state.shape
164
+ hidden_state = hidden_state.view(batch_size, self.resolution, self.resolution, channels)[
165
+ :, :: self.stride, :: self.stride
166
+ ].reshape(batch_size, -1, channels)
167
+ return hidden_state
168
+
169
+
170
+ class LevitAttention(nn.Module):
171
+ def __init__(self, hidden_sizes, key_dim, num_attention_heads, attention_ratio, resolution):
172
+ super().__init__()
173
+ self.num_attention_heads = num_attention_heads
174
+ self.scale = key_dim**-0.5
175
+ self.key_dim = key_dim
176
+ self.attention_ratio = attention_ratio
177
+ self.out_dim_keys_values = attention_ratio * key_dim * num_attention_heads + key_dim * num_attention_heads * 2
178
+ self.out_dim_projection = attention_ratio * key_dim * num_attention_heads
179
+
180
+ self.queries_keys_values = MLPLayerWithBN(hidden_sizes, self.out_dim_keys_values)
181
+ self.activation = nn.Hardswish()
182
+ self.projection = MLPLayerWithBN(self.out_dim_projection, hidden_sizes, bn_weight_init=0)
183
+
184
+ points = list(itertools.product(range(resolution), range(resolution)))
185
+ len_points = len(points)
186
+ attention_offsets, indices = {}, []
187
+ for p1 in points:
188
+ for p2 in points:
189
+ offset = (abs(p1[0] - p2[0]), abs(p1[1] - p2[1]))
190
+ if offset not in attention_offsets:
191
+ attention_offsets[offset] = len(attention_offsets)
192
+ indices.append(attention_offsets[offset])
193
+
194
+ self.attention_bias_cache = {}
195
+ self.attention_biases = torch.nn.Parameter(torch.zeros(num_attention_heads, len(attention_offsets)))
196
+ self.register_buffer(
197
+ "attention_bias_idxs", torch.LongTensor(indices).view(len_points, len_points), persistent=False
198
+ )
199
+
200
+ @torch.no_grad()
201
+ def train(self, mode=True):
202
+ super().train(mode)
203
+ if mode and self.attention_bias_cache:
204
+ self.attention_bias_cache = {} # clear ab cache
205
+
206
+ def get_attention_biases(self, device):
207
+ if self.training:
208
+ return self.attention_biases[:, self.attention_bias_idxs]
209
+ else:
210
+ device_key = str(device)
211
+ if device_key not in self.attention_bias_cache:
212
+ self.attention_bias_cache[device_key] = self.attention_biases[:, self.attention_bias_idxs]
213
+ return self.attention_bias_cache[device_key]
214
+
215
+ def forward(self, hidden_state):
216
+ batch_size, seq_length, _ = hidden_state.shape
217
+ queries_keys_values = self.queries_keys_values(hidden_state)
218
+ query, key, value = queries_keys_values.view(batch_size, seq_length, self.num_attention_heads, -1).split(
219
+ [self.key_dim, self.key_dim, self.attention_ratio * self.key_dim], dim=3
220
+ )
221
+ query = query.permute(0, 2, 1, 3)
222
+ key = key.permute(0, 2, 1, 3)
223
+ value = value.permute(0, 2, 1, 3)
224
+
225
+ attention = query @ key.transpose(-2, -1) * self.scale + self.get_attention_biases(hidden_state.device)
226
+ attention = attention.softmax(dim=-1)
227
+ hidden_state = (attention @ value).transpose(1, 2).reshape(batch_size, seq_length, self.out_dim_projection)
228
+ hidden_state = self.projection(self.activation(hidden_state))
229
+ return hidden_state
230
+
231
+
232
+ class LevitAttentionSubsample(nn.Module):
233
+ def __init__(
234
+ self,
235
+ input_dim,
236
+ output_dim,
237
+ key_dim,
238
+ num_attention_heads,
239
+ attention_ratio,
240
+ stride,
241
+ resolution_in,
242
+ resolution_out,
243
+ ):
244
+ super().__init__()
245
+ self.num_attention_heads = num_attention_heads
246
+ self.scale = key_dim**-0.5
247
+ self.key_dim = key_dim
248
+ self.attention_ratio = attention_ratio
249
+ self.out_dim_keys_values = attention_ratio * key_dim * num_attention_heads + key_dim * num_attention_heads
250
+ self.out_dim_projection = attention_ratio * key_dim * num_attention_heads
251
+ self.resolution_out = resolution_out
252
+ # resolution_in is the initial resolution, resolution_out is the final resolution after downsampling
253
+ self.keys_values = MLPLayerWithBN(input_dim, self.out_dim_keys_values)
254
+ self.queries_subsample = LevitSubsample(stride, resolution_in)
255
+ self.queries = MLPLayerWithBN(input_dim, key_dim * num_attention_heads)
256
+ self.activation = nn.Hardswish()
257
+ self.projection = MLPLayerWithBN(self.out_dim_projection, output_dim)
258
+
259
+ self.attention_bias_cache = {}
260
+
261
+ points = list(itertools.product(range(resolution_in), range(resolution_in)))
262
+ points_ = list(itertools.product(range(resolution_out), range(resolution_out)))
263
+ len_points, len_points_ = len(points), len(points_)
264
+ attention_offsets, indices = {}, []
265
+ for p1 in points_:
266
+ for p2 in points:
267
+ size = 1
268
+ offset = (abs(p1[0] * stride - p2[0] + (size - 1) / 2), abs(p1[1] * stride - p2[1] + (size - 1) / 2))
269
+ if offset not in attention_offsets:
270
+ attention_offsets[offset] = len(attention_offsets)
271
+ indices.append(attention_offsets[offset])
272
+
273
+ self.attention_biases = torch.nn.Parameter(torch.zeros(num_attention_heads, len(attention_offsets)))
274
+ self.register_buffer(
275
+ "attention_bias_idxs", torch.LongTensor(indices).view(len_points_, len_points), persistent=False
276
+ )
277
+
278
+ @torch.no_grad()
279
+ def train(self, mode=True):
280
+ super().train(mode)
281
+ if mode and self.attention_bias_cache:
282
+ self.attention_bias_cache = {} # clear ab cache
283
+
284
+ def get_attention_biases(self, device):
285
+ if self.training:
286
+ return self.attention_biases[:, self.attention_bias_idxs]
287
+ else:
288
+ device_key = str(device)
289
+ if device_key not in self.attention_bias_cache:
290
+ self.attention_bias_cache[device_key] = self.attention_biases[:, self.attention_bias_idxs]
291
+ return self.attention_bias_cache[device_key]
292
+
293
+ def forward(self, hidden_state):
294
+ batch_size, seq_length, _ = hidden_state.shape
295
+ key, value = (
296
+ self.keys_values(hidden_state)
297
+ .view(batch_size, seq_length, self.num_attention_heads, -1)
298
+ .split([self.key_dim, self.attention_ratio * self.key_dim], dim=3)
299
+ )
300
+ key = key.permute(0, 2, 1, 3)
301
+ value = value.permute(0, 2, 1, 3)
302
+
303
+ query = self.queries(self.queries_subsample(hidden_state))
304
+ query = query.view(batch_size, self.resolution_out**2, self.num_attention_heads, self.key_dim).permute(
305
+ 0, 2, 1, 3
306
+ )
307
+
308
+ attention = query @ key.transpose(-2, -1) * self.scale + self.get_attention_biases(hidden_state.device)
309
+ attention = attention.softmax(dim=-1)
310
+ hidden_state = (attention @ value).transpose(1, 2).reshape(batch_size, -1, self.out_dim_projection)
311
+ hidden_state = self.projection(self.activation(hidden_state))
312
+ return hidden_state
313
+
314
+
315
+ class LevitMLPLayer(nn.Module):
316
+ """
317
+ MLP Layer with `2X` expansion in contrast to ViT with `4X`.
318
+ """
319
+
320
+ def __init__(self, input_dim, hidden_dim):
321
+ super().__init__()
322
+ self.linear_up = MLPLayerWithBN(input_dim, hidden_dim)
323
+ self.activation = nn.Hardswish()
324
+ self.linear_down = MLPLayerWithBN(hidden_dim, input_dim)
325
+
326
+ def forward(self, hidden_state):
327
+ hidden_state = self.linear_up(hidden_state)
328
+ hidden_state = self.activation(hidden_state)
329
+ hidden_state = self.linear_down(hidden_state)
330
+ return hidden_state
331
+
332
+
333
+ class LevitResidualLayer(nn.Module):
+     """
+     Residual Block for LeViT
+     """
+
+     def __init__(self, module, drop_rate):
+         super().__init__()
+         self.module = module
+         self.drop_rate = drop_rate
+
+     def forward(self, hidden_state):
+         if self.training and self.drop_rate > 0:
+             rnd = torch.rand(hidden_state.size(0), 1, 1, device=hidden_state.device)
+             rnd = rnd.ge_(self.drop_rate).div(1 - self.drop_rate).detach()
+             hidden_state = hidden_state + self.module(hidden_state) * rnd
+             return hidden_state
+         else:
+             hidden_state = hidden_state + self.module(hidden_state)
+             return hidden_state
+
+
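A self-contained sketch of the per-sample drop-path rule used in `LevitResidualLayer.forward`: during training the residual branch is either zeroed or rescaled by `1 / (1 - drop_rate)`, so its expected contribution matches eval mode (the toy branch below is an assumption, not the real sub-module):

import torch

drop_rate = 0.2
hidden_state = torch.ones(10_000, 1, 1)
branch = 0.5 * hidden_state                                    # stand-in for self.module(hidden_state)

rnd = torch.rand(hidden_state.size(0), 1, 1).ge_(drop_rate).div(1 - drop_rate)
train_out = hidden_state + branch * rnd                        # per-sample keep/drop, rescaled
eval_out = hidden_state + branch
print(train_out.mean().item(), eval_out.mean().item())         # both close to 1.5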
+ class LevitStage(nn.Module):
+     """
+     LeViT Stage consisting of `LevitMLPLayer` and `LevitAttention` layers.
+     """
+
+     def __init__(
+         self,
+         config,
+         idx,
+         hidden_sizes,
+         key_dim,
+         depths,
+         num_attention_heads,
+         attention_ratio,
+         mlp_ratio,
+         down_ops,
+         resolution_in,
+     ):
+         super().__init__()
+         self.layers = []
+         self.config = config
+         self.resolution_in = resolution_in
+         # resolution_in is the initial resolution, resolution_out is the final resolution after downsampling
+         for _ in range(depths):
+             self.layers.append(
+                 LevitResidualLayer(
+                     LevitAttention(hidden_sizes, key_dim, num_attention_heads, attention_ratio, resolution_in),
+                     self.config.drop_path_rate,
+                 )
+             )
+             if mlp_ratio > 0:
+                 hidden_dim = hidden_sizes * mlp_ratio
+                 self.layers.append(
+                     LevitResidualLayer(LevitMLPLayer(hidden_sizes, hidden_dim), self.config.drop_path_rate)
+                 )
+
+         if down_ops[0] == "Subsample":
+             self.resolution_out = (self.resolution_in - 1) // down_ops[5] + 1
+             self.layers.append(
+                 LevitAttentionSubsample(
+                     *self.config.hidden_sizes[idx : idx + 2],
+                     key_dim=down_ops[1],
+                     num_attention_heads=down_ops[2],
+                     attention_ratio=down_ops[3],
+                     stride=down_ops[5],
+                     resolution_in=resolution_in,
+                     resolution_out=self.resolution_out,
+                 )
+             )
+             self.resolution_in = self.resolution_out
+             if down_ops[4] > 0:
+                 hidden_dim = self.config.hidden_sizes[idx + 1] * down_ops[4]
+                 self.layers.append(
+                     LevitResidualLayer(
+                         LevitMLPLayer(self.config.hidden_sizes[idx + 1], hidden_dim), self.config.drop_path_rate
+                     )
+                 )
+
+         self.layers = nn.ModuleList(self.layers)
+
+     def get_resolution(self):
+         return self.resolution_in
+
+     def forward(self, hidden_state):
+         for layer in self.layers:
+             hidden_state = layer(hidden_state)
+         return hidden_state
+
+
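For reference, this is the positional layout of a `down_ops` entry as it is consumed above; the concrete numbers are purely illustrative, not the `LevitConfig` defaults:

# ["Subsample", key_dim, num_attention_heads, attention_ratio, mlp_ratio, stride]
example_down_op = ["Subsample", 16, 8, 4, 2, 2]
# -> LevitAttentionSubsample(key_dim=16, num_attention_heads=8, attention_ratio=4, stride=2, ...)
#    plus a trailing LevitMLPLayer with hidden_dim = hidden_sizes[idx + 1] * 2, since mlp_ratio (index 4) > 0
#    resolution_out = (resolution_in - 1) // 2 + 1, i.e. roughly halved by the stride-2 subsampling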
+ class LevitEncoder(nn.Module):
+     """
+     LeViT Encoder consisting of multiple `LevitStage` stages.
+     """
+
+     def __init__(self, config):
+         super().__init__()
+         self.config = config
+         resolution = self.config.image_size // self.config.patch_size
+         self.stages = []
+         self.config.down_ops.append([""])
+
+         for stage_idx in range(len(config.depths)):
+             stage = LevitStage(
+                 config,
+                 stage_idx,
+                 config.hidden_sizes[stage_idx],
+                 config.key_dim[stage_idx],
+                 config.depths[stage_idx],
+                 config.num_attention_heads[stage_idx],
+                 config.attention_ratio[stage_idx],
+                 config.mlp_ratio[stage_idx],
+                 config.down_ops[stage_idx],
+                 resolution,
+             )
+             resolution = stage.get_resolution()
+             self.stages.append(stage)
+
+         self.stages = nn.ModuleList(self.stages)
+
+     def forward(self, hidden_state, output_hidden_states=False, return_dict=True):
+         all_hidden_states = () if output_hidden_states else None
+
+         for stage in self.stages:
+             if output_hidden_states:
+                 all_hidden_states = all_hidden_states + (hidden_state,)
+             hidden_state = stage(hidden_state)
+
+         if output_hidden_states:
+             all_hidden_states = all_hidden_states + (hidden_state,)
+         if not return_dict:
+             return tuple(v for v in [hidden_state, all_hidden_states] if v is not None)
+
+         return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=all_hidden_states)
+
+
+ class LevitClassificationLayer(nn.Module):
+     """
+     LeViT Classification Layer
+     """
+
+     def __init__(self, input_dim, output_dim):
+         super().__init__()
+         self.batch_norm = nn.BatchNorm1d(input_dim)
+         self.linear = nn.Linear(input_dim, output_dim)
+
+     def forward(self, hidden_state):
+         hidden_state = self.batch_norm(hidden_state)
+         logits = self.linear(hidden_state)
+         return logits
+
+
+ class LevitPreTrainedModel(PreTrainedModel):
+     """
+     An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+     models.
+     """
+
+     config_class = LevitConfig
+     base_model_prefix = "levit"
+     main_input_name = "pixel_values"
+
+     def _init_weights(self, module):
+         """Initialize the weights"""
+         if isinstance(module, (nn.Linear, nn.Conv2d)):
+             # Slightly different from the TF version which uses truncated_normal for initialization
+             # cf https://github.com/pytorch/pytorch/pull/5617
+             module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+             if module.bias is not None:
+                 module.bias.data.zero_()
+         elif isinstance(module, (nn.BatchNorm1d, nn.BatchNorm2d)):
+             module.bias.data.zero_()
+             module.weight.data.fill_(1.0)
+
+
+ LEVIT_START_DOCSTRING = r"""
+     This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
+     as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
+     behavior.
+
+     Parameters:
+         config ([`LevitConfig`]): Model configuration class with all the parameters of the model.
+             Initializing with a config file does not load the weights associated with the model, only the
+             configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+ """
+
+ LEVIT_INPUTS_DOCSTRING = r"""
+     Args:
+         pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+             Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
+             [`LevitImageProcessor.__call__`] for details.
+
+         output_hidden_states (`bool`, *optional*):
+             Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+             more detail.
+         return_dict (`bool`, *optional*):
+             Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+
+
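A hedged end-to-end usage sketch for the inputs described above (the `facebook/levit-128S` checkpoint name and the blank test image are assumptions; any LeViT checkpoint and a real image work the same way):

import torch
from PIL import Image
from transformers import AutoImageProcessor, LevitModel

image = Image.new("RGB", (224, 224))                          # stand-in for a real image
image_processor = AutoImageProcessor.from_pretrained("facebook/levit-128S")
model = LevitModel.from_pretrained("facebook/levit-128S")

inputs = image_processor(images=image, return_tensors="pt")   # {"pixel_values": (1, 3, 224, 224)}
with torch.no_grad():
    outputs = model(**inputs, output_hidden_states=True)
print(outputs.last_hidden_state.shape, len(outputs.hidden_states))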
+ @add_start_docstrings(
+     "The bare Levit model outputting raw features without any specific head on top.",
+     LEVIT_START_DOCSTRING,
+ )
+ class LevitModel(LevitPreTrainedModel):
+     def __init__(self, config):
+         super().__init__(config)
+         self.config = config
+         self.patch_embeddings = LevitPatchEmbeddings(config)
+         self.encoder = LevitEncoder(config)
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     @add_start_docstrings_to_model_forward(LEVIT_INPUTS_DOCSTRING)
+     @add_code_sample_docstrings(
+         checkpoint=_CHECKPOINT_FOR_DOC,
+         output_type=BaseModelOutputWithPoolingAndNoAttention,
+         config_class=_CONFIG_FOR_DOC,
+         modality="vision",
+         expected_output=_EXPECTED_OUTPUT_SHAPE,
+     )
+     def forward(
+         self,
+         pixel_values: torch.FloatTensor = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+     ) -> Union[Tuple, BaseModelOutputWithPoolingAndNoAttention]:
+         output_hidden_states = (
+             output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+         )
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         if pixel_values is None:
+             raise ValueError("You have to specify pixel_values")
+
+         embeddings = self.patch_embeddings(pixel_values)
+         encoder_outputs = self.encoder(
+             embeddings,
+             output_hidden_states=output_hidden_states,
+             return_dict=return_dict,
+         )
+
+         last_hidden_state = encoder_outputs[0]
+
+         # global average pooling, (batch_size, seq_length, hidden_sizes) -> (batch_size, hidden_sizes)
+         pooled_output = last_hidden_state.mean(dim=1)
+
+         if not return_dict:
+             return (last_hidden_state, pooled_output) + encoder_outputs[1:]
+
+         return BaseModelOutputWithPoolingAndNoAttention(
+             last_hidden_state=last_hidden_state,
+             pooler_output=pooled_output,
+             hidden_states=encoder_outputs.hidden_states,
+         )
+
+
+ @add_start_docstrings(
+     """
+     Levit Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
+     ImageNet.
+     """,
+     LEVIT_START_DOCSTRING,
+ )
+ class LevitForImageClassification(LevitPreTrainedModel):
+     def __init__(self, config):
+         super().__init__(config)
+         self.config = config
+         self.num_labels = config.num_labels
+         self.levit = LevitModel(config)
+
+         # Classifier head
+         self.classifier = (
+             LevitClassificationLayer(config.hidden_sizes[-1], config.num_labels)
+             if config.num_labels > 0
+             else torch.nn.Identity()
+         )
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     @add_start_docstrings_to_model_forward(LEVIT_INPUTS_DOCSTRING)
+     @add_code_sample_docstrings(
+         checkpoint=_IMAGE_CLASS_CHECKPOINT,
+         output_type=ImageClassifierOutputWithNoAttention,
+         config_class=_CONFIG_FOR_DOC,
+         expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
+     )
+     def forward(
+         self,
+         pixel_values: torch.FloatTensor = None,
+         labels: Optional[torch.LongTensor] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+     ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
+         r"""
+         labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+             Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
+             config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
+             `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+         """
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         outputs = self.levit(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
+
+         sequence_output = outputs[0]
+         sequence_output = sequence_output.mean(1)
+         logits = self.classifier(sequence_output)
+
+         loss = None
+         if labels is not None:
+             if self.config.problem_type is None:
+                 if self.num_labels == 1:
+                     self.config.problem_type = "regression"
+                 elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+                     self.config.problem_type = "single_label_classification"
+                 else:
+                     self.config.problem_type = "multi_label_classification"
+
+             if self.config.problem_type == "regression":
+                 loss_fct = MSELoss()
+                 if self.num_labels == 1:
+                     loss = loss_fct(logits.squeeze(), labels.squeeze())
+                 else:
+                     loss = loss_fct(logits, labels)
+             elif self.config.problem_type == "single_label_classification":
+                 loss_fct = CrossEntropyLoss()
+                 loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+             elif self.config.problem_type == "multi_label_classification":
+                 loss_fct = BCEWithLogitsLoss()
+                 loss = loss_fct(logits, labels)
+         if not return_dict:
+             output = (logits,) + outputs[2:]
+             return ((loss,) + output) if loss is not None else output
+
+         return ImageClassifierOutputWithNoAttention(
+             loss=loss,
+             logits=logits,
+             hidden_states=outputs.hidden_states,
+         )
+
+
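A small standalone sketch of the `problem_type` branches above, using made-up logits and labels to show which loss each branch pairs with:

import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

num_labels = 3
logits = torch.randn(4, num_labels)

# single_label_classification: integer class ids -> cross-entropy over flattened logits
int_labels = torch.tensor([0, 2, 1, 1])
print(CrossEntropyLoss()(logits.view(-1, num_labels), int_labels.view(-1)))

# multi_label_classification: float multi-hot targets -> BCE with logits
multi_hot = torch.tensor([[1.0, 0.0, 1.0], [0.0, 1.0, 0.0], [1.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
print(BCEWithLogitsLoss()(logits, multi_hot))

# regression (num_labels == 1): squeeze both sides and use MSE
reg_logits, reg_labels = torch.randn(4, 1), torch.randn(4)
print(MSELoss()(reg_logits.squeeze(), reg_labels.squeeze()))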
+ @add_start_docstrings(
+     """
+     LeViT Model transformer with image classification heads on top (a linear layer on top of the final hidden state and
+     a linear layer on top of the final hidden state of the distillation token) e.g. for ImageNet.
+
+     .. warning::
+            This model supports inference-only. Fine-tuning with distillation (i.e. with a teacher) is not yet
+            supported.
+     """,
+     LEVIT_START_DOCSTRING,
+ )
+ class LevitForImageClassificationWithTeacher(LevitPreTrainedModel):
+     def __init__(self, config):
+         super().__init__(config)
+         self.config = config
+         self.num_labels = config.num_labels
+         self.levit = LevitModel(config)
+
+         # Classifier head
+         self.classifier = (
+             LevitClassificationLayer(config.hidden_sizes[-1], config.num_labels)
+             if config.num_labels > 0
+             else torch.nn.Identity()
+         )
+         self.classifier_distill = (
+             LevitClassificationLayer(config.hidden_sizes[-1], config.num_labels)
+             if config.num_labels > 0
+             else torch.nn.Identity()
+         )
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     @add_start_docstrings_to_model_forward(LEVIT_INPUTS_DOCSTRING)
+     @add_code_sample_docstrings(
+         checkpoint=_IMAGE_CLASS_CHECKPOINT,
+         output_type=LevitForImageClassificationWithTeacherOutput,
+         config_class=_CONFIG_FOR_DOC,
+         expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
+     )
+     def forward(
+         self,
+         pixel_values: torch.FloatTensor = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+     ) -> Union[Tuple, LevitForImageClassificationWithTeacherOutput]:
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         outputs = self.levit(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
+
+         sequence_output = outputs[0]
+         sequence_output = sequence_output.mean(1)
+         cls_logits, distill_logits = self.classifier(sequence_output), self.classifier_distill(sequence_output)
+         logits = (cls_logits + distill_logits) / 2
+
+         if not return_dict:
+             output = (logits, cls_logits, distill_logits) + outputs[2:]
+             return output
+
+         return LevitForImageClassificationWithTeacherOutput(
+             logits=logits,
+             cls_logits=cls_logits,
+             distillation_logits=distill_logits,
+             hidden_states=outputs.hidden_states,
+         )
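A hedged inference sketch for the distilled variant above (the checkpoint name and blank image are assumptions, as before); it also checks that `logits` really is the average of the two heads, exactly as computed in `forward`:

import torch
from PIL import Image
from transformers import AutoImageProcessor, LevitForImageClassificationWithTeacher

image_processor = AutoImageProcessor.from_pretrained("facebook/levit-128S")
model = LevitForImageClassificationWithTeacher.from_pretrained("facebook/levit-128S").eval()

inputs = image_processor(images=Image.new("RGB", (224, 224)), return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

assert torch.allclose(outputs.logits, (outputs.cls_logits + outputs.distillation_logits) / 2)
print(model.config.id2label[outputs.logits.argmax(-1).item()])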
venv/lib/python3.10/site-packages/transformers/models/markuplm/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.47 kB).
venv/lib/python3.10/site-packages/transformers/models/markuplm/__pycache__/feature_extraction_markuplm.cpython-310.pyc ADDED
Binary file (5.18 kB).
venv/lib/python3.10/site-packages/transformers/models/markuplm/__pycache__/modeling_markuplm.cpython-310.pyc ADDED
Binary file (37.2 kB).
venv/lib/python3.10/site-packages/transformers/models/markuplm/__pycache__/processing_markuplm.cpython-310.pyc ADDED
Binary file (5.21 kB).