applied-ai-018 committed
Commit abc1575 (verified)
1 Parent(s): 826f2ea

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/transformers/models/canine/__pycache__/__init__.cpython-310.pyc +0 -0
  2. llmeval-env/lib/python3.10/site-packages/transformers/models/canine/__pycache__/configuration_canine.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/transformers/models/canine/__pycache__/convert_canine_original_tf_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/transformers/models/canine/__pycache__/modeling_canine.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/transformers/models/canine/__pycache__/tokenization_canine.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/transformers/models/canine/configuration_canine.py +141 -0
  7. llmeval-env/lib/python3.10/site-packages/transformers/models/canine/modeling_canine.py +1645 -0
  8. llmeval-env/lib/python3.10/site-packages/transformers/models/clipseg/__init__.py +71 -0
  9. llmeval-env/lib/python3.10/site-packages/transformers/models/clipseg/__pycache__/__init__.cpython-310.pyc +0 -0
  10. llmeval-env/lib/python3.10/site-packages/transformers/models/clipseg/__pycache__/convert_clipseg_original_pytorch_to_hf.cpython-310.pyc +0 -0
  11. llmeval-env/lib/python3.10/site-packages/transformers/models/clipseg/__pycache__/modeling_clipseg.cpython-310.pyc +0 -0
  12. llmeval-env/lib/python3.10/site-packages/transformers/models/clipseg/__pycache__/processing_clipseg.cpython-310.pyc +0 -0
  13. llmeval-env/lib/python3.10/site-packages/transformers/models/clipseg/configuration_clipseg.py +432 -0
  14. llmeval-env/lib/python3.10/site-packages/transformers/models/clipseg/convert_clipseg_original_pytorch_to_hf.py +264 -0
  15. llmeval-env/lib/python3.10/site-packages/transformers/models/clipseg/modeling_clipseg.py +1477 -0
  16. llmeval-env/lib/python3.10/site-packages/transformers/models/clipseg/processing_clipseg.py +161 -0
  17. llmeval-env/lib/python3.10/site-packages/transformers/models/dialogpt/__init__.py +0 -0
  18. llmeval-env/lib/python3.10/site-packages/transformers/models/dialogpt/convert_dialogpt_original_pytorch_checkpoint_to_pytorch.py +46 -0
  19. llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_sw3/__init__.py +43 -0
  20. llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_sw3/__pycache__/__init__.cpython-310.pyc +0 -0
  21. llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_sw3/__pycache__/convert_megatron_to_pytorch.cpython-310.pyc +0 -0
  22. llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_sw3/__pycache__/tokenization_gpt_sw3.cpython-310.pyc +0 -0
  23. llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_sw3/convert_megatron_to_pytorch.py +197 -0
  24. llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_sw3/tokenization_gpt_sw3.py +318 -0
  25. llmeval-env/lib/python3.10/site-packages/transformers/models/graphormer/__init__.py +57 -0
  26. llmeval-env/lib/python3.10/site-packages/transformers/models/graphormer/__pycache__/__init__.cpython-310.pyc +0 -0
  27. llmeval-env/lib/python3.10/site-packages/transformers/models/graphormer/__pycache__/collating_graphormer.cpython-310.pyc +0 -0
  28. llmeval-env/lib/python3.10/site-packages/transformers/models/graphormer/__pycache__/configuration_graphormer.cpython-310.pyc +0 -0
  29. llmeval-env/lib/python3.10/site-packages/transformers/models/graphormer/__pycache__/modeling_graphormer.cpython-310.pyc +0 -0
  30. llmeval-env/lib/python3.10/site-packages/transformers/models/graphormer/algos_graphormer.pyx +107 -0
  31. llmeval-env/lib/python3.10/site-packages/transformers/models/graphormer/collating_graphormer.py +134 -0
  32. llmeval-env/lib/python3.10/site-packages/transformers/models/graphormer/configuration_graphormer.py +218 -0
  33. llmeval-env/lib/python3.10/site-packages/transformers/models/graphormer/modeling_graphormer.py +911 -0
  34. llmeval-env/lib/python3.10/site-packages/transformers/models/idefics/__init__.py +73 -0
  35. llmeval-env/lib/python3.10/site-packages/transformers/models/idefics/__pycache__/image_processing_idefics.cpython-310.pyc +0 -0
  36. llmeval-env/lib/python3.10/site-packages/transformers/models/idefics/__pycache__/modeling_idefics.cpython-310.pyc +0 -0
  37. llmeval-env/lib/python3.10/site-packages/transformers/models/idefics/__pycache__/processing_idefics.cpython-310.pyc +0 -0
  38. llmeval-env/lib/python3.10/site-packages/transformers/models/idefics/__pycache__/vision.cpython-310.pyc +0 -0
  39. llmeval-env/lib/python3.10/site-packages/transformers/models/idefics/perceiver.py +188 -0
  40. llmeval-env/lib/python3.10/site-packages/transformers/models/instructblip/__pycache__/__init__.cpython-310.pyc +0 -0
  41. llmeval-env/lib/python3.10/site-packages/transformers/models/instructblip/__pycache__/configuration_instructblip.cpython-310.pyc +0 -0
  42. llmeval-env/lib/python3.10/site-packages/transformers/models/instructblip/__pycache__/processing_instructblip.cpython-310.pyc +0 -0
  43. llmeval-env/lib/python3.10/site-packages/transformers/models/longformer/__init__.py +135 -0
  44. llmeval-env/lib/python3.10/site-packages/transformers/models/longformer/__pycache__/__init__.cpython-310.pyc +0 -0
  45. llmeval-env/lib/python3.10/site-packages/transformers/models/longformer/__pycache__/configuration_longformer.cpython-310.pyc +0 -0
  46. llmeval-env/lib/python3.10/site-packages/transformers/models/longformer/__pycache__/convert_longformer_original_pytorch_lightning_to_pytorch.cpython-310.pyc +0 -0
  47. llmeval-env/lib/python3.10/site-packages/transformers/models/longformer/__pycache__/modeling_longformer.cpython-310.pyc +0 -0
  48. llmeval-env/lib/python3.10/site-packages/transformers/models/longformer/__pycache__/modeling_tf_longformer.cpython-310.pyc +0 -0
  49. llmeval-env/lib/python3.10/site-packages/transformers/models/longformer/__pycache__/tokenization_longformer.cpython-310.pyc +0 -0
  50. llmeval-env/lib/python3.10/site-packages/transformers/models/longformer/__pycache__/tokenization_longformer_fast.cpython-310.pyc +0 -0
llmeval-env/lib/python3.10/site-packages/transformers/models/canine/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.21 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/canine/__pycache__/configuration_canine.cpython-310.pyc ADDED
Binary file (5.74 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/canine/__pycache__/convert_canine_original_tf_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (1.38 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/canine/__pycache__/modeling_canine.cpython-310.pyc ADDED
Binary file (45.7 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/canine/__pycache__/tokenization_canine.cpython-310.pyc ADDED
Binary file (7.83 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/canine/configuration_canine.py ADDED
@@ -0,0 +1,141 @@
+ # coding=utf-8
+ # Copyright Google AI and The HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """ CANINE model configuration"""
+
+ from ...configuration_utils import PretrainedConfig
+ from ...utils import logging
+
+
+ logger = logging.get_logger(__name__)
+
+
+ from ..deprecated._archive_maps import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402
+
+
+ class CanineConfig(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`CanineModel`]. It is used to instantiate an
+     CANINE model according to the specified arguments, defining the model architecture. Instantiating a configuration
+     with the defaults will yield a similar configuration to that of the CANINE
+     [google/canine-s](https://huggingface.co/google/canine-s) architecture.
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+
+     Args:
+         hidden_size (`int`, *optional*, defaults to 768):
+             Dimension of the encoder layers and the pooler layer.
+         num_hidden_layers (`int`, *optional*, defaults to 12):
+             Number of hidden layers in the deep Transformer encoder.
+         num_attention_heads (`int`, *optional*, defaults to 12):
+             Number of attention heads for each attention layer in the Transformer encoders.
+         intermediate_size (`int`, *optional*, defaults to 3072):
+             Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoders.
+         hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+             The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+             `"relu"`, `"selu"` and `"gelu_new"` are supported.
+         hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
+             The dropout probability for all fully connected layers in the embeddings, encoders, and pooler.
+         attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
+             The dropout ratio for the attention probabilities.
+         max_position_embeddings (`int`, *optional*, defaults to 16384):
+             The maximum sequence length that this model might ever be used with.
+         type_vocab_size (`int`, *optional*, defaults to 16):
+             The vocabulary size of the `token_type_ids` passed when calling [`CanineModel`].
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+             The epsilon used by the layer normalization layers.
+         pad_token_id (`int`, *optional*, defaults to 0):
+             Padding token id.
+         bos_token_id (`int`, *optional*, defaults to 57344):
+             Beginning of stream token id.
+         eos_token_id (`int`, *optional*, defaults to 57345):
+             End of stream token id.
+         downsampling_rate (`int`, *optional*, defaults to 4):
+             The rate at which to downsample the original character sequence length before applying the deep Transformer
+             encoder.
+         upsampling_kernel_size (`int`, *optional*, defaults to 4):
+             The kernel size (i.e. the number of characters in each window) of the convolutional projection layer when
+             projecting back from `hidden_size`*2 to `hidden_size`.
+         num_hash_functions (`int`, *optional*, defaults to 8):
+             The number of hash functions to use. Each hash function has its own embedding matrix.
+         num_hash_buckets (`int`, *optional*, defaults to 16384):
+             The number of hash buckets to use.
+         local_transformer_stride (`int`, *optional*, defaults to 128):
+             The stride of the local attention of the first shallow Transformer encoder. Defaults to 128 for good
+             TPU/XLA memory alignment.
+
+     Example:
+
+     ```python
+     >>> from transformers import CanineConfig, CanineModel
+
+     >>> # Initializing a CANINE google/canine-s style configuration
+     >>> configuration = CanineConfig()
+
+     >>> # Initializing a model (with random weights) from the google/canine-s style configuration
+     >>> model = CanineModel(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+
+     model_type = "canine"
+
+     def __init__(
+         self,
+         hidden_size=768,
+         num_hidden_layers=12,
+         num_attention_heads=12,
+         intermediate_size=3072,
+         hidden_act="gelu",
+         hidden_dropout_prob=0.1,
+         attention_probs_dropout_prob=0.1,
+         max_position_embeddings=16384,
+         type_vocab_size=16,
+         initializer_range=0.02,
+         layer_norm_eps=1e-12,
+         pad_token_id=0,
+         bos_token_id=0xE000,
+         eos_token_id=0xE001,
+         downsampling_rate=4,
+         upsampling_kernel_size=4,
+         num_hash_functions=8,
+         num_hash_buckets=16384,
+         local_transformer_stride=128,  # Good TPU/XLA memory alignment.
+         **kwargs,
+     ):
+         super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
+
+         self.max_position_embeddings = max_position_embeddings
+         self.hidden_size = hidden_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+         self.intermediate_size = intermediate_size
+         self.hidden_act = hidden_act
+         self.hidden_dropout_prob = hidden_dropout_prob
+         self.attention_probs_dropout_prob = attention_probs_dropout_prob
+         self.initializer_range = initializer_range
+         self.type_vocab_size = type_vocab_size
+         self.layer_norm_eps = layer_norm_eps
+
+         # Character config:
+         self.downsampling_rate = downsampling_rate
+         self.upsampling_kernel_size = upsampling_kernel_size
+         self.num_hash_functions = num_hash_functions
+         self.num_hash_buckets = num_hash_buckets
+         self.local_transformer_stride = local_transformer_stride
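
A quick illustration of how these defaults fit together (a minimal sketch, not part of the committed file; the character sequence length of 2048 below is an arbitrary example):

from transformers import CanineConfig

config = CanineConfig()  # google/canine-s style defaults

# Each of the `num_hash_functions` embedders owns an equal slice of `hidden_size`,
# so `hidden_size` must be divisible by `num_hash_functions` (768 // 8 = 96 here).
shard_embedding_size = config.hidden_size // config.num_hash_functions

# Characters are downsampled to "molecules" before the deep encoder is applied.
char_seq_length = 2048
molecule_seq_length = char_seq_length // config.downsampling_rate  # 2048 // 4 = 512

print(shard_embedding_size, molecule_seq_length)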
llmeval-env/lib/python3.10/site-packages/transformers/models/canine/modeling_canine.py ADDED
@@ -0,0 +1,1645 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 Google AI The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch CANINE model."""
16
+
17
+
18
+ import copy
19
+ import math
20
+ import os
21
+ from dataclasses import dataclass
22
+ from typing import Optional, Tuple, Union
23
+
24
+ import torch
25
+ import torch.utils.checkpoint
26
+ from torch import nn
27
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
28
+
29
+ from ...activations import ACT2FN
30
+ from ...modeling_outputs import (
31
+ BaseModelOutput,
32
+ ModelOutput,
33
+ MultipleChoiceModelOutput,
34
+ QuestionAnsweringModelOutput,
35
+ SequenceClassifierOutput,
36
+ TokenClassifierOutput,
37
+ )
38
+ from ...modeling_utils import PreTrainedModel
39
+ from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
40
+ from ...utils import (
41
+ add_code_sample_docstrings,
42
+ add_start_docstrings,
43
+ add_start_docstrings_to_model_forward,
44
+ logging,
45
+ replace_return_docstrings,
46
+ )
47
+ from .configuration_canine import CanineConfig
48
+
49
+
50
+ logger = logging.get_logger(__name__)
51
+
52
+ _CHECKPOINT_FOR_DOC = "google/canine-s"
53
+ _CONFIG_FOR_DOC = "CanineConfig"
54
+
55
+
56
+ from ..deprecated._archive_maps import CANINE_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
57
+
58
+
59
+ # Support up to 16 hash functions.
60
+ _PRIMES = [31, 43, 59, 61, 73, 97, 103, 113, 137, 149, 157, 173, 181, 193, 211, 223]
61
+
62
+
63
+ @dataclass
64
+ class CanineModelOutputWithPooling(ModelOutput):
65
+ """
66
+ Output type of [`CanineModel`]. Based on [`~modeling_outputs.BaseModelOutputWithPooling`], but with slightly
67
+ different `hidden_states` and `attentions`, as these also include the hidden states and attentions of the shallow
68
+ Transformer encoders.
69
+
70
+ Args:
71
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
72
+ Sequence of hidden-states at the output of the last layer of the model (i.e. the output of the final
73
+ shallow Transformer encoder).
74
+ pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):
75
+ Hidden-state of the first token of the sequence (classification token) at the last layer of the deep
76
+ Transformer encoder, further processed by a Linear layer and a Tanh activation function. The Linear layer
77
+ weights are trained from the next sentence prediction (classification) objective during pretraining.
78
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
79
+ Tuple of `torch.FloatTensor` (one for the input to each encoder + one for the output of each layer of each
80
+ encoder) of shape `(batch_size, sequence_length, hidden_size)` and `(batch_size, sequence_length //
81
+ config.downsampling_rate, hidden_size)`. Hidden-states of the model at the output of each layer plus the
82
+ initial input to each Transformer encoder. The hidden states of the shallow encoders have length
83
+ `sequence_length`, but the hidden states of the deep encoder have length `sequence_length` //
84
+ `config.downsampling_rate`.
85
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
86
+ Tuple of `torch.FloatTensor` (one for each layer) of the 3 Transformer encoders of shape `(batch_size,
87
+ num_heads, sequence_length, sequence_length)` and `(batch_size, num_heads, sequence_length //
88
+ config.downsampling_rate, sequence_length // config.downsampling_rate)`. Attentions weights after the
89
+ attention softmax, used to compute the weighted average in the self-attention heads.
90
+ """
91
+
92
+ last_hidden_state: torch.FloatTensor = None
93
+ pooler_output: torch.FloatTensor = None
94
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
95
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
96
+
97
+
98
+ def load_tf_weights_in_canine(model, config, tf_checkpoint_path):
99
+ """Load tf checkpoints in a pytorch model."""
100
+ try:
101
+ import re
102
+
103
+ import numpy as np
104
+ import tensorflow as tf
105
+ except ImportError:
106
+ logger.error(
107
+ "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
108
+ "https://www.tensorflow.org/install/ for installation instructions."
109
+ )
110
+ raise
111
+ tf_path = os.path.abspath(tf_checkpoint_path)
112
+ logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
113
+ # Load weights from TF model
114
+ init_vars = tf.train.list_variables(tf_path)
115
+ names = []
116
+ arrays = []
117
+ for name, shape in init_vars:
118
+ logger.info(f"Loading TF weight {name} with shape {shape}")
119
+ array = tf.train.load_variable(tf_path, name)
120
+ names.append(name)
121
+ arrays.append(array)
122
+
123
+ for name, array in zip(names, arrays):
124
+ name = name.split("/")
125
+ # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
126
+ # which are not required for using pretrained model
127
+ # also discard the cls weights (which were used for the next sentence prediction pre-training task)
128
+ if any(
129
+ n
130
+ in [
131
+ "adam_v",
132
+ "adam_m",
133
+ "AdamWeightDecayOptimizer",
134
+ "AdamWeightDecayOptimizer_1",
135
+ "global_step",
136
+ "cls",
137
+ "autoregressive_decoder",
138
+ "char_output_weights",
139
+ ]
140
+ for n in name
141
+ ):
142
+ logger.info(f"Skipping {'/'.join(name)}")
143
+ continue
144
+ # if first scope name starts with "bert", change it to "encoder"
145
+ if name[0] == "bert":
146
+ name[0] = "encoder"
147
+ # remove "embeddings" middle name of HashBucketCodepointEmbedders
148
+ elif name[1] == "embeddings":
149
+ name.remove(name[1])
150
+ # rename segment_embeddings to token_type_embeddings
151
+ elif name[1] == "segment_embeddings":
152
+ name[1] = "token_type_embeddings"
153
+ # rename initial convolutional projection layer
154
+ elif name[1] == "initial_char_encoder":
155
+ name = ["chars_to_molecules"] + name[-2:]
156
+ # rename final convolutional projection layer
157
+ elif name[0] == "final_char_encoder" and name[1] in ["LayerNorm", "conv"]:
158
+ name = ["projection"] + name[1:]
159
+ pointer = model
160
+ for m_name in name:
161
+ if (re.fullmatch(r"[A-Za-z]+_\d+", m_name)) and "Embedder" not in m_name:
162
+ scope_names = re.split(r"_(\d+)", m_name)
163
+ else:
164
+ scope_names = [m_name]
165
+ if scope_names[0] == "kernel" or scope_names[0] == "gamma":
166
+ pointer = getattr(pointer, "weight")
167
+ elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
168
+ pointer = getattr(pointer, "bias")
169
+ elif scope_names[0] == "output_weights":
170
+ pointer = getattr(pointer, "weight")
171
+ else:
172
+ try:
173
+ pointer = getattr(pointer, scope_names[0])
174
+ except AttributeError:
175
+ logger.info(f"Skipping {'/'.join(name)}")
176
+ continue
177
+ if len(scope_names) >= 2:
178
+ num = int(scope_names[1])
179
+ pointer = pointer[num]
180
+ if m_name[-11:] == "_embeddings":
181
+ pointer = getattr(pointer, "weight")
182
+ elif m_name[-10:] in [f"Embedder_{i}" for i in range(8)]:
183
+ pointer = getattr(pointer, "weight")
184
+ elif m_name == "kernel":
185
+ array = np.transpose(array)
186
+
187
+ if pointer.shape != array.shape:
188
+ raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
189
+
190
+ logger.info(f"Initialize PyTorch weight {name}")
191
+ pointer.data = torch.from_numpy(array)
192
+ return model
193
+
194
+
195
+ class CanineEmbeddings(nn.Module):
196
+ """Construct the character, position and token_type embeddings."""
197
+
198
+ def __init__(self, config):
199
+ super().__init__()
200
+
201
+ self.config = config
202
+
203
+ # character embeddings
204
+ shard_embedding_size = config.hidden_size // config.num_hash_functions
205
+ for i in range(config.num_hash_functions):
206
+ name = f"HashBucketCodepointEmbedder_{i}"
207
+ setattr(self, name, nn.Embedding(config.num_hash_buckets, shard_embedding_size))
208
+ self.char_position_embeddings = nn.Embedding(config.num_hash_buckets, config.hidden_size)
209
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
210
+
211
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
212
+ # any TensorFlow checkpoint file
213
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
214
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
215
+
216
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
217
+ self.register_buffer(
218
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
219
+ )
220
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
221
+
222
+ def _hash_bucket_tensors(self, input_ids, num_hashes: int, num_buckets: int):
223
+ """
224
+ Converts ids to hash bucket ids via multiple hashing.
225
+
226
+ Args:
227
+ input_ids: The codepoints or other IDs to be hashed.
228
+ num_hashes: The number of hash functions to use.
229
+ num_buckets: The number of hash buckets (i.e. embeddings in each table).
230
+
231
+ Returns:
232
+ A list of tensors, each of which is the hash bucket IDs from one hash function.
233
+ """
234
+ if num_hashes > len(_PRIMES):
235
+ raise ValueError(f"`num_hashes` must be <= {len(_PRIMES)}")
236
+
237
+ primes = _PRIMES[:num_hashes]
238
+
239
+ result_tensors = []
240
+ for prime in primes:
241
+ hashed = ((input_ids + 1) * prime) % num_buckets
242
+ result_tensors.append(hashed)
243
+ return result_tensors
244
+
245
+ def _embed_hash_buckets(self, input_ids, embedding_size: int, num_hashes: int, num_buckets: int):
246
+ """Converts IDs (e.g. codepoints) into embeddings via multiple hashing."""
247
+ if embedding_size % num_hashes != 0:
248
+ raise ValueError(f"Expected `embedding_size` ({embedding_size}) % `num_hashes` ({num_hashes}) == 0")
249
+
250
+ hash_bucket_tensors = self._hash_bucket_tensors(input_ids, num_hashes=num_hashes, num_buckets=num_buckets)
251
+ embedding_shards = []
252
+ for i, hash_bucket_ids in enumerate(hash_bucket_tensors):
253
+ name = f"HashBucketCodepointEmbedder_{i}"
254
+ shard_embeddings = getattr(self, name)(hash_bucket_ids)
255
+ embedding_shards.append(shard_embeddings)
256
+
257
+ return torch.cat(embedding_shards, dim=-1)
258
+
259
+ def forward(
260
+ self,
261
+ input_ids: Optional[torch.LongTensor] = None,
262
+ token_type_ids: Optional[torch.LongTensor] = None,
263
+ position_ids: Optional[torch.LongTensor] = None,
264
+ inputs_embeds: Optional[torch.FloatTensor] = None,
265
+ ) -> torch.FloatTensor:
266
+ if input_ids is not None:
267
+ input_shape = input_ids.size()
268
+ else:
269
+ input_shape = inputs_embeds.size()[:-1]
270
+
271
+ seq_length = input_shape[1]
272
+
273
+ if position_ids is None:
274
+ position_ids = self.position_ids[:, :seq_length]
275
+
276
+ if token_type_ids is None:
277
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
278
+
279
+ if inputs_embeds is None:
280
+ inputs_embeds = self._embed_hash_buckets(
281
+ input_ids, self.config.hidden_size, self.config.num_hash_functions, self.config.num_hash_buckets
282
+ )
283
+
284
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
285
+
286
+ embeddings = inputs_embeds + token_type_embeddings
287
+
288
+ if self.position_embedding_type == "absolute":
289
+ position_embeddings = self.char_position_embeddings(position_ids)
290
+ embeddings += position_embeddings
291
+ embeddings = self.LayerNorm(embeddings)
292
+ embeddings = self.dropout(embeddings)
293
+ return embeddings
294
+
295
+
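# Illustrative sketch (not from modeling_canine.py): the multi-hash character embedding used
# by CanineEmbeddings above, in isolation. Each Unicode codepoint is hashed with several small
# primes into a bucket id, and each hash function owns a slice of the final embedding.
# The toy sizes below are assumptions for the example, not the model defaults.
import torch
import torch.nn as nn

num_hashes, num_buckets, hidden_size = 2, 8, 4
primes = [31, 43]  # first entries of _PRIMES
tables = [nn.Embedding(num_buckets, hidden_size // num_hashes) for _ in range(num_hashes)]

codepoints = torch.tensor([[ord(c) for c in "hi!"]])             # (1, 3) raw characters
bucket_ids = [((codepoints + 1) * p) % num_buckets for p in primes]
shards = [table(ids) for table, ids in zip(tables, bucket_ids)]   # each shard: (1, 3, 2)
char_embeddings = torch.cat(shards, dim=-1)                       # (1, 3, 4), as in _embed_hash_buckets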
296
+ class CharactersToMolecules(nn.Module):
297
+ """Convert character sequence to initial molecule sequence (i.e. downsample) using strided convolutions."""
298
+
299
+ def __init__(self, config):
300
+ super().__init__()
301
+
302
+ self.conv = nn.Conv1d(
303
+ in_channels=config.hidden_size,
304
+ out_channels=config.hidden_size,
305
+ kernel_size=config.downsampling_rate,
306
+ stride=config.downsampling_rate,
307
+ )
308
+ self.activation = ACT2FN[config.hidden_act]
309
+
310
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
311
+ # any TensorFlow checkpoint file
312
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
313
+
314
+ def forward(self, char_encoding: torch.Tensor) -> torch.Tensor:
315
+ # `cls_encoding`: [batch, 1, hidden_size]
316
+ cls_encoding = char_encoding[:, 0:1, :]
317
+
318
+ # char_encoding has shape [batch, char_seq, hidden_size]
319
+ # We transpose it to be [batch, hidden_size, char_seq]
320
+ char_encoding = torch.transpose(char_encoding, 1, 2)
321
+ downsampled = self.conv(char_encoding)
322
+ downsampled = torch.transpose(downsampled, 1, 2)
323
+ downsampled = self.activation(downsampled)
324
+
325
+ # Truncate the last molecule in order to reserve a position for [CLS].
326
+ # Often, the last position is never used (unless we completely fill the
327
+ # text buffer). This is important in order to maintain alignment on TPUs
328
+ # (i.e. a multiple of 128).
329
+ downsampled_truncated = downsampled[:, 0:-1, :]
330
+
331
+ # We also keep [CLS] as a separate sequence position since we always
332
+ # want to reserve a position (and the model capacity that goes along
333
+ # with that) in the deep BERT stack.
334
+ # `result`: [batch, molecule_seq, molecule_dim]
335
+ result = torch.cat([cls_encoding, downsampled_truncated], dim=1)
336
+
337
+ result = self.LayerNorm(result)
338
+
339
+ return result
340
+
341
+
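# Illustrative sketch (not from modeling_canine.py): the shape bookkeeping done by
# CharactersToMolecules above. A strided Conv1d shortens the character axis by
# downsampling_rate; the last molecule is dropped so that prepending the [CLS] encoding
# keeps the molecule count at exactly char_seq // downsampling_rate. Toy sizes assumed.
import torch
import torch.nn as nn

batch, char_seq, hidden_size, rate = 2, 16, 8, 4
conv = nn.Conv1d(hidden_size, hidden_size, kernel_size=rate, stride=rate)

chars = torch.randn(batch, char_seq, hidden_size)
downsampled = conv(chars.transpose(1, 2)).transpose(1, 2)         # (2, 16 // 4, 8) = (2, 4, 8)
molecules = torch.cat([chars[:, 0:1, :], downsampled[:, :-1, :]], dim=1)
print(molecules.shape)                                            # torch.Size([2, 4, 8])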
342
+ class ConvProjection(nn.Module):
343
+ """
344
+ Project representations from hidden_size*2 back to hidden_size across a window of w = config.upsampling_kernel_size
345
+ characters.
346
+ """
347
+
348
+ def __init__(self, config):
349
+ super().__init__()
350
+ self.config = config
351
+ self.conv = nn.Conv1d(
352
+ in_channels=config.hidden_size * 2,
353
+ out_channels=config.hidden_size,
354
+ kernel_size=config.upsampling_kernel_size,
355
+ stride=1,
356
+ )
357
+ self.activation = ACT2FN[config.hidden_act]
358
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
359
+ # any TensorFlow checkpoint file
360
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
361
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
362
+
363
+ def forward(
364
+ self,
365
+ inputs: torch.Tensor,
366
+ final_seq_char_positions: Optional[torch.Tensor] = None,
367
+ ) -> torch.Tensor:
368
+ # inputs has shape [batch, mol_seq, molecule_hidden_size+char_hidden_final]
369
+ # we transpose it to be [batch, molecule_hidden_size+char_hidden_final, mol_seq]
370
+ inputs = torch.transpose(inputs, 1, 2)
371
+
372
+ # PyTorch < 1.9 does not support padding="same" (which is used in the original implementation),
373
+ # so we pad the tensor manually before passing it to the conv layer
374
+ # based on https://github.com/google-research/big_transfer/blob/49afe42338b62af9fbe18f0258197a33ee578a6b/bit_tf2/models.py#L36-L38
375
+ pad_total = self.config.upsampling_kernel_size - 1
376
+ pad_beg = pad_total // 2
377
+ pad_end = pad_total - pad_beg
378
+
379
+ pad = nn.ConstantPad1d((pad_beg, pad_end), 0)
380
+ # `result`: shape (batch_size, char_seq_len, hidden_size)
381
+ result = self.conv(pad(inputs))
382
+ result = torch.transpose(result, 1, 2)
383
+ result = self.activation(result)
384
+ result = self.LayerNorm(result)
385
+ result = self.dropout(result)
386
+ final_char_seq = result
387
+
388
+ if final_seq_char_positions is not None:
389
+ # Limit transformer query seq and attention mask to these character
390
+ # positions to greatly reduce the compute cost. Typically, this is just
391
+ # done for the MLM training task.
392
+ # TODO add support for MLM
393
+ raise NotImplementedError("CanineForMaskedLM is currently not supported")
394
+ else:
395
+ query_seq = final_char_seq
396
+
397
+ return query_seq
398
+
399
+
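# Illustrative sketch (not from modeling_canine.py): the manual "same" padding used by
# ConvProjection above, for the default upsampling_kernel_size of 4. Padding (1, 2) keeps the
# character sequence length unchanged by the stride-1 convolution. Toy sizes assumed.
import torch
import torch.nn as nn

hidden_size, kernel_size, char_seq = 8, 4, 10
conv = nn.Conv1d(hidden_size * 2, hidden_size, kernel_size=kernel_size, stride=1)

pad_total = kernel_size - 1                                       # 3
pad_beg, pad_end = pad_total // 2, pad_total - pad_total // 2     # (1, 2)
pad = nn.ConstantPad1d((pad_beg, pad_end), 0)

inputs = torch.randn(1, hidden_size * 2, char_seq)   # already transposed to (batch, channels, seq)
out = conv(pad(inputs))
print(out.shape)                                      # torch.Size([1, 8, 10]) -- length preserved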
400
+ class CanineSelfAttention(nn.Module):
401
+ def __init__(self, config):
402
+ super().__init__()
403
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
404
+ raise ValueError(
405
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
406
+ f"heads ({config.num_attention_heads})"
407
+ )
408
+
409
+ self.num_attention_heads = config.num_attention_heads
410
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
411
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
412
+
413
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
414
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
415
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
416
+
417
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
418
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
419
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
420
+ self.max_position_embeddings = config.max_position_embeddings
421
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
422
+
423
+ def transpose_for_scores(self, x):
424
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
425
+ x = x.view(*new_x_shape)
426
+ return x.permute(0, 2, 1, 3)
427
+
428
+ def forward(
429
+ self,
430
+ from_tensor: torch.Tensor,
431
+ to_tensor: torch.Tensor,
432
+ attention_mask: Optional[torch.FloatTensor] = None,
433
+ head_mask: Optional[torch.FloatTensor] = None,
434
+ output_attentions: Optional[bool] = False,
435
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
436
+ mixed_query_layer = self.query(from_tensor)
437
+
438
+ # If this is instantiated as a cross-attention module, the keys
439
+ # and values come from an encoder; the attention mask needs to be
440
+ # such that the encoder's padding tokens are not attended to.
441
+
442
+ key_layer = self.transpose_for_scores(self.key(to_tensor))
443
+ value_layer = self.transpose_for_scores(self.value(to_tensor))
444
+
445
+ query_layer = self.transpose_for_scores(mixed_query_layer)
446
+
447
+ # Take the dot product between "query" and "key" to get the raw attention scores.
448
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
449
+
450
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
451
+ seq_length = from_tensor.size()[1]
452
+ position_ids_l = torch.arange(seq_length, dtype=torch.long, device=from_tensor.device).view(-1, 1)
453
+ position_ids_r = torch.arange(seq_length, dtype=torch.long, device=from_tensor.device).view(1, -1)
454
+ distance = position_ids_l - position_ids_r
455
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
456
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
457
+
458
+ if self.position_embedding_type == "relative_key":
459
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
460
+ attention_scores = attention_scores + relative_position_scores
461
+ elif self.position_embedding_type == "relative_key_query":
462
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
463
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
464
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
465
+
466
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
467
+ if attention_mask is not None:
468
+ if attention_mask.ndim == 3:
469
+ # if attention_mask is 3D, do the following:
470
+ attention_mask = torch.unsqueeze(attention_mask, dim=1)
471
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
472
+ # masked positions, this operation will create a tensor which is 0.0 for
473
+ # positions we want to attend and the dtype's smallest value for masked positions.
474
+ attention_mask = (1.0 - attention_mask.float()) * torch.finfo(attention_scores.dtype).min
475
+ # Apply the attention mask (precomputed for all layers in CanineModel forward() function)
476
+ attention_scores = attention_scores + attention_mask
477
+
478
+ # Normalize the attention scores to probabilities.
479
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
480
+
481
+ # This is actually dropping out entire tokens to attend to, which might
482
+ # seem a bit unusual, but is taken from the original Transformer paper.
483
+ attention_probs = self.dropout(attention_probs)
484
+
485
+ # Mask heads if we want to
486
+ if head_mask is not None:
487
+ attention_probs = attention_probs * head_mask
488
+
489
+ context_layer = torch.matmul(attention_probs, value_layer)
490
+
491
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
492
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
493
+ context_layer = context_layer.view(*new_context_layer_shape)
494
+
495
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
496
+
497
+ return outputs
498
+
499
+
500
+ class CanineSelfOutput(nn.Module):
501
+ def __init__(self, config):
502
+ super().__init__()
503
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
504
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
505
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
506
+
507
+ def forward(
508
+ self, hidden_states: Tuple[torch.FloatTensor], input_tensor: torch.FloatTensor
509
+ ) -> Tuple[torch.FloatTensor, torch.FloatTensor]:
510
+ hidden_states = self.dense(hidden_states)
511
+ hidden_states = self.dropout(hidden_states)
512
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
513
+ return hidden_states
514
+
515
+
516
+ class CanineAttention(nn.Module):
517
+ """
518
+ Additional arguments related to local attention:
519
+
520
+ - **local** (`bool`, *optional*, defaults to `False`) -- Whether to apply local attention.
521
+ - **always_attend_to_first_position** (`bool`, *optional*, defaults to `False`) -- Should all blocks be able to
522
+ attend
523
+ to the `to_tensor`'s first position (e.g. a [CLS] position)? - **first_position_attends_to_all** (`bool`,
524
+ *optional*, defaults to `False`) -- Should the *from_tensor*'s first position be able to attend to all
525
+ positions within the *from_tensor*? - **attend_from_chunk_width** (`int`, *optional*, defaults to 128) -- The
526
+ width of each block-wise chunk in `from_tensor`. - **attend_from_chunk_stride** (`int`, *optional*, defaults to
527
+ 128) -- The number of elements to skip when moving to the next block in `from_tensor`. -
528
+ **attend_to_chunk_width** (`int`, *optional*, defaults to 128) -- The width of each block-wise chunk in
529
+ *to_tensor*. - **attend_to_chunk_stride** (`int`, *optional*, defaults to 128) -- The number of elements to
530
+ skip when moving to the next block in `to_tensor`.
531
+ """
532
+
533
+ def __init__(
534
+ self,
535
+ config,
536
+ local=False,
537
+ always_attend_to_first_position: bool = False,
538
+ first_position_attends_to_all: bool = False,
539
+ attend_from_chunk_width: int = 128,
540
+ attend_from_chunk_stride: int = 128,
541
+ attend_to_chunk_width: int = 128,
542
+ attend_to_chunk_stride: int = 128,
543
+ ):
544
+ super().__init__()
545
+ self.self = CanineSelfAttention(config)
546
+ self.output = CanineSelfOutput(config)
547
+ self.pruned_heads = set()
548
+
549
+ # additional arguments related to local attention
550
+ self.local = local
551
+ if attend_from_chunk_width < attend_from_chunk_stride:
552
+ raise ValueError(
553
+ "`attend_from_chunk_width` < `attend_from_chunk_stride` would cause sequence positions to get skipped."
554
+ )
555
+ if attend_to_chunk_width < attend_to_chunk_stride:
556
+ raise ValueError(
557
+ "`attend_to_chunk_width` < `attend_to_chunk_stride`would cause sequence positions to get skipped."
558
+ )
559
+ self.always_attend_to_first_position = always_attend_to_first_position
560
+ self.first_position_attends_to_all = first_position_attends_to_all
561
+ self.attend_from_chunk_width = attend_from_chunk_width
562
+ self.attend_from_chunk_stride = attend_from_chunk_stride
563
+ self.attend_to_chunk_width = attend_to_chunk_width
564
+ self.attend_to_chunk_stride = attend_to_chunk_stride
565
+
566
+ def prune_heads(self, heads):
567
+ if len(heads) == 0:
568
+ return
569
+ heads, index = find_pruneable_heads_and_indices(
570
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
571
+ )
572
+
573
+ # Prune linear layers
574
+ self.self.query = prune_linear_layer(self.self.query, index)
575
+ self.self.key = prune_linear_layer(self.self.key, index)
576
+ self.self.value = prune_linear_layer(self.self.value, index)
577
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
578
+
579
+ # Update hyper params and store pruned heads
580
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
581
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
582
+ self.pruned_heads = self.pruned_heads.union(heads)
583
+
584
+ def forward(
585
+ self,
586
+ hidden_states: Tuple[torch.FloatTensor],
587
+ attention_mask: Optional[torch.FloatTensor] = None,
588
+ head_mask: Optional[torch.FloatTensor] = None,
589
+ output_attentions: Optional[bool] = False,
590
+ ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]:
591
+ if not self.local:
592
+ self_outputs = self.self(hidden_states, hidden_states, attention_mask, head_mask, output_attentions)
593
+ attention_output = self_outputs[0]
594
+ else:
595
+ from_seq_length = to_seq_length = hidden_states.shape[1]
596
+ from_tensor = to_tensor = hidden_states
597
+
598
+ # Create chunks (windows) that we will attend *from* and then concatenate them.
599
+ from_chunks = []
600
+ if self.first_position_attends_to_all:
601
+ from_chunks.append((0, 1))
602
+ # We must skip this first position so that our output sequence is the
603
+ # correct length (this matters in the *from* sequence only).
604
+ from_start = 1
605
+ else:
606
+ from_start = 0
607
+ for chunk_start in range(from_start, from_seq_length, self.attend_from_chunk_stride):
608
+ chunk_end = min(from_seq_length, chunk_start + self.attend_from_chunk_width)
609
+ from_chunks.append((chunk_start, chunk_end))
610
+
611
+ # Determine the chunks (windows) that will attend *to*.
612
+ to_chunks = []
613
+ if self.first_position_attends_to_all:
614
+ to_chunks.append((0, to_seq_length))
615
+ for chunk_start in range(0, to_seq_length, self.attend_to_chunk_stride):
616
+ chunk_end = min(to_seq_length, chunk_start + self.attend_to_chunk_width)
617
+ to_chunks.append((chunk_start, chunk_end))
618
+
619
+ if len(from_chunks) != len(to_chunks):
620
+ raise ValueError(
621
+ f"Expected to have same number of `from_chunks` ({from_chunks}) and "
622
+ f"`to_chunks` ({from_chunks}). Check strides."
623
+ )
624
+
625
+ # next, compute attention scores for each pair of windows and concatenate
626
+ attention_output_chunks = []
627
+ attention_probs_chunks = []
628
+ for (from_start, from_end), (to_start, to_end) in zip(from_chunks, to_chunks):
629
+ from_tensor_chunk = from_tensor[:, from_start:from_end, :]
630
+ to_tensor_chunk = to_tensor[:, to_start:to_end, :]
631
+ # `attention_mask`: <float>[batch_size, from_seq, to_seq]
632
+ # `attention_mask_chunk`: <float>[batch_size, from_seq_chunk, to_seq_chunk]
633
+ attention_mask_chunk = attention_mask[:, from_start:from_end, to_start:to_end]
634
+ if self.always_attend_to_first_position:
635
+ cls_attention_mask = attention_mask[:, from_start:from_end, 0:1]
636
+ attention_mask_chunk = torch.cat([cls_attention_mask, attention_mask_chunk], dim=2)
637
+
638
+ cls_position = to_tensor[:, 0:1, :]
639
+ to_tensor_chunk = torch.cat([cls_position, to_tensor_chunk], dim=1)
640
+
641
+ attention_outputs_chunk = self.self(
642
+ from_tensor_chunk, to_tensor_chunk, attention_mask_chunk, head_mask, output_attentions
643
+ )
644
+ attention_output_chunks.append(attention_outputs_chunk[0])
645
+ if output_attentions:
646
+ attention_probs_chunks.append(attention_outputs_chunk[1])
647
+
648
+ attention_output = torch.cat(attention_output_chunks, dim=1)
649
+
650
+ attention_output = self.output(attention_output, hidden_states)
651
+ outputs = (attention_output,)
652
+ if not self.local:
653
+ outputs = outputs + self_outputs[1:] # add attentions if we output them
654
+ else:
655
+ outputs = outputs + tuple(attention_probs_chunks) # add attentions if we output them
656
+ return outputs
657
+
658
+
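# Illustrative sketch (not from modeling_canine.py): the blockwise windows built in the local
# branch of CanineAttention.forward above, with the default chunk width/stride of 128, an
# assumed character sequence length of 512, and first_position_attends_to_all=False.
seq_length, width, stride = 512, 128, 128

from_chunks = [(start, min(seq_length, start + width)) for start in range(0, seq_length, stride)]
to_chunks = [(start, min(seq_length, start + width)) for start in range(0, seq_length, stride)]

# Four aligned (from, to) window pairs; each query block attends only within its own block:
# [(0, 128), (128, 256), (256, 384), (384, 512)]
print(from_chunks)
assert len(from_chunks) == len(to_chunks)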
659
+ class CanineIntermediate(nn.Module):
660
+ def __init__(self, config):
661
+ super().__init__()
662
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
663
+ if isinstance(config.hidden_act, str):
664
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
665
+ else:
666
+ self.intermediate_act_fn = config.hidden_act
667
+
668
+ def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
669
+ hidden_states = self.dense(hidden_states)
670
+ hidden_states = self.intermediate_act_fn(hidden_states)
671
+ return hidden_states
672
+
673
+
674
+ class CanineOutput(nn.Module):
675
+ def __init__(self, config):
676
+ super().__init__()
677
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
678
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
679
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
680
+
681
+ def forward(self, hidden_states: Tuple[torch.FloatTensor], input_tensor: torch.FloatTensor) -> torch.FloatTensor:
682
+ hidden_states = self.dense(hidden_states)
683
+ hidden_states = self.dropout(hidden_states)
684
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
685
+ return hidden_states
686
+
687
+
688
+ class CanineLayer(nn.Module):
689
+ def __init__(
690
+ self,
691
+ config,
692
+ local,
693
+ always_attend_to_first_position,
694
+ first_position_attends_to_all,
695
+ attend_from_chunk_width,
696
+ attend_from_chunk_stride,
697
+ attend_to_chunk_width,
698
+ attend_to_chunk_stride,
699
+ ):
700
+ super().__init__()
701
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
702
+ self.seq_len_dim = 1
703
+ self.attention = CanineAttention(
704
+ config,
705
+ local,
706
+ always_attend_to_first_position,
707
+ first_position_attends_to_all,
708
+ attend_from_chunk_width,
709
+ attend_from_chunk_stride,
710
+ attend_to_chunk_width,
711
+ attend_to_chunk_stride,
712
+ )
713
+ self.intermediate = CanineIntermediate(config)
714
+ self.output = CanineOutput(config)
715
+
716
+ def forward(
717
+ self,
718
+ hidden_states: Tuple[torch.FloatTensor],
719
+ attention_mask: Optional[torch.FloatTensor] = None,
720
+ head_mask: Optional[torch.FloatTensor] = None,
721
+ output_attentions: Optional[bool] = False,
722
+ ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]:
723
+ self_attention_outputs = self.attention(
724
+ hidden_states,
725
+ attention_mask,
726
+ head_mask,
727
+ output_attentions=output_attentions,
728
+ )
729
+ attention_output = self_attention_outputs[0]
730
+
731
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
732
+
733
+ layer_output = apply_chunking_to_forward(
734
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
735
+ )
736
+ outputs = (layer_output,) + outputs
737
+
738
+ return outputs
739
+
740
+ def feed_forward_chunk(self, attention_output):
741
+ intermediate_output = self.intermediate(attention_output)
742
+ layer_output = self.output(intermediate_output, attention_output)
743
+ return layer_output
744
+
745
+
746
+ class CanineEncoder(nn.Module):
747
+ def __init__(
748
+ self,
749
+ config,
750
+ local=False,
751
+ always_attend_to_first_position=False,
752
+ first_position_attends_to_all=False,
753
+ attend_from_chunk_width=128,
754
+ attend_from_chunk_stride=128,
755
+ attend_to_chunk_width=128,
756
+ attend_to_chunk_stride=128,
757
+ ):
758
+ super().__init__()
759
+ self.config = config
760
+ self.layer = nn.ModuleList(
761
+ [
762
+ CanineLayer(
763
+ config,
764
+ local,
765
+ always_attend_to_first_position,
766
+ first_position_attends_to_all,
767
+ attend_from_chunk_width,
768
+ attend_from_chunk_stride,
769
+ attend_to_chunk_width,
770
+ attend_to_chunk_stride,
771
+ )
772
+ for _ in range(config.num_hidden_layers)
773
+ ]
774
+ )
775
+ self.gradient_checkpointing = False
776
+
777
+ def forward(
778
+ self,
779
+ hidden_states: Tuple[torch.FloatTensor],
780
+ attention_mask: Optional[torch.FloatTensor] = None,
781
+ head_mask: Optional[torch.FloatTensor] = None,
782
+ output_attentions: Optional[bool] = False,
783
+ output_hidden_states: Optional[bool] = False,
784
+ return_dict: Optional[bool] = True,
785
+ ) -> Union[Tuple, BaseModelOutput]:
786
+ all_hidden_states = () if output_hidden_states else None
787
+ all_self_attentions = () if output_attentions else None
788
+
789
+ for i, layer_module in enumerate(self.layer):
790
+ if output_hidden_states:
791
+ all_hidden_states = all_hidden_states + (hidden_states,)
792
+
793
+ layer_head_mask = head_mask[i] if head_mask is not None else None
794
+
795
+ if self.gradient_checkpointing and self.training:
796
+ layer_outputs = self._gradient_checkpointing_func(
797
+ layer_module.__call__,
798
+ hidden_states,
799
+ attention_mask,
800
+ layer_head_mask,
801
+ output_attentions,
802
+ )
803
+ else:
804
+ layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, output_attentions)
805
+
806
+ hidden_states = layer_outputs[0]
807
+ if output_attentions:
808
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
809
+
810
+ if output_hidden_states:
811
+ all_hidden_states = all_hidden_states + (hidden_states,)
812
+
813
+ if not return_dict:
814
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
815
+ return BaseModelOutput(
816
+ last_hidden_state=hidden_states,
817
+ hidden_states=all_hidden_states,
818
+ attentions=all_self_attentions,
819
+ )
820
+
821
+
822
+ class CaninePooler(nn.Module):
823
+ def __init__(self, config):
824
+ super().__init__()
825
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
826
+ self.activation = nn.Tanh()
827
+
828
+ def forward(self, hidden_states: Tuple[torch.FloatTensor]) -> torch.FloatTensor:
829
+ # We "pool" the model by simply taking the hidden state corresponding
830
+ # to the first token.
831
+ first_token_tensor = hidden_states[:, 0]
832
+ pooled_output = self.dense(first_token_tensor)
833
+ pooled_output = self.activation(pooled_output)
834
+ return pooled_output
835
+
836
+
837
+ class CaninePredictionHeadTransform(nn.Module):
838
+ def __init__(self, config):
839
+ super().__init__()
840
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
841
+ if isinstance(config.hidden_act, str):
842
+ self.transform_act_fn = ACT2FN[config.hidden_act]
843
+ else:
844
+ self.transform_act_fn = config.hidden_act
845
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
846
+
847
+ def forward(self, hidden_states: Tuple[torch.FloatTensor]) -> torch.FloatTensor:
848
+ hidden_states = self.dense(hidden_states)
849
+ hidden_states = self.transform_act_fn(hidden_states)
850
+ hidden_states = self.LayerNorm(hidden_states)
851
+ return hidden_states
852
+
853
+
854
+ class CanineLMPredictionHead(nn.Module):
855
+ def __init__(self, config):
856
+ super().__init__()
857
+ self.transform = CaninePredictionHeadTransform(config)
858
+
859
+ # The output weights are the same as the input embeddings, but there is
860
+ # an output-only bias for each token.
861
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
862
+
863
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
864
+
865
+ # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
866
+ self.decoder.bias = self.bias
867
+
868
+ def forward(self, hidden_states: Tuple[torch.FloatTensor]) -> torch.FloatTensor:
869
+ hidden_states = self.transform(hidden_states)
870
+ hidden_states = self.decoder(hidden_states)
871
+ return hidden_states
872
+
873
+
874
+ class CanineOnlyMLMHead(nn.Module):
875
+ def __init__(self, config):
876
+ super().__init__()
877
+ self.predictions = CanineLMPredictionHead(config)
878
+
879
+ def forward(
880
+ self,
881
+ sequence_output: Tuple[torch.Tensor],
882
+ ) -> Tuple[torch.Tensor]:
883
+ prediction_scores = self.predictions(sequence_output)
884
+ return prediction_scores
885
+
886
+
887
+ class CaninePreTrainedModel(PreTrainedModel):
888
+ """
889
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
890
+ models.
891
+ """
892
+
893
+ config_class = CanineConfig
894
+ load_tf_weights = load_tf_weights_in_canine
895
+ base_model_prefix = "canine"
896
+ supports_gradient_checkpointing = True
897
+
898
+ def _init_weights(self, module):
899
+ """Initialize the weights"""
900
+ if isinstance(module, (nn.Linear, nn.Conv1d)):
901
+ # Slightly different from the TF version which uses truncated_normal for initialization
902
+ # cf https://github.com/pytorch/pytorch/pull/5617
903
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
904
+ if module.bias is not None:
905
+ module.bias.data.zero_()
906
+ elif isinstance(module, nn.Embedding):
907
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
908
+ if module.padding_idx is not None:
909
+ module.weight.data[module.padding_idx].zero_()
910
+ elif isinstance(module, nn.LayerNorm):
911
+ module.bias.data.zero_()
912
+ module.weight.data.fill_(1.0)
913
+
914
+
915
+ CANINE_START_DOCSTRING = r"""
916
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
917
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
918
+ behavior.
919
+
920
+ Parameters:
921
+ config ([`CanineConfig`]): Model configuration class with all the parameters of the model.
922
+ Initializing with a config file does not load the weights associated with the model, only the
923
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
924
+ """
925
+
926
+ CANINE_INPUTS_DOCSTRING = r"""
927
+ Args:
928
+ input_ids (`torch.LongTensor` of shape `({0})`):
929
+ Indices of input sequence tokens in the vocabulary.
930
+
931
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
932
+ [`PreTrainedTokenizer.__call__`] for details.
933
+
934
+ [What are input IDs?](../glossary#input-ids)
935
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
936
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
937
+
938
+ - 1 for tokens that are **not masked**,
939
+ - 0 for tokens that are **masked**.
940
+
941
+ [What are attention masks?](../glossary#attention-mask)
942
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
943
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
944
+ 1]`:
945
+
946
+ - 0 corresponds to a *sentence A* token,
947
+ - 1 corresponds to a *sentence B* token.
948
+
949
+ [What are token type IDs?](../glossary#token-type-ids)
950
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
951
+ Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0,
952
+ config.max_position_embeddings - 1]`.
953
+
954
+ [What are position IDs?](../glossary#position-ids)
955
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
956
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
957
+
958
+ - 1 indicates the head is **not masked**,
959
+ - 0 indicates the head is **masked**.
960
+
961
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
962
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
963
+ is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
964
+ model's internal embedding lookup matrix.
965
+ output_attentions (`bool`, *optional*):
966
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
967
+ tensors for more detail.
968
+ output_hidden_states (`bool`, *optional*):
969
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
970
+ more detail.
971
+ return_dict (`bool`, *optional*):
972
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
973
+ """
974
+
975
+
976
+ @add_start_docstrings(
977
+ "The bare CANINE Model transformer outputting raw hidden-states without any specific head on top.",
978
+ CANINE_START_DOCSTRING,
979
+ )
980
+ class CanineModel(CaninePreTrainedModel):
981
+ def __init__(self, config, add_pooling_layer=True):
982
+ super().__init__(config)
983
+ self.config = config
984
+ shallow_config = copy.deepcopy(config)
985
+ shallow_config.num_hidden_layers = 1
986
+
987
+ self.char_embeddings = CanineEmbeddings(config)
988
+ # shallow/low-dim transformer encoder to get an initial character encoding
989
+ self.initial_char_encoder = CanineEncoder(
990
+ shallow_config,
991
+ local=True,
992
+ always_attend_to_first_position=False,
993
+ first_position_attends_to_all=False,
994
+ attend_from_chunk_width=config.local_transformer_stride,
995
+ attend_from_chunk_stride=config.local_transformer_stride,
996
+ attend_to_chunk_width=config.local_transformer_stride,
997
+ attend_to_chunk_stride=config.local_transformer_stride,
998
+ )
999
+ self.chars_to_molecules = CharactersToMolecules(config)
1000
+ # deep transformer encoder
1001
+ self.encoder = CanineEncoder(config)
1002
+ self.projection = ConvProjection(config)
1003
+ # shallow/low-dim transformer encoder to get a final character encoding
1004
+ self.final_char_encoder = CanineEncoder(shallow_config)
1005
+
1006
+ self.pooler = CaninePooler(config) if add_pooling_layer else None
1007
+
1008
+ # Initialize weights and apply final processing
1009
+ self.post_init()
1010
+
1011
+ def _prune_heads(self, heads_to_prune):
1012
+ """
1013
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See base
1014
+ class PreTrainedModel
1015
+ """
1016
+ for layer, heads in heads_to_prune.items():
1017
+ self.encoder.layer[layer].attention.prune_heads(heads)
1018
+
1019
+ def _create_3d_attention_mask_from_input_mask(self, from_tensor, to_mask):
1020
+ """
1021
+ Create 3D attention mask from a 2D tensor mask.
1022
+
1023
+ Args:
1024
+ from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].
1025
+ to_mask: int32 Tensor of shape [batch_size, to_seq_length].
1026
+
1027
+ Returns:
1028
+ float Tensor of shape [batch_size, from_seq_length, to_seq_length].
1029
+ """
1030
+ batch_size, from_seq_length = from_tensor.shape[0], from_tensor.shape[1]
1031
+
1032
+ to_seq_length = to_mask.shape[1]
1033
+
1034
+ to_mask = torch.reshape(to_mask, (batch_size, 1, to_seq_length)).float()
1035
+
1036
+ # We don't assume that `from_tensor` is a mask (although it could be). We
1037
+ # don't actually care if we attend *from* padding tokens (only *to* padding tokens),
1038
+ # so we create a tensor of all ones.
1039
+ broadcast_ones = torch.ones(size=(batch_size, from_seq_length, 1), dtype=torch.float32, device=to_mask.device)
1040
+
1041
+ # Here we broadcast along two dimensions to create the mask.
1042
+ mask = broadcast_ones * to_mask
1043
+
1044
+ return mask
1045
+
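A quick standalone illustration of the broadcasting above (toy sizes, plain `torch`, no CANINE code): a column of ones multiplied by the row-shaped `to_mask` yields the full `[batch_size, from_seq_length, to_seq_length]` mask.

```python
import torch

# toy mask: one example, 3 "to" positions, the last one is padding
to_mask = torch.tensor([[1, 1, 0]])
from_seq_length = 2

to_mask_3d = torch.reshape(to_mask, (1, 1, 3)).float()   # (batch, 1, to_seq_length)
broadcast_ones = torch.ones(1, from_seq_length, 1)       # (batch, from_seq_length, 1)

mask = broadcast_ones * to_mask_3d                       # (batch, from_seq_length, to_seq_length)
print(mask)
# tensor([[[1., 1., 0.],
#          [1., 1., 0.]]])
```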
1046
+ def _downsample_attention_mask(self, char_attention_mask: torch.Tensor, downsampling_rate: int):
1047
+ """Downsample 2D character attention mask to 2D molecule attention mask using MaxPool1d layer."""
1048
+
1049
+ # first, make char_attention_mask 3D by adding a channel dim
1050
+ batch_size, char_seq_len = char_attention_mask.shape
1051
+ poolable_char_mask = torch.reshape(char_attention_mask, (batch_size, 1, char_seq_len))
1052
+
1053
+ # next, apply MaxPool1d to get pooled_molecule_mask of shape (batch_size, 1, mol_seq_len)
1054
+ pooled_molecule_mask = torch.nn.MaxPool1d(kernel_size=downsampling_rate, stride=downsampling_rate)(
1055
+ poolable_char_mask.float()
1056
+ )
1057
+
1058
+ # finally, squeeze to get tensor of shape (batch_size, mol_seq_len)
1059
+ molecule_attention_mask = torch.squeeze(pooled_molecule_mask, dim=-1)
1060
+
1061
+ return molecule_attention_mask
1062
+
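The same pooling step in isolation, with made-up sizes (8 characters, `downsampling_rate=4`); here the channel dimension is squeezed explicitly so the printed mask has the `(batch_size, mol_seq_len)` shape described in the docstring.

```python
import torch

# toy character mask: one example, 8 characters, the last 4 are padding
char_attention_mask = torch.tensor([[1, 1, 1, 1, 0, 0, 0, 0]])
downsampling_rate = 4  # illustrative value; the real one comes from the config

# add a channel dim so MaxPool1d can be applied: (batch, 1, char_seq_len)
poolable_char_mask = char_attention_mask.reshape(1, 1, 8).float()

# a molecule stays unmasked if *any* character it covers is a real token
pooled = torch.nn.MaxPool1d(kernel_size=downsampling_rate, stride=downsampling_rate)(poolable_char_mask)
molecule_attention_mask = pooled.squeeze(1)  # (batch, mol_seq_len)

print(molecule_attention_mask)  # tensor([[1., 0.]])
```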
1063
+ def _repeat_molecules(self, molecules: torch.Tensor, char_seq_length: torch.Tensor) -> torch.Tensor:
1064
+ """Repeats molecules to make them the same length as the char sequence."""
1065
+
1066
+ rate = self.config.downsampling_rate
1067
+
1068
+ molecules_without_extra_cls = molecules[:, 1:, :]
1069
+ # `repeated`: [batch_size, almost_char_seq_len, molecule_hidden_size]
1070
+ repeated = torch.repeat_interleave(molecules_without_extra_cls, repeats=rate, dim=-2)
1071
+
1072
+ # So far, we've repeated the elements sufficient for any `char_seq_length`
1073
+ # that's a multiple of `downsampling_rate`. Now we account for the last
1074
+ # n elements (n < `downsampling_rate`), i.e. the remainder of floor
1075
+ # division. We do this by repeating the last molecule a few extra times.
1076
+ last_molecule = molecules[:, -1:, :]
1077
+ remainder_length = torch.fmod(torch.tensor(char_seq_length), torch.tensor(rate)).item()
1078
+ remainder_repeated = torch.repeat_interleave(
1079
+ last_molecule,
1080
+ # +1 molecule to compensate for truncation.
1081
+ repeats=remainder_length + rate,
1082
+ dim=-2,
1083
+ )
1084
+
1085
+ # `repeated`: [batch_size, char_seq_len, molecule_hidden_size]
1086
+ return torch.cat([repeated, remainder_repeated], dim=-2)
1087
+
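A standalone sketch of the repeat-plus-remainder arithmetic, with made-up sizes (12 characters, rate 4, 3 molecules where the first stands in for the extra CLS molecule); the numbers are illustrative only.

```python
import torch

rate = 4                 # illustrative downsampling rate
char_seq_length = 12     # in CANINE this is a multiple of `rate`, so the remainder below is 0

# 3 "molecules" with hidden size 2; molecule 0 plays the role of the extra CLS molecule
molecules = torch.arange(3 * 2, dtype=torch.float).reshape(1, 3, 2)

# drop the CLS molecule and repeat every remaining molecule `rate` times along the sequence dim
repeated = torch.repeat_interleave(molecules[:, 1:, :], repeats=rate, dim=-2)        # (1, 8, 2)

# cover the tail of the character sequence with extra copies of the last molecule
remainder_length = char_seq_length % rate                                            # 0 here
remainder_repeated = torch.repeat_interleave(
    molecules[:, -1:, :], repeats=remainder_length + rate, dim=-2
)                                                                                    # (1, 4, 2)

upsampled = torch.cat([repeated, remainder_repeated], dim=-2)
print(upsampled.shape)  # torch.Size([1, 12, 2]) -> one molecule vector per character
```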
1088
+ @add_start_docstrings_to_model_forward(CANINE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1089
+ @add_code_sample_docstrings(
1090
+ checkpoint=_CHECKPOINT_FOR_DOC,
1091
+ output_type=CanineModelOutputWithPooling,
1092
+ config_class=_CONFIG_FOR_DOC,
1093
+ )
1094
+ def forward(
1095
+ self,
1096
+ input_ids: Optional[torch.LongTensor] = None,
1097
+ attention_mask: Optional[torch.FloatTensor] = None,
1098
+ token_type_ids: Optional[torch.LongTensor] = None,
1099
+ position_ids: Optional[torch.LongTensor] = None,
1100
+ head_mask: Optional[torch.FloatTensor] = None,
1101
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1102
+ output_attentions: Optional[bool] = None,
1103
+ output_hidden_states: Optional[bool] = None,
1104
+ return_dict: Optional[bool] = None,
1105
+ ) -> Union[Tuple, CanineModelOutputWithPooling]:
1106
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1107
+ output_hidden_states = (
1108
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1109
+ )
1110
+ all_hidden_states = () if output_hidden_states else None
1111
+ all_self_attentions = () if output_attentions else None
1112
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1113
+
1114
+ if input_ids is not None and inputs_embeds is not None:
1115
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
1116
+ elif input_ids is not None:
1117
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
1118
+ input_shape = input_ids.size()
1119
+ elif inputs_embeds is not None:
1120
+ input_shape = inputs_embeds.size()[:-1]
1121
+ else:
1122
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
1123
+
1124
+ batch_size, seq_length = input_shape
1125
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
1126
+
1127
+ if attention_mask is None:
1128
+ attention_mask = torch.ones(((batch_size, seq_length)), device=device)
1129
+ if token_type_ids is None:
1130
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
1131
+
1132
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
1133
+ # ourselves in which case we just need to make it broadcastable to all heads.
1134
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
1135
+ molecule_attention_mask = self._downsample_attention_mask(
1136
+ attention_mask, downsampling_rate=self.config.downsampling_rate
1137
+ )
1138
+ extended_molecule_attention_mask: torch.Tensor = self.get_extended_attention_mask(
1139
+ molecule_attention_mask, (batch_size, molecule_attention_mask.shape[-1])
1140
+ )
1141
+
1142
+ # Prepare head mask if needed
1143
+ # 1.0 in head_mask indicates we keep the head
1144
+ # attention_probs has shape bsz x n_heads x N x N
1145
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
1146
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
1147
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
1148
+
1149
+ # `input_char_embeddings`: shape (batch_size, char_seq, char_dim)
1150
+ input_char_embeddings = self.char_embeddings(
1151
+ input_ids=input_ids,
1152
+ position_ids=position_ids,
1153
+ token_type_ids=token_type_ids,
1154
+ inputs_embeds=inputs_embeds,
1155
+ )
1156
+
1157
+ # Contextualize character embeddings using shallow Transformer.
1158
+ # We use a 3D attention mask for the local attention.
1159
+ # `input_char_encoding`: shape (batch_size, char_seq_len, char_dim)
1160
+ char_attention_mask = self._create_3d_attention_mask_from_input_mask(
1161
+ input_ids if input_ids is not None else inputs_embeds, attention_mask
1162
+ )
1163
+ init_chars_encoder_outputs = self.initial_char_encoder(
1164
+ input_char_embeddings,
1165
+ attention_mask=char_attention_mask,
1166
+ output_attentions=output_attentions,
1167
+ output_hidden_states=output_hidden_states,
1168
+ )
1169
+ input_char_encoding = init_chars_encoder_outputs.last_hidden_state
1170
+
1171
+ # Downsample chars to molecules.
1172
+ # The following lines have dimensions: [batch, molecule_seq, molecule_dim].
1173
+ # In this transformation, we change the dimensionality from `char_dim` to
1174
+ # `molecule_dim`, but do *NOT* add a resnet connection. Instead, we rely on
1175
+ # the resnet connections (a) from the final char transformer stack back into
1176
+ # the original char transformer stack and (b) the resnet connections from
1177
+ # the final char transformer stack back into the deep BERT stack of
1178
+ # molecules.
1179
+ #
1180
+ # Empirically, it is critical to use a powerful enough transformation here:
1181
+ # mean pooling causes training to diverge with huge gradient norms in this
1182
+ # region of the model; using a convolution here resolves this issue. From
1183
+ # this, it seems that molecules and characters require a very different
1184
+ # feature space; intuitively, this makes sense.
1185
+ init_molecule_encoding = self.chars_to_molecules(input_char_encoding)
1186
+
1187
+ # Deep BERT encoder
1188
+ # `molecule_sequence_output`: shape (batch_size, mol_seq_len, mol_dim)
1189
+ encoder_outputs = self.encoder(
1190
+ init_molecule_encoding,
1191
+ attention_mask=extended_molecule_attention_mask,
1192
+ head_mask=head_mask,
1193
+ output_attentions=output_attentions,
1194
+ output_hidden_states=output_hidden_states,
1195
+ return_dict=return_dict,
1196
+ )
1197
+ molecule_sequence_output = encoder_outputs[0]
1198
+ pooled_output = self.pooler(molecule_sequence_output) if self.pooler is not None else None
1199
+
1200
+ # Upsample molecules back to characters.
1201
+ # `repeated_molecules`: shape (batch_size, char_seq_len, mol_hidden_size)
1202
+ repeated_molecules = self._repeat_molecules(molecule_sequence_output, char_seq_length=input_shape[-1])
1203
+
1204
+ # Concatenate representations (contextualized char embeddings and repeated molecules):
1205
+ # `concat`: shape [batch_size, char_seq_len, molecule_hidden_size+char_hidden_final]
1206
+ concat = torch.cat([input_char_encoding, repeated_molecules], dim=-1)
1207
+
1208
+ # Project representation dimension back to hidden_size
1209
+ # `sequence_output`: shape (batch_size, char_seq_len, hidden_size])
1210
+ sequence_output = self.projection(concat)
1211
+
1212
+ # Apply final shallow Transformer
1213
+ # `sequence_output`: shape (batch_size, char_seq_len, hidden_size])
1214
+ final_chars_encoder_outputs = self.final_char_encoder(
1215
+ sequence_output,
1216
+ attention_mask=extended_attention_mask,
1217
+ output_attentions=output_attentions,
1218
+ output_hidden_states=output_hidden_states,
1219
+ )
1220
+ sequence_output = final_chars_encoder_outputs.last_hidden_state
1221
+
1222
+ if output_hidden_states:
1223
+ deep_encoder_hidden_states = encoder_outputs.hidden_states if return_dict else encoder_outputs[1]
1224
+ all_hidden_states = (
1225
+ all_hidden_states
1226
+ + init_chars_encoder_outputs.hidden_states
1227
+ + deep_encoder_hidden_states
1228
+ + final_chars_encoder_outputs.hidden_states
1229
+ )
1230
+
1231
+ if output_attentions:
1232
+ deep_encoder_self_attentions = encoder_outputs.attentions if return_dict else encoder_outputs[-1]
1233
+ all_self_attentions = (
1234
+ all_self_attentions
1235
+ + init_chars_encoder_outputs.attentions
1236
+ + deep_encoder_self_attentions
1237
+ + final_chars_encoder_outputs.attentions
1238
+ )
1239
+
1240
+ if not return_dict:
1241
+ output = (sequence_output, pooled_output)
1242
+ output += tuple(v for v in [all_hidden_states, all_self_attentions] if v is not None)
1243
+ return output
1244
+
1245
+ return CanineModelOutputWithPooling(
1246
+ last_hidden_state=sequence_output,
1247
+ pooler_output=pooled_output,
1248
+ hidden_states=all_hidden_states,
1249
+ attentions=all_self_attentions,
1250
+ )
1251
+
1252
+
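For orientation, a short usage sketch of the bare model (checkpoint name taken from the docstring examples above; weights are downloaded on first use). CANINE tokenizes into Unicode code points, so the sequence length equals the number of characters plus special tokens.

```python
import torch
from transformers import AutoTokenizer, CanineModel

tokenizer = AutoTokenizer.from_pretrained("google/canine-s")
model = CanineModel.from_pretrained("google/canine-s")

inputs = tokenizer("hello world", return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# character-level hidden states plus the pooled [CLS] representation
print(outputs.last_hidden_state.shape)  # (batch_size, char_seq_len, hidden_size)
print(outputs.pooler_output.shape)      # (batch_size, hidden_size)
```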
1253
+ @add_start_docstrings(
1254
+ """
1255
+ CANINE Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
1256
+ output) e.g. for GLUE tasks.
1257
+ """,
1258
+ CANINE_START_DOCSTRING,
1259
+ )
1260
+ class CanineForSequenceClassification(CaninePreTrainedModel):
1261
+ def __init__(self, config):
1262
+ super().__init__(config)
1263
+ self.num_labels = config.num_labels
1264
+
1265
+ self.canine = CanineModel(config)
1266
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
1267
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1268
+
1269
+ # Initialize weights and apply final processing
1270
+ self.post_init()
1271
+
1272
+ @add_start_docstrings_to_model_forward(CANINE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1273
+ @add_code_sample_docstrings(
1274
+ checkpoint=_CHECKPOINT_FOR_DOC,
1275
+ output_type=SequenceClassifierOutput,
1276
+ config_class=_CONFIG_FOR_DOC,
1277
+ )
1278
+ def forward(
1279
+ self,
1280
+ input_ids: Optional[torch.LongTensor] = None,
1281
+ attention_mask: Optional[torch.FloatTensor] = None,
1282
+ token_type_ids: Optional[torch.LongTensor] = None,
1283
+ position_ids: Optional[torch.LongTensor] = None,
1284
+ head_mask: Optional[torch.FloatTensor] = None,
1285
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1286
+ labels: Optional[torch.LongTensor] = None,
1287
+ output_attentions: Optional[bool] = None,
1288
+ output_hidden_states: Optional[bool] = None,
1289
+ return_dict: Optional[bool] = None,
1290
+ ) -> Union[Tuple, SequenceClassifierOutput]:
1291
+ r"""
1292
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1293
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1294
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
1295
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1296
+ """
1297
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1298
+
1299
+ outputs = self.canine(
1300
+ input_ids,
1301
+ attention_mask=attention_mask,
1302
+ token_type_ids=token_type_ids,
1303
+ position_ids=position_ids,
1304
+ head_mask=head_mask,
1305
+ inputs_embeds=inputs_embeds,
1306
+ output_attentions=output_attentions,
1307
+ output_hidden_states=output_hidden_states,
1308
+ return_dict=return_dict,
1309
+ )
1310
+
1311
+ pooled_output = outputs[1]
1312
+
1313
+ pooled_output = self.dropout(pooled_output)
1314
+ logits = self.classifier(pooled_output)
1315
+
1316
+ loss = None
1317
+ if labels is not None:
1318
+ if self.config.problem_type is None:
1319
+ if self.num_labels == 1:
1320
+ self.config.problem_type = "regression"
1321
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1322
+ self.config.problem_type = "single_label_classification"
1323
+ else:
1324
+ self.config.problem_type = "multi_label_classification"
1325
+
1326
+ if self.config.problem_type == "regression":
1327
+ loss_fct = MSELoss()
1328
+ if self.num_labels == 1:
1329
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
1330
+ else:
1331
+ loss = loss_fct(logits, labels)
1332
+ elif self.config.problem_type == "single_label_classification":
1333
+ loss_fct = CrossEntropyLoss()
1334
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1335
+ elif self.config.problem_type == "multi_label_classification":
1336
+ loss_fct = BCEWithLogitsLoss()
1337
+ loss = loss_fct(logits, labels)
1338
+ if not return_dict:
1339
+ output = (logits,) + outputs[2:]
1340
+ return ((loss,) + output) if loss is not None else output
1341
+
1342
+ return SequenceClassifierOutput(
1343
+ loss=loss,
1344
+ logits=logits,
1345
+ hidden_states=outputs.hidden_states,
1346
+ attentions=outputs.attentions,
1347
+ )
1348
+
1349
+
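The loss selection above depends only on `num_labels` and the label dtype; the following sketch isolates that branching in a hypothetical helper (not part of the library) so the three regimes are easy to see.

```python
import torch

def infer_problem_type(num_labels: int, labels: torch.Tensor) -> str:
    """Hypothetical helper mirroring the branching in CanineForSequenceClassification.forward."""
    if num_labels == 1:
        return "regression"                   # MSELoss on a single scalar output
    if labels.dtype in (torch.long, torch.int):
        return "single_label_classification"  # CrossEntropyLoss over class indices
    return "multi_label_classification"       # BCEWithLogitsLoss over float multi-hot targets

print(infer_problem_type(1, torch.tensor([0.7])))              # regression
print(infer_problem_type(3, torch.tensor([2])))                # single_label_classification
print(infer_problem_type(3, torch.tensor([[1.0, 0.0, 1.0]])))  # multi_label_classification
```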
1350
+ @add_start_docstrings(
1351
+ """
1352
+ CANINE Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
1353
+ softmax) e.g. for RocStories/SWAG tasks.
1354
+ """,
1355
+ CANINE_START_DOCSTRING,
1356
+ )
1357
+ class CanineForMultipleChoice(CaninePreTrainedModel):
1358
+ def __init__(self, config):
1359
+ super().__init__(config)
1360
+
1361
+ self.canine = CanineModel(config)
1362
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
1363
+ self.classifier = nn.Linear(config.hidden_size, 1)
1364
+
1365
+ # Initialize weights and apply final processing
1366
+ self.post_init()
1367
+
1368
+ @add_start_docstrings_to_model_forward(CANINE_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
1369
+ @add_code_sample_docstrings(
1370
+ checkpoint=_CHECKPOINT_FOR_DOC,
1371
+ output_type=MultipleChoiceModelOutput,
1372
+ config_class=_CONFIG_FOR_DOC,
1373
+ )
1374
+ def forward(
1375
+ self,
1376
+ input_ids: Optional[torch.LongTensor] = None,
1377
+ attention_mask: Optional[torch.FloatTensor] = None,
1378
+ token_type_ids: Optional[torch.LongTensor] = None,
1379
+ position_ids: Optional[torch.LongTensor] = None,
1380
+ head_mask: Optional[torch.FloatTensor] = None,
1381
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1382
+ labels: Optional[torch.LongTensor] = None,
1383
+ output_attentions: Optional[bool] = None,
1384
+ output_hidden_states: Optional[bool] = None,
1385
+ return_dict: Optional[bool] = None,
1386
+ ) -> Union[Tuple, MultipleChoiceModelOutput]:
1387
+ r"""
1388
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1389
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
1390
+ num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
1391
+ `input_ids` above)
1392
+ """
1393
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1394
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
1395
+
1396
+ input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
1397
+ attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
1398
+ token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
1399
+ position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
1400
+ inputs_embeds = (
1401
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
1402
+ if inputs_embeds is not None
1403
+ else None
1404
+ )
1405
+
1406
+ outputs = self.canine(
1407
+ input_ids,
1408
+ attention_mask=attention_mask,
1409
+ token_type_ids=token_type_ids,
1410
+ position_ids=position_ids,
1411
+ head_mask=head_mask,
1412
+ inputs_embeds=inputs_embeds,
1413
+ output_attentions=output_attentions,
1414
+ output_hidden_states=output_hidden_states,
1415
+ return_dict=return_dict,
1416
+ )
1417
+
1418
+ pooled_output = outputs[1]
1419
+
1420
+ pooled_output = self.dropout(pooled_output)
1421
+ logits = self.classifier(pooled_output)
1422
+ reshaped_logits = logits.view(-1, num_choices)
1423
+
1424
+ loss = None
1425
+ if labels is not None:
1426
+ loss_fct = CrossEntropyLoss()
1427
+ loss = loss_fct(reshaped_logits, labels)
1428
+
1429
+ if not return_dict:
1430
+ output = (reshaped_logits,) + outputs[2:]
1431
+ return ((loss,) + output) if loss is not None else output
1432
+
1433
+ return MultipleChoiceModelOutput(
1434
+ loss=loss,
1435
+ logits=reshaped_logits,
1436
+ hidden_states=outputs.hidden_states,
1437
+ attentions=outputs.attentions,
1438
+ )
1439
+
1440
+
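Shape-wise, the multiple-choice head folds the choice dimension into the batch before encoding and unfolds it again for the logits; a toy sketch with illustrative sizes:

```python
import torch

batch_size, num_choices, seq_len, hidden = 2, 4, 16, 8  # illustrative sizes

input_ids = torch.zeros(batch_size, num_choices, seq_len, dtype=torch.long)

# flatten the choice dimension into the batch before running the encoder
flat_input_ids = input_ids.view(-1, input_ids.size(-1))   # (8, 16)

# stand-ins for the pooled encoder output and the 1-unit classifier used above
pooled_output = torch.randn(batch_size * num_choices, hidden)
classifier = torch.nn.Linear(hidden, 1)

logits = classifier(pooled_output)              # (8, 1)
reshaped_logits = logits.view(-1, num_choices)  # (2, 4): one score per choice
print(reshaped_logits.shape)
```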
1441
+ @add_start_docstrings(
1442
+ """
1443
+ CANINE Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
1444
+ Named-Entity-Recognition (NER) tasks.
1445
+ """,
1446
+ CANINE_START_DOCSTRING,
1447
+ )
1448
+ class CanineForTokenClassification(CaninePreTrainedModel):
1449
+ def __init__(self, config):
1450
+ super().__init__(config)
1451
+ self.num_labels = config.num_labels
1452
+
1453
+ self.canine = CanineModel(config)
1454
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
1455
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1456
+
1457
+ # Initialize weights and apply final processing
1458
+ self.post_init()
1459
+
1460
+ @add_start_docstrings_to_model_forward(CANINE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1461
+ @replace_return_docstrings(output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC)
1462
+ def forward(
1463
+ self,
1464
+ input_ids: Optional[torch.LongTensor] = None,
1465
+ attention_mask: Optional[torch.FloatTensor] = None,
1466
+ token_type_ids: Optional[torch.LongTensor] = None,
1467
+ position_ids: Optional[torch.LongTensor] = None,
1468
+ head_mask: Optional[torch.FloatTensor] = None,
1469
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1470
+ labels: Optional[torch.LongTensor] = None,
1471
+ output_attentions: Optional[bool] = None,
1472
+ output_hidden_states: Optional[bool] = None,
1473
+ return_dict: Optional[bool] = None,
1474
+ ) -> Union[Tuple, TokenClassifierOutput]:
1475
+ r"""
1476
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1477
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1478
+
1479
+ Returns:
1480
+
1481
+ Example:
1482
+
1483
+ ```python
1484
+ >>> from transformers import AutoTokenizer, CanineForTokenClassification
1485
+ >>> import torch
1486
+
1487
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/canine-s")
1488
+ >>> model = CanineForTokenClassification.from_pretrained("google/canine-s")
1489
+
1490
+ >>> inputs = tokenizer(
1491
+ ... "HuggingFace is a company based in Paris and New York", add_special_tokens=False, return_tensors="pt"
1492
+ ... )
1493
+
1494
+ >>> with torch.no_grad():
1495
+ ... logits = model(**inputs).logits
1496
+
1497
+ >>> predicted_token_class_ids = logits.argmax(-1)
1498
+
1499
+ >>> # Note that tokens are classified rather than input words which means that
1500
+ >>> # there might be more predicted token classes than words.
1501
+ >>> # Multiple token classes might account for the same word
1502
+ >>> predicted_tokens_classes = [model.config.id2label[t.item()] for t in predicted_token_class_ids[0]]
1503
+ >>> predicted_tokens_classes # doctest: +SKIP
1504
+ ```
1505
+
1506
+ ```python
1507
+ >>> labels = predicted_token_class_ids
1508
+ >>> loss = model(**inputs, labels=labels).loss
1509
+ >>> round(loss.item(), 2) # doctest: +SKIP
1510
+ ```"""
1511
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1512
+
1513
+ outputs = self.canine(
1514
+ input_ids,
1515
+ attention_mask=attention_mask,
1516
+ token_type_ids=token_type_ids,
1517
+ position_ids=position_ids,
1518
+ head_mask=head_mask,
1519
+ inputs_embeds=inputs_embeds,
1520
+ output_attentions=output_attentions,
1521
+ output_hidden_states=output_hidden_states,
1522
+ return_dict=return_dict,
1523
+ )
1524
+
1525
+ sequence_output = outputs[0]
1526
+
1527
+ sequence_output = self.dropout(sequence_output)
1528
+ logits = self.classifier(sequence_output)
1529
+
1530
+ loss = None
1531
+ if labels is not None:
1532
+ loss_fct = CrossEntropyLoss()
1533
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1534
+
1535
+ if not return_dict:
1536
+ output = (logits,) + outputs[2:]
1537
+ return ((loss,) + output) if loss is not None else output
1538
+
1539
+ return TokenClassifierOutput(
1540
+ loss=loss,
1541
+ logits=logits,
1542
+ hidden_states=outputs.hidden_states,
1543
+ attentions=outputs.attentions,
1544
+ )
1545
+
1546
+
1547
+ @add_start_docstrings(
1548
+ """
1549
+ CANINE Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
1550
+ layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
1551
+ """,
1552
+ CANINE_START_DOCSTRING,
1553
+ )
1554
+ class CanineForQuestionAnswering(CaninePreTrainedModel):
1555
+ def __init__(self, config):
1556
+ super().__init__(config)
1557
+ self.num_labels = config.num_labels
1558
+
1559
+ self.canine = CanineModel(config)
1560
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
1561
+
1562
+ # Initialize weights and apply final processing
1563
+ self.post_init()
1564
+
1565
+ @add_start_docstrings_to_model_forward(CANINE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1566
+ @add_code_sample_docstrings(
1567
+ checkpoint="Splend1dchan/canine-c-squad",
1568
+ output_type=QuestionAnsweringModelOutput,
1569
+ config_class=_CONFIG_FOR_DOC,
1570
+ expected_output="'nice puppet'",
1571
+ expected_loss=8.81,
1572
+ )
1573
+ def forward(
1574
+ self,
1575
+ input_ids: Optional[torch.LongTensor] = None,
1576
+ attention_mask: Optional[torch.FloatTensor] = None,
1577
+ token_type_ids: Optional[torch.LongTensor] = None,
1578
+ position_ids: Optional[torch.LongTensor] = None,
1579
+ head_mask: Optional[torch.FloatTensor] = None,
1580
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1581
+ start_positions: Optional[torch.LongTensor] = None,
1582
+ end_positions: Optional[torch.LongTensor] = None,
1583
+ output_attentions: Optional[bool] = None,
1584
+ output_hidden_states: Optional[bool] = None,
1585
+ return_dict: Optional[bool] = None,
1586
+ ) -> Union[Tuple, QuestionAnsweringModelOutput]:
1587
+ r"""
1588
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1589
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1590
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1591
+ are not taken into account for computing the loss.
1592
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1593
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1594
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1595
+ are not taken into account for computing the loss.
1596
+ """
1597
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1598
+
1599
+ outputs = self.canine(
1600
+ input_ids,
1601
+ attention_mask=attention_mask,
1602
+ token_type_ids=token_type_ids,
1603
+ position_ids=position_ids,
1604
+ head_mask=head_mask,
1605
+ inputs_embeds=inputs_embeds,
1606
+ output_attentions=output_attentions,
1607
+ output_hidden_states=output_hidden_states,
1608
+ return_dict=return_dict,
1609
+ )
1610
+
1611
+ sequence_output = outputs[0]
1612
+
1613
+ logits = self.qa_outputs(sequence_output)
1614
+ start_logits, end_logits = logits.split(1, dim=-1)
1615
+ start_logits = start_logits.squeeze(-1)
1616
+ end_logits = end_logits.squeeze(-1)
1617
+
1618
+ total_loss = None
1619
+ if start_positions is not None and end_positions is not None:
1620
+ # If we are on multi-GPU, the split adds an extra dimension; squeeze it
1621
+ if len(start_positions.size()) > 1:
1622
+ start_positions = start_positions.squeeze(-1)
1623
+ if len(end_positions.size()) > 1:
1624
+ end_positions = end_positions.squeeze(-1)
1625
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
1626
+ ignored_index = start_logits.size(1)
1627
+ start_positions.clamp_(0, ignored_index)
1628
+ end_positions.clamp_(0, ignored_index)
1629
+
1630
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
1631
+ start_loss = loss_fct(start_logits, start_positions)
1632
+ end_loss = loss_fct(end_logits, end_positions)
1633
+ total_loss = (start_loss + end_loss) / 2
1634
+
1635
+ if not return_dict:
1636
+ output = (start_logits, end_logits) + outputs[2:]
1637
+ return ((total_loss,) + output) if total_loss is not None else output
1638
+
1639
+ return QuestionAnsweringModelOutput(
1640
+ loss=total_loss,
1641
+ start_logits=start_logits,
1642
+ end_logits=end_logits,
1643
+ hidden_states=outputs.hidden_states,
1644
+ attentions=outputs.attentions,
1645
+ )
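To close out the CANINE heads, a rough sketch of how the start/end logits are typically turned into an answer string; the fine-tuned checkpoint name is the one referenced in the docstring above, and the tokenizer is assumed to load from that same repository.

```python
import torch
from transformers import AutoTokenizer, CanineForQuestionAnswering

checkpoint = "Splend1dchan/canine-c-squad"  # referenced in the docstring above
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = CanineForQuestionAnswering.from_pretrained(checkpoint)

question, context = "Who was Jim Henson?", "Jim Henson was a nice puppet."
inputs = tokenizer(question, context, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# greedy span: most likely start and end character positions
start = outputs.start_logits.argmax(-1).item()
end = outputs.end_logits.argmax(-1).item()
print(tokenizer.decode(inputs["input_ids"][0, start : end + 1]))
```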
llmeval-env/lib/python3.10/site-packages/transformers/models/clipseg/__init__.py ADDED
@@ -0,0 +1,71 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
17
+
18
+
19
+ _import_structure = {
20
+ "configuration_clipseg": [
21
+ "CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
22
+ "CLIPSegConfig",
23
+ "CLIPSegTextConfig",
24
+ "CLIPSegVisionConfig",
25
+ ],
26
+ "processing_clipseg": ["CLIPSegProcessor"],
27
+ }
28
+
29
+ try:
30
+ if not is_torch_available():
31
+ raise OptionalDependencyNotAvailable()
32
+ except OptionalDependencyNotAvailable:
33
+ pass
34
+ else:
35
+ _import_structure["modeling_clipseg"] = [
36
+ "CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
37
+ "CLIPSegModel",
38
+ "CLIPSegPreTrainedModel",
39
+ "CLIPSegTextModel",
40
+ "CLIPSegVisionModel",
41
+ "CLIPSegForImageSegmentation",
42
+ ]
43
+
44
+ if TYPE_CHECKING:
45
+ from .configuration_clipseg import (
46
+ CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
47
+ CLIPSegConfig,
48
+ CLIPSegTextConfig,
49
+ CLIPSegVisionConfig,
50
+ )
51
+ from .processing_clipseg import CLIPSegProcessor
52
+
53
+ try:
54
+ if not is_torch_available():
55
+ raise OptionalDependencyNotAvailable()
56
+ except OptionalDependencyNotAvailable:
57
+ pass
58
+ else:
59
+ from .modeling_clipseg import (
60
+ CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
61
+ CLIPSegForImageSegmentation,
62
+ CLIPSegModel,
63
+ CLIPSegPreTrainedModel,
64
+ CLIPSegTextModel,
65
+ CLIPSegVisionModel,
66
+ )
67
+
68
+ else:
69
+ import sys
70
+
71
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
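Worth noting about this `__init__.py`: the `_LazyModule` swap at the bottom means nothing from `modeling_clipseg` is imported until an attribute is actually accessed, and if `torch` is missing the modeling classes are simply never registered. A small sketch of what that enables:

```python
# Config and processor classes resolve without touching the torch-dependent modeling file.
from transformers import CLIPSegConfig, CLIPSegProcessor  # noqa: F401

# Attribute access on the subpackage is resolved lazily by _LazyModule.
import transformers.models.clipseg as clipseg

config_cls = clipseg.CLIPSegConfig                # loads configuration_clipseg on demand
model_cls = clipseg.CLIPSegForImageSegmentation   # loads modeling_clipseg (requires torch)
print(config_cls.__name__, model_cls.__name__)
```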
llmeval-env/lib/python3.10/site-packages/transformers/models/clipseg/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.15 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/clipseg/__pycache__/convert_clipseg_original_pytorch_to_hf.cpython-310.pyc ADDED
Binary file (7.36 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/clipseg/__pycache__/modeling_clipseg.cpython-310.pyc ADDED
Binary file (43.9 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/clipseg/__pycache__/processing_clipseg.cpython-310.pyc ADDED
Binary file (6.62 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/clipseg/configuration_clipseg.py ADDED
@@ -0,0 +1,432 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ CLIPSeg model configuration"""
16
+
17
+ import os
18
+ from typing import Union
19
+
20
+ from ...configuration_utils import PretrainedConfig
21
+ from ...utils import logging
22
+
23
+
24
+ logger = logging.get_logger(__name__)
25
+
26
+
27
+ from ..deprecated._archive_maps import CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
28
+
29
+
30
+ class CLIPSegTextConfig(PretrainedConfig):
31
+ r"""
32
+ This is the configuration class to store the configuration of a [`CLIPSegModel`]. It is used to instantiate an
33
+ CLIPSeg model according to the specified arguments, defining the model architecture. Instantiating a configuration
34
+ with the defaults will yield a similar configuration to that of the CLIPSeg
35
+ [CIDAS/clipseg-rd64](https://huggingface.co/CIDAS/clipseg-rd64) architecture.
36
+
37
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
38
+ documentation from [`PretrainedConfig`] for more information.
39
+
40
+ Args:
41
+ vocab_size (`int`, *optional*, defaults to 49408):
42
+ Vocabulary size of the CLIPSeg text model. Defines the number of different tokens that can be represented
43
+ by the `inputs_ids` passed when calling [`CLIPSegModel`].
44
+ hidden_size (`int`, *optional*, defaults to 512):
45
+ Dimensionality of the encoder layers and the pooler layer.
46
+ intermediate_size (`int`, *optional*, defaults to 2048):
47
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
48
+ num_hidden_layers (`int`, *optional*, defaults to 12):
49
+ Number of hidden layers in the Transformer encoder.
50
+ num_attention_heads (`int`, *optional*, defaults to 8):
51
+ Number of attention heads for each attention layer in the Transformer encoder.
52
+ max_position_embeddings (`int`, *optional*, defaults to 77):
53
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
54
+ just in case (e.g., 512 or 1024 or 2048).
55
+ hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
56
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
57
+ `"relu"`, `"selu"` and `"gelu_new"` ``"quick_gelu"` are supported.
58
+ layer_norm_eps (`float`, *optional*, defaults to 1e-05):
59
+ The epsilon used by the layer normalization layers.
60
+ attention_dropout (`float`, *optional*, defaults to 0.0):
61
+ The dropout ratio for the attention probabilities.
62
+ initializer_range (`float`, *optional*, defaults to 0.02):
63
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
64
+ initializer_factor (`float`, *optional*, defaults to 1.0):
65
+ A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
66
+ testing).
67
+ pad_token_id (`int`, *optional*, defaults to 1):
68
+ Padding token id.
69
+ bos_token_id (`int`, *optional*, defaults to 49406):
70
+ Beginning of stream token id.
71
+ eos_token_id (`int`, *optional*, defaults to 49407):
72
+ End of stream token id.
73
+
74
+ Example:
75
+
76
+ ```python
77
+ >>> from transformers import CLIPSegTextConfig, CLIPSegTextModel
78
+
79
+ >>> # Initializing a CLIPSegTextConfig with CIDAS/clipseg-rd64 style configuration
80
+ >>> configuration = CLIPSegTextConfig()
81
+
82
+ >>> # Initializing a CLIPSegTextModel (with random weights) from the CIDAS/clipseg-rd64 style configuration
83
+ >>> model = CLIPSegTextModel(configuration)
84
+
85
+ >>> # Accessing the model configuration
86
+ >>> configuration = model.config
87
+ ```"""
88
+
89
+ model_type = "clipseg_text_model"
90
+
91
+ def __init__(
92
+ self,
93
+ vocab_size=49408,
94
+ hidden_size=512,
95
+ intermediate_size=2048,
96
+ num_hidden_layers=12,
97
+ num_attention_heads=8,
98
+ max_position_embeddings=77,
99
+ hidden_act="quick_gelu",
100
+ layer_norm_eps=1e-5,
101
+ attention_dropout=0.0,
102
+ initializer_range=0.02,
103
+ initializer_factor=1.0,
104
+ pad_token_id=1,
105
+ bos_token_id=49406,
106
+ eos_token_id=49407,
107
+ **kwargs,
108
+ ):
109
+ super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
110
+
111
+ self.vocab_size = vocab_size
112
+ self.hidden_size = hidden_size
113
+ self.intermediate_size = intermediate_size
114
+ self.num_hidden_layers = num_hidden_layers
115
+ self.num_attention_heads = num_attention_heads
116
+ self.max_position_embeddings = max_position_embeddings
117
+ self.layer_norm_eps = layer_norm_eps
118
+ self.hidden_act = hidden_act
119
+ self.initializer_range = initializer_range
120
+ self.initializer_factor = initializer_factor
121
+ self.attention_dropout = attention_dropout
122
+
123
+ @classmethod
124
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
125
+ cls._set_token_in_kwargs(kwargs)
126
+
127
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
128
+
129
+ # get the text config dict if we are loading from CLIPSegConfig
130
+ if config_dict.get("model_type") == "clipseg":
131
+ config_dict = config_dict["text_config"]
132
+
133
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
134
+ logger.warning(
135
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
136
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
137
+ )
138
+
139
+ return cls.from_dict(config_dict, **kwargs)
140
+
141
+
142
+ class CLIPSegVisionConfig(PretrainedConfig):
143
+ r"""
144
+ This is the configuration class to store the configuration of a [`CLIPSegModel`]. It is used to instantiate an
145
+ CLIPSeg model according to the specified arguments, defining the model architecture. Instantiating a configuration
146
+ with the defaults will yield a similar configuration to that of the CLIPSeg
147
+ [CIDAS/clipseg-rd64](https://huggingface.co/CIDAS/clipseg-rd64) architecture.
148
+
149
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
150
+ documentation from [`PretrainedConfig`] for more information.
151
+
152
+ Args:
153
+ hidden_size (`int`, *optional*, defaults to 768):
154
+ Dimensionality of the encoder layers and the pooler layer.
155
+ intermediate_size (`int`, *optional*, defaults to 3072):
156
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
157
+ num_hidden_layers (`int`, *optional*, defaults to 12):
158
+ Number of hidden layers in the Transformer encoder.
159
+ num_attention_heads (`int`, *optional*, defaults to 12):
160
+ Number of attention heads for each attention layer in the Transformer encoder.
161
+ num_channels (`int`, *optional*, defaults to 3):
162
+ The number of input channels.
163
+ image_size (`int`, *optional*, defaults to 224):
164
+ The size (resolution) of each image.
165
+ patch_size (`int`, *optional*, defaults to 32):
166
+ The size (resolution) of each patch.
167
+ hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
168
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
169
+ `"relu"`, `"selu"` and `"gelu_new"` ``"quick_gelu"` are supported.
170
+ layer_norm_eps (`float`, *optional*, defaults to 1e-05):
171
+ The epsilon used by the layer normalization layers.
172
+ attention_dropout (`float`, *optional*, defaults to 0.0):
173
+ The dropout ratio for the attention probabilities.
174
+ initializer_range (`float`, *optional*, defaults to 0.02):
175
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
176
+ initializer_factor (`float`, *optional*, defaults to 1.0):
177
+ A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
178
+ testing).
179
+
180
+ Example:
181
+
182
+ ```python
183
+ >>> from transformers import CLIPSegVisionConfig, CLIPSegVisionModel
184
+
185
+ >>> # Initializing a CLIPSegVisionConfig with CIDAS/clipseg-rd64 style configuration
186
+ >>> configuration = CLIPSegVisionConfig()
187
+
188
+ >>> # Initializing a CLIPSegVisionModel (with random weights) from the CIDAS/clipseg-rd64 style configuration
189
+ >>> model = CLIPSegVisionModel(configuration)
190
+
191
+ >>> # Accessing the model configuration
192
+ >>> configuration = model.config
193
+ ```"""
194
+
195
+ model_type = "clipseg_vision_model"
196
+
197
+ def __init__(
198
+ self,
199
+ hidden_size=768,
200
+ intermediate_size=3072,
201
+ num_hidden_layers=12,
202
+ num_attention_heads=12,
203
+ num_channels=3,
204
+ image_size=224,
205
+ patch_size=32,
206
+ hidden_act="quick_gelu",
207
+ layer_norm_eps=1e-5,
208
+ attention_dropout=0.0,
209
+ initializer_range=0.02,
210
+ initializer_factor=1.0,
211
+ **kwargs,
212
+ ):
213
+ super().__init__(**kwargs)
214
+
215
+ self.hidden_size = hidden_size
216
+ self.intermediate_size = intermediate_size
217
+ self.num_hidden_layers = num_hidden_layers
218
+ self.num_attention_heads = num_attention_heads
219
+ self.num_channels = num_channels
220
+ self.patch_size = patch_size
221
+ self.image_size = image_size
222
+ self.initializer_range = initializer_range
223
+ self.initializer_factor = initializer_factor
224
+ self.attention_dropout = attention_dropout
225
+ self.layer_norm_eps = layer_norm_eps
226
+ self.hidden_act = hidden_act
227
+
228
+ @classmethod
229
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
230
+ cls._set_token_in_kwargs(kwargs)
231
+
232
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
233
+
234
+ # get the vision config dict if we are loading from CLIPSegConfig
235
+ if config_dict.get("model_type") == "clipseg":
236
+ config_dict = config_dict["vision_config"]
237
+
238
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
239
+ logger.warning(
240
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
241
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
242
+ )
243
+
244
+ return cls.from_dict(config_dict, **kwargs)
245
+
246
+
247
+ class CLIPSegConfig(PretrainedConfig):
248
+ r"""
249
+ [`CLIPSegConfig`] is the configuration class to store the configuration of a [`CLIPSegModel`]. It is used to
250
+ instantiate a CLIPSeg model according to the specified arguments, defining the text model and vision model configs.
251
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the CLIPSeg
252
+ [CIDAS/clipseg-rd64](https://huggingface.co/CIDAS/clipseg-rd64) architecture.
253
+
254
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
255
+ documentation from [`PretrainedConfig`] for more information.
256
+
257
+ Args:
258
+ text_config (`dict`, *optional*):
259
+ Dictionary of configuration options used to initialize [`CLIPSegTextConfig`].
260
+ vision_config (`dict`, *optional*):
261
+ Dictionary of configuration options used to initialize [`CLIPSegVisionConfig`].
262
+ projection_dim (`int`, *optional*, defaults to 512):
263
+ Dimensionality of text and vision projection layers.
264
+ logit_scale_init_value (`float`, *optional*, defaults to 2.6592):
265
+ The inital value of the *logit_scale* paramter. Default is used as per the original CLIPSeg implementation.
266
+ extract_layers (`List[int]`, *optional*, defaults to `[3, 6, 9]`):
267
+ Layers to extract when forwarding the query image through the frozen visual backbone of CLIP.
268
+ reduce_dim (`int`, *optional*, defaults to 64):
269
+ Dimensionality to reduce the CLIP vision embedding.
270
+ decoder_num_attention_heads (`int`, *optional*, defaults to 4):
271
+ Number of attention heads in the decoder of CLIPSeg.
272
+ decoder_attention_dropout (`float`, *optional*, defaults to 0.0):
273
+ The dropout ratio for the attention probabilities.
274
+ decoder_hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
275
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
276
+ `"relu"`, `"selu"` and `"gelu_new"` ``"quick_gelu"` are supported.
277
+ decoder_intermediate_size (`int`, *optional*, defaults to 2048):
278
+ Dimensionality of the "intermediate" (i.e., feed-forward) layers in the Transformer decoder.
279
+ conditional_layer (`int`, *optional*, defaults to 0):
280
+ The layer to use of the Transformer encoder whose activations will be combined with the condition
281
+ embeddings using FiLM (Feature-wise Linear Modulation). If 0, the last layer is used.
282
+ use_complex_transposed_convolution (`bool`, *optional*, defaults to `False`):
283
+ Whether to use a more complex transposed convolution in the decoder, enabling more fine-grained
284
+ segmentation.
285
+ kwargs (*optional*):
286
+ Dictionary of keyword arguments.
287
+
288
+ Example:
289
+
290
+ ```python
291
+ >>> from transformers import CLIPSegConfig, CLIPSegModel
292
+
293
+ >>> # Initializing a CLIPSegConfig with CIDAS/clipseg-rd64 style configuration
294
+ >>> configuration = CLIPSegConfig()
295
+
296
+ >>> # Initializing a CLIPSegModel (with random weights) from the CIDAS/clipseg-rd64 style configuration
297
+ >>> model = CLIPSegModel(configuration)
298
+
299
+ >>> # Accessing the model configuration
300
+ >>> configuration = model.config
301
+
302
+ >>> # We can also initialize a CLIPSegConfig from a CLIPSegTextConfig and a CLIPSegVisionConfig
303
+
304
+ >>> # Initializing a CLIPSegText and CLIPSegVision configuration
305
+ >>> config_text = CLIPSegTextConfig()
306
+ >>> config_vision = CLIPSegVisionConfig()
307
+
308
+ >>> config = CLIPSegConfig.from_text_vision_configs(config_text, config_vision)
309
+ ```"""
310
+
311
+ model_type = "clipseg"
312
+
313
+ def __init__(
314
+ self,
315
+ text_config=None,
316
+ vision_config=None,
317
+ projection_dim=512,
318
+ logit_scale_init_value=2.6592,
319
+ extract_layers=[3, 6, 9],
320
+ reduce_dim=64,
321
+ decoder_num_attention_heads=4,
322
+ decoder_attention_dropout=0.0,
323
+ decoder_hidden_act="quick_gelu",
324
+ decoder_intermediate_size=2048,
325
+ conditional_layer=0,
326
+ use_complex_transposed_convolution=False,
327
+ **kwargs,
328
+ ):
329
+ # If `_config_dict` exist, we use them for the backward compatibility.
330
+ # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
331
+ # of confusion!).
332
+ text_config_dict = kwargs.pop("text_config_dict", None)
333
+ vision_config_dict = kwargs.pop("vision_config_dict", None)
334
+
335
+ super().__init__(**kwargs)
336
+
337
+ # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
338
+ # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
339
+ # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
340
+ if text_config_dict is not None:
341
+ if text_config is None:
342
+ text_config = {}
343
+
344
+ # This is the complete result when using `text_config_dict`.
345
+ _text_config_dict = CLIPSegTextConfig(**text_config_dict).to_dict()
346
+
347
+ # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
348
+ for key, value in _text_config_dict.items():
349
+ if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
350
+ # If specified in `text_config_dict`
351
+ if key in text_config_dict:
352
+ message = (
353
+ f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
354
+ f'The value `text_config_dict["{key}"]` will be used instead.'
355
+ )
356
+ # If inferred from default argument values (just to be super careful)
357
+ else:
358
+ message = (
359
+ f"`text_config_dict` is provided which will be used to initialize `CLIPSegTextConfig`. The "
360
+ f'value `text_config["{key}"]` will be overriden.'
361
+ )
362
+ logger.info(message)
363
+
364
+ # Update all values in `text_config` with the ones in `_text_config_dict`.
365
+ text_config.update(_text_config_dict)
366
+
367
+ if vision_config_dict is not None:
368
+ if vision_config is None:
369
+ vision_config = {}
370
+
371
+ # This is the complete result when using `vision_config_dict`.
372
+ _vision_config_dict = CLIPSegVisionConfig(**vision_config_dict).to_dict()
373
+ # convert keys to string instead of integer
374
+ if "id2label" in _vision_config_dict:
375
+ _vision_config_dict["id2label"] = {
376
+ str(key): value for key, value in _vision_config_dict["id2label"].items()
377
+ }
378
+
379
+ # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
380
+ for key, value in _vision_config_dict.items():
381
+ if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
382
+ # If specified in `vision_config_dict`
383
+ if key in vision_config_dict:
384
+ message = (
385
+ f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
386
+ f'values. The value `vision_config_dict["{key}"]` will be used instead.'
387
+ )
388
+ # If inferred from default argument values (just to be super careful)
389
+ else:
390
+ message = (
391
+ f"`vision_config_dict` is provided which will be used to initialize `CLIPSegVisionConfig`. "
392
+ f'The value `vision_config["{key}"]` will be overriden.'
393
+ )
394
+ logger.info(message)
395
+
396
+ # Update all values in `vision_config` with the ones in `_vision_config_dict`.
397
+ vision_config.update(_vision_config_dict)
398
+
399
+ if text_config is None:
400
+ text_config = {}
401
+ logger.info("`text_config` is `None`. Initializing the `CLIPSegTextConfig` with default values.")
402
+
403
+ if vision_config is None:
404
+ vision_config = {}
405
+ logger.info("`vision_config` is `None`. initializing the `CLIPSegVisionConfig` with default values.")
406
+
407
+ self.text_config = CLIPSegTextConfig(**text_config)
408
+ self.vision_config = CLIPSegVisionConfig(**vision_config)
409
+
410
+ self.projection_dim = projection_dim
411
+ self.logit_scale_init_value = logit_scale_init_value
412
+ self.extract_layers = extract_layers
413
+ self.reduce_dim = reduce_dim
414
+ self.decoder_num_attention_heads = decoder_num_attention_heads
415
+ self.decoder_attention_dropout = decoder_attention_dropout
416
+ self.decoder_hidden_act = decoder_hidden_act
417
+ self.decoder_intermediate_size = decoder_intermediate_size
418
+ self.conditional_layer = conditional_layer
419
+ self.initializer_factor = 1.0
420
+ self.use_complex_transposed_convolution = use_complex_transposed_convolution
421
+
422
+ @classmethod
423
+ def from_text_vision_configs(cls, text_config: CLIPSegTextConfig, vision_config: CLIPSegVisionConfig, **kwargs):
424
+ r"""
425
+ Instantiate a [`CLIPSegConfig`] (or a derived class) from clipseg text model configuration and clipseg vision
426
+ model configuration.
427
+
428
+ Returns:
429
+ [`CLIPSegConfig`]: An instance of a configuration object
430
+ """
431
+
432
+ return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
llmeval-env/lib/python3.10/site-packages/transformers/models/clipseg/convert_clipseg_original_pytorch_to_hf.py ADDED
@@ -0,0 +1,264 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """Convert CLIPSeg checkpoints from the original repository. URL: https://github.com/timojl/clipseg."""
17
+
18
+ import argparse
19
+
20
+ import requests
21
+ import torch
22
+ from PIL import Image
23
+
24
+ from transformers import (
25
+ CLIPSegConfig,
26
+ CLIPSegForImageSegmentation,
27
+ CLIPSegProcessor,
28
+ CLIPSegTextConfig,
29
+ CLIPSegVisionConfig,
30
+ CLIPTokenizer,
31
+ ViTImageProcessor,
32
+ )
33
+
34
+
35
+ def get_clipseg_config(model_name):
36
+ text_config = CLIPSegTextConfig()
37
+ vision_config = CLIPSegVisionConfig(patch_size=16)
38
+
39
+ use_complex_transposed_convolution = "refined" in model_name
40
+ reduce_dim = 16 if "rd16" in model_name else 64
41
+
42
+ config = CLIPSegConfig.from_text_vision_configs(
43
+ text_config,
44
+ vision_config,
45
+ use_complex_transposed_convolution=use_complex_transposed_convolution,
46
+ reduce_dim=reduce_dim,
47
+ )
48
+ return config
49
+
50
+
51
+ def rename_key(name):
52
+ # update prefixes
53
+ if "clip_model" in name:
54
+ name = name.replace("clip_model", "clip")
55
+ if "transformer" in name:
56
+ if "visual" in name:
57
+ name = name.replace("visual.transformer", "vision_model")
58
+ else:
59
+ name = name.replace("transformer", "text_model")
60
+ if "resblocks" in name:
61
+ name = name.replace("resblocks", "encoder.layers")
62
+ if "ln_1" in name:
63
+ name = name.replace("ln_1", "layer_norm1")
64
+ if "ln_2" in name:
65
+ name = name.replace("ln_2", "layer_norm2")
66
+ if "c_fc" in name:
67
+ name = name.replace("c_fc", "fc1")
68
+ if "c_proj" in name:
69
+ name = name.replace("c_proj", "fc2")
70
+ if "attn" in name and "self" not in name:
71
+ name = name.replace("attn", "self_attn")
72
+ # text encoder
73
+ if "token_embedding" in name:
74
+ name = name.replace("token_embedding", "text_model.embeddings.token_embedding")
75
+ if "positional_embedding" in name and "visual" not in name:
76
+ name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
77
+ if "ln_final" in name:
78
+ name = name.replace("ln_final", "text_model.final_layer_norm")
79
+ # vision encoder
80
+ if "visual.class_embedding" in name:
81
+ name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
82
+ if "visual.conv1" in name:
83
+ name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
84
+ if "visual.positional_embedding" in name:
85
+ name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
86
+ if "visual.ln_pre" in name:
87
+ name = name.replace("visual.ln_pre", "vision_model.pre_layrnorm")
88
+ if "visual.ln_post" in name:
89
+ name = name.replace("visual.ln_post", "vision_model.post_layernorm")
90
+ # projection layers
91
+ if "visual.proj" in name:
92
+ name = name.replace("visual.proj", "visual_projection.weight")
93
+ if "text_projection" in name:
94
+ name = name.replace("text_projection", "text_projection.weight")
95
+ # decoder
96
+ if "trans_conv" in name:
97
+ name = name.replace("trans_conv", "transposed_convolution")
98
+ if "film_mul" in name or "film_add" in name or "reduce" in name or "transposed_convolution" in name:
99
+ name = "decoder." + name
100
+ if "blocks" in name:
101
+ name = name.replace("blocks", "decoder.layers")
102
+ if "linear1" in name:
103
+ name = name.replace("linear1", "mlp.fc1")
104
+ if "linear2" in name:
105
+ name = name.replace("linear2", "mlp.fc2")
106
+ if "norm1" in name and "layer_" not in name:
107
+ name = name.replace("norm1", "layer_norm1")
108
+ if "norm2" in name and "layer_" not in name:
109
+ name = name.replace("norm2", "layer_norm2")
110
+
111
+ return name
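A hand-traced example of the renaming above (the import path assumes the script keeps the in-package location shown in this diff):

```python
from transformers.models.clipseg.convert_clipseg_original_pytorch_to_hf import rename_key

original_key = "clip_model.visual.transformer.resblocks.0.ln_1.weight"
print(rename_key(original_key))
# clip.vision_model.encoder.layers.0.layer_norm1.weight
```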
112
+
113
+
114
+ def convert_state_dict(orig_state_dict, config):
115
+ for key in orig_state_dict.copy().keys():
116
+ val = orig_state_dict.pop(key)
117
+
118
+ if key.startswith("clip_model") and "attn.in_proj" in key:
119
+ key_split = key.split(".")
120
+ if "visual" in key:
121
+ layer_num = int(key_split[4])
122
+ dim = config.vision_config.hidden_size
123
+ prefix = "vision_model"
124
+ else:
125
+ layer_num = int(key_split[3])
126
+ dim = config.text_config.hidden_size
127
+ prefix = "text_model"
128
+
129
+ if "weight" in key:
130
+ orig_state_dict[f"clip.{prefix}.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
131
+ orig_state_dict[f"clip.{prefix}.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
132
+ dim : dim * 2, :
133
+ ]
134
+ orig_state_dict[f"clip.{prefix}.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
135
+ else:
136
+ orig_state_dict[f"clip.{prefix}.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
137
+ orig_state_dict[f"clip.{prefix}.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
138
+ orig_state_dict[f"clip.{prefix}.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
139
+ elif "self_attn" in key and "out_proj" not in key:
140
+ key_split = key.split(".")
141
+ layer_num = int(key_split[1])
142
+ dim = config.reduce_dim
143
+ if "weight" in key:
144
+ orig_state_dict[f"decoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
145
+ orig_state_dict[f"decoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
146
+ orig_state_dict[f"decoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
147
+ else:
148
+ orig_state_dict[f"decoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
149
+ orig_state_dict[f"decoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
150
+ orig_state_dict[f"decoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
151
+ else:
152
+ new_name = rename_key(key)
153
+ if "visual_projection" in new_name or "text_projection" in new_name:
154
+ val = val.T
155
+ orig_state_dict[new_name] = val
156
+
157
+ return orig_state_dict
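A minimal illustration, on a dummy tensor, of the fused-attention split performed above: an `attn.in_proj_weight` of shape `(3 * dim, dim)` stacks the query, key and value projections row-wise, so plain slicing recovers the separate q/k/v weights:

```python
import torch

dim = 4
in_proj_weight = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)

q_w = in_proj_weight[:dim, :]           # rows 0 .. dim-1       -> q_proj.weight
k_w = in_proj_weight[dim : dim * 2, :]  # rows dim .. 2*dim-1   -> k_proj.weight
v_w = in_proj_weight[-dim:, :]          # rows 2*dim .. 3*dim-1 -> v_proj.weight

# stacking the slices back reproduces the fused weight
assert torch.equal(torch.cat([q_w, k_w, v_w], dim=0), in_proj_weight)
```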
158
+
159
+
160
+ # We will verify our results on an image of cute cats
161
+ def prepare_img():
162
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
163
+ image = Image.open(requests.get(url, stream=True).raw)
164
+ return image
165
+
166
+
167
+ def convert_clipseg_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
168
+ config = get_clipseg_config(model_name)
169
+ model = CLIPSegForImageSegmentation(config)
170
+ model.eval()
171
+
172
+ state_dict = torch.load(checkpoint_path, map_location="cpu")
173
+
174
+ # remove some keys
175
+ for key in state_dict.copy().keys():
176
+ if key.startswith("model"):
177
+ state_dict.pop(key, None)
178
+
179
+ # rename some keys
180
+ state_dict = convert_state_dict(state_dict, config)
181
+ missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
182
+
183
+ if missing_keys != ["clip.text_model.embeddings.position_ids", "clip.vision_model.embeddings.position_ids"]:
184
+ raise ValueError(f"Missing keys that are not expected: {missing_keys}")
185
+ if unexpected_keys != ["decoder.reduce.weight", "decoder.reduce.bias"]:
186
+ raise ValueError(f"Unexpected keys: {unexpected_keys}")
187
+
188
+ image_processor = ViTImageProcessor(size=352)
189
+ tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
190
+ processor = CLIPSegProcessor(image_processor=image_processor, tokenizer=tokenizer)
191
+
192
+ image = prepare_img()
193
+ text = ["a glass", "something to fill", "wood", "a jar"]
194
+
195
+ inputs = processor(text=text, images=[image] * len(text), padding="max_length", return_tensors="pt")
196
+
197
+ with torch.no_grad():
198
+ outputs = model(**inputs)
199
+
200
+ # verify values
201
+ expected_conditional = torch.tensor([0.1110, -0.1882, 0.1645])
202
+ expected_pooled_output = torch.tensor([0.2692, -0.7197, -0.1328])
203
+ if model_name == "clipseg-rd64-refined":
204
+ expected_masks_slice = torch.tensor(
205
+ [[-10.0407, -9.9431, -10.2646], [-9.9751, -9.7064, -9.9586], [-9.6891, -9.5645, -9.9618]]
206
+ )
207
+ elif model_name == "clipseg-rd64":
208
+ expected_masks_slice = torch.tensor(
209
+ [[-7.2877, -7.2711, -7.2463], [-7.2652, -7.2780, -7.2520], [-7.2239, -7.2204, -7.2001]]
210
+ )
211
+ elif model_name == "clipseg-rd16":
212
+ expected_masks_slice = torch.tensor(
213
+ [[-6.3955, -6.4055, -6.4151], [-6.3911, -6.4033, -6.4100], [-6.3474, -6.3702, -6.3762]]
214
+ )
215
+ else:
216
+ raise ValueError(f"Model name {model_name} not supported.")
217
+
218
+ assert torch.allclose(outputs.logits[0, :3, :3], expected_masks_slice, atol=1e-3)
219
+ assert torch.allclose(outputs.conditional_embeddings[0, :3], expected_conditional, atol=1e-3)
220
+ assert torch.allclose(outputs.pooled_output[0, :3], expected_pooled_output, atol=1e-3)
221
+ print("Looks ok!")
222
+
223
+ if pytorch_dump_folder_path is not None:
224
+ print(f"Saving model and processor to {pytorch_dump_folder_path}")
225
+ model.save_pretrained(pytorch_dump_folder_path)
226
+ processor.save_pretrained(pytorch_dump_folder_path)
227
+
228
+ if push_to_hub:
229
+ print(f"Pushing model and processor for {model_name} to the hub")
230
+ model.push_to_hub(f"CIDAS/{model_name}")
231
+ processor.push_to_hub(f"CIDAS/{model_name}")
232
+
233
+
234
+ if __name__ == "__main__":
235
+ parser = argparse.ArgumentParser()
236
+ # Required parameters
237
+ parser.add_argument(
238
+ "--model_name",
239
+ default="clipseg-rd64",
240
+ type=str,
241
+ choices=["clipseg-rd16", "clipseg-rd64", "clipseg-rd64-refined"],
242
+ help=(
243
+ "Name of the model. Supported models are: clipseg-rd64, clipseg-rd16 and clipseg-rd64-refined (rd meaning"
244
+ " reduce dimension)"
245
+ ),
246
+ )
247
+ parser.add_argument(
248
+ "--checkpoint_path",
249
+ default="/Users/nielsrogge/Documents/CLIPSeg/clip_plus_rd64-uni.pth",
250
+ type=str,
251
+ help=(
252
+ "Path to the original checkpoint. Note that the script assumes that the checkpoint includes both CLIP and"
253
+ " the decoder weights."
254
+ ),
255
+ )
256
+ parser.add_argument(
257
+ "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
258
+ )
259
+ parser.add_argument(
260
+ "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
261
+ )
262
+
263
+ args = parser.parse_args()
264
+ convert_clipseg_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
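A hypothetical programmatic invocation of the script above; the checkpoint path is a placeholder, not a real file:

```python
from transformers.models.clipseg.convert_clipseg_original_pytorch_to_hf import (
    convert_clipseg_checkpoint,
)

# roughly equivalent to: python convert_clipseg_original_pytorch_to_hf.py --model_name clipseg-rd64 \
#     --checkpoint_path /path/to/clip_plus_rd64-uni.pth --pytorch_dump_folder_path ./clipseg-rd64
convert_clipseg_checkpoint(
    model_name="clipseg-rd64",
    checkpoint_path="/path/to/clip_plus_rd64-uni.pth",  # placeholder path
    pytorch_dump_folder_path="./clipseg-rd64",
    push_to_hub=False,
)
```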
llmeval-env/lib/python3.10/site-packages/transformers/models/clipseg/modeling_clipseg.py ADDED
@@ -0,0 +1,1477 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The OpenAI Team Authors and The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch CLIPSeg model."""
16
+
17
+ import copy
18
+ import math
19
+ from dataclasses import dataclass
20
+ from typing import Any, Optional, Tuple, Union
21
+
22
+ import torch
23
+ import torch.utils.checkpoint
24
+ from torch import nn
25
+
26
+ from ...activations import ACT2FN
27
+ from ...modeling_attn_mask_utils import _create_4d_causal_attention_mask, _prepare_4d_attention_mask
28
+ from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
29
+ from ...modeling_utils import PreTrainedModel
30
+ from ...utils import (
31
+ ModelOutput,
32
+ add_start_docstrings,
33
+ add_start_docstrings_to_model_forward,
34
+ logging,
35
+ replace_return_docstrings,
36
+ )
37
+ from .configuration_clipseg import CLIPSegConfig, CLIPSegTextConfig, CLIPSegVisionConfig
38
+
39
+
40
+ logger = logging.get_logger(__name__)
41
+
42
+
43
+ _CHECKPOINT_FOR_DOC = "CIDAS/clipseg-rd64-refined"
44
+
45
+
46
+ from ..deprecated._archive_maps import CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
47
+
48
+
49
+ # contrastive loss function, adapted from
50
+ # https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/CLIP.html
51
+ def contrastive_loss(logits: torch.Tensor) -> torch.Tensor:
52
+ return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device))
53
+
54
+
55
+ # Copied from transformers.models.clip.modeling_clip.clip_loss with clip->clipseg
56
+ def clipseg_loss(similarity: torch.Tensor) -> torch.Tensor:
57
+ caption_loss = contrastive_loss(similarity)
58
+ image_loss = contrastive_loss(similarity.t())
59
+ return (caption_loss + image_loss) / 2.0
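A tiny worked example of the symmetric loss above: for an `n x n` similarity matrix the targets are simply `0..n-1` (each caption matches the image at the same index), and the caption-to-image and image-to-caption directions are averaged:

```python
import torch
from torch import nn

similarity = torch.tensor([[4.0, 0.0], [0.0, 4.0]])  # logits_per_text, shape (2, 2)
targets = torch.arange(len(similarity))               # tensor([0, 1])

caption_loss = nn.functional.cross_entropy(similarity, targets)
image_loss = nn.functional.cross_entropy(similarity.t(), targets)
loss = (caption_loss + image_loss) / 2.0
print(round(loss.item(), 4))  # ~0.0181: already close to 0 because the diagonal dominates
```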
60
+
61
+
62
+ @dataclass
63
+ # Copied from transformers.models.clip.modeling_clip.CLIPOutput with CLIP->CLIPSeg
64
+ class CLIPSegOutput(ModelOutput):
65
+ """
66
+ Args:
67
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
68
+ Contrastive loss for image-text similarity.
69
+ logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
70
+ The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
71
+ similarity scores.
72
+ logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
73
+ The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
74
+ similarity scores.
75
+ text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
76
+ The text embeddings obtained by applying the projection layer to the pooled output of [`CLIPSegTextModel`].
77
+ image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
78
+ The image embeddings obtained by applying the projection layer to the pooled output of [`CLIPSegVisionModel`].
79
+ text_model_output (`BaseModelOutputWithPooling`):
80
+ The output of the [`CLIPSegTextModel`].
81
+ vision_model_output (`BaseModelOutputWithPooling`):
82
+ The output of the [`CLIPSegVisionModel`].
83
+ """
84
+
85
+ loss: Optional[torch.FloatTensor] = None
86
+ logits_per_image: torch.FloatTensor = None
87
+ logits_per_text: torch.FloatTensor = None
88
+ text_embeds: torch.FloatTensor = None
89
+ image_embeds: torch.FloatTensor = None
90
+ text_model_output: BaseModelOutputWithPooling = None
91
+ vision_model_output: BaseModelOutputWithPooling = None
92
+
93
+ def to_tuple(self) -> Tuple[Any]:
94
+ return tuple(
95
+ self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
96
+ for k in self.keys()
97
+ )
98
+
99
+
100
+ @dataclass
101
+ class CLIPSegDecoderOutput(ModelOutput):
102
+ """
103
+ Args:
104
+ logits (`torch.FloatTensor` of shape `(batch_size, height, width)`):
105
+ Classification scores for each pixel.
106
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
107
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
108
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
109
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
110
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
111
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
112
+ the self-attention heads.
113
+ """
114
+
115
+ logits: torch.FloatTensor = None
116
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
117
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
118
+
119
+
120
+ @dataclass
121
+ class CLIPSegImageSegmentationOutput(ModelOutput):
122
+ """
123
+ Args:
124
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
125
+ Contrastive loss for image-text similarity.
126
+ ...
127
+ vision_model_output (`BaseModelOutputWithPooling`):
128
+ The output of the [`CLIPSegVisionModel`].
129
+ """
130
+
131
+ loss: Optional[torch.FloatTensor] = None
132
+ logits: torch.FloatTensor = None
133
+ conditional_embeddings: torch.FloatTensor = None
134
+ pooled_output: torch.FloatTensor = None
135
+ vision_model_output: BaseModelOutputWithPooling = None
136
+ decoder_output: CLIPSegDecoderOutput = None
137
+
138
+ def to_tuple(self) -> Tuple[Any]:
139
+ return tuple(
140
+ self[k] if k not in ["vision_model_output", "decoder_output"] else getattr(self, k).to_tuple()
141
+ for k in self.keys()
142
+ )
143
+
144
+
145
+ class CLIPSegVisionEmbeddings(nn.Module):
146
+ # Copied from transformers.models.clip.modeling_clip.CLIPVisionEmbeddings.__init__ with CLIP->CLIPSeg
147
+ def __init__(self, config: CLIPSegVisionConfig):
148
+ super().__init__()
149
+ self.config = config
150
+ self.embed_dim = config.hidden_size
151
+ self.image_size = config.image_size
152
+ self.patch_size = config.patch_size
153
+
154
+ self.class_embedding = nn.Parameter(torch.randn(self.embed_dim))
155
+
156
+ self.patch_embedding = nn.Conv2d(
157
+ in_channels=config.num_channels,
158
+ out_channels=self.embed_dim,
159
+ kernel_size=self.patch_size,
160
+ stride=self.patch_size,
161
+ bias=False,
162
+ )
163
+
164
+ self.num_patches = (self.image_size // self.patch_size) ** 2
165
+ self.num_positions = self.num_patches + 1
166
+ self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
167
+ self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False)
168
+
169
+ def interpolate_position_embeddings(self, new_size):
170
+ if len(new_size) != 2:
171
+ raise ValueError("new_size should consist of 2 values")
172
+
173
+ num_patches_one_direction = int(self.num_patches**0.5)
174
+ # we interpolate the position embeddings in 2D
175
+ a = self.position_embedding.weight[1:].T.view(
176
+ 1, self.config.hidden_size, num_patches_one_direction, num_patches_one_direction
177
+ )
178
+ b = (
179
+ nn.functional.interpolate(a, new_size, mode="bicubic", align_corners=False)
180
+ .squeeze(0)
181
+ .view(self.config.hidden_size, new_size[0] * new_size[1])
182
+ .T
183
+ )
184
+ result = torch.cat([self.position_embedding.weight[:1], b])
185
+
186
+ return result
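A shape-only sketch (toy sizes assumed) of the interpolation above: the class-token row is kept as-is and only the patch grid of position embeddings is resized bicubically in 2D:

```python
import torch
from torch import nn

hidden, old_grid, new_grid = 8, 7, 9
pos_emb = torch.randn(1 + old_grid**2, hidden)  # row 0 is the class-token embedding

patch_grid = pos_emb[1:].T.view(1, hidden, old_grid, old_grid)
resized = nn.functional.interpolate(patch_grid, (new_grid, new_grid), mode="bicubic", align_corners=False)
resized = resized.squeeze(0).view(hidden, new_grid * new_grid).T

new_pos_emb = torch.cat([pos_emb[:1], resized])
print(new_pos_emb.shape)  # torch.Size([82, 8]) == (1 + 9*9, hidden)
```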
187
+
188
+ def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
189
+ batch_size = pixel_values.shape[0]
190
+ patch_embeds = self.patch_embedding(pixel_values) # shape = [*, width, grid, grid]
191
+ patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
192
+
193
+ class_embeds = self.class_embedding.expand(batch_size, 1, -1)
194
+ embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
195
+
196
+ if embeddings.shape[1] != self.num_positions:
197
+ new_shape = int(math.sqrt(embeddings.shape[1] - 1))
198
+ embeddings = embeddings + self.interpolate_position_embeddings((new_shape, new_shape))
199
+ embeddings = embeddings.to(embeddings.dtype)
200
+ else:
201
+ embeddings = embeddings + self.position_embedding(self.position_ids)
202
+
203
+ return embeddings
204
+
205
+
206
+ # Copied from transformers.models.clip.modeling_clip.CLIPTextEmbeddings with CLIP->CLIPSeg
207
+ class CLIPSegTextEmbeddings(nn.Module):
208
+ def __init__(self, config: CLIPSegTextConfig):
209
+ super().__init__()
210
+ embed_dim = config.hidden_size
211
+
212
+ self.token_embedding = nn.Embedding(config.vocab_size, embed_dim)
213
+ self.position_embedding = nn.Embedding(config.max_position_embeddings, embed_dim)
214
+
215
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
216
+ self.register_buffer(
217
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
218
+ )
219
+
220
+ def forward(
221
+ self,
222
+ input_ids: Optional[torch.LongTensor] = None,
223
+ position_ids: Optional[torch.LongTensor] = None,
224
+ inputs_embeds: Optional[torch.FloatTensor] = None,
225
+ ) -> torch.Tensor:
226
+ seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2]
227
+
228
+ if position_ids is None:
229
+ position_ids = self.position_ids[:, :seq_length]
230
+
231
+ if inputs_embeds is None:
232
+ inputs_embeds = self.token_embedding(input_ids)
233
+
234
+ position_embeddings = self.position_embedding(position_ids)
235
+ embeddings = inputs_embeds + position_embeddings
236
+
237
+ return embeddings
238
+
239
+
240
+ # Copied from transformers.models.clip.modeling_clip.CLIPAttention with CLIP->CLIPSeg
241
+ class CLIPSegAttention(nn.Module):
242
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
243
+
244
+ def __init__(self, config):
245
+ super().__init__()
246
+ self.config = config
247
+ self.embed_dim = config.hidden_size
248
+ self.num_heads = config.num_attention_heads
249
+ self.head_dim = self.embed_dim // self.num_heads
250
+ if self.head_dim * self.num_heads != self.embed_dim:
251
+ raise ValueError(
252
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
253
+ f" {self.num_heads})."
254
+ )
255
+ self.scale = self.head_dim**-0.5
256
+ self.dropout = config.attention_dropout
257
+
258
+ self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
259
+ self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
260
+ self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
261
+ self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)
262
+
263
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
264
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
265
+
266
+ def forward(
267
+ self,
268
+ hidden_states: torch.Tensor,
269
+ attention_mask: Optional[torch.Tensor] = None,
270
+ causal_attention_mask: Optional[torch.Tensor] = None,
271
+ output_attentions: Optional[bool] = False,
272
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
273
+ """Input shape: Batch x Time x Channel"""
274
+
275
+ bsz, tgt_len, embed_dim = hidden_states.size()
276
+
277
+ # get query proj
278
+ query_states = self.q_proj(hidden_states) * self.scale
279
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
280
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
281
+
282
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
283
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
284
+ key_states = key_states.view(*proj_shape)
285
+ value_states = value_states.view(*proj_shape)
286
+
287
+ src_len = key_states.size(1)
288
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
289
+
290
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
291
+ raise ValueError(
292
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
293
+ f" {attn_weights.size()}"
294
+ )
295
+
296
+ # apply the causal_attention_mask first
297
+ if causal_attention_mask is not None:
298
+ if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len):
299
+ raise ValueError(
300
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
301
+ f" {causal_attention_mask.size()}"
302
+ )
303
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask
304
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
305
+
306
+ if attention_mask is not None:
307
+ if attention_mask.size() != (bsz, 1, tgt_len, src_len):
308
+ raise ValueError(
309
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
310
+ )
311
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
312
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
313
+
314
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
315
+
316
+ if output_attentions:
317
+ # this operation is a bit awkward, but it's required to
318
+ # make sure that attn_weights keeps its gradient.
319
+ # In order to do so, attn_weights has to be reshaped
320
+ # twice and then reused in the following
321
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
322
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
323
+ else:
324
+ attn_weights_reshaped = None
325
+
326
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
327
+
328
+ attn_output = torch.bmm(attn_probs, value_states)
329
+
330
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
331
+ raise ValueError(
332
+ f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
333
+ f" {attn_output.size()}"
334
+ )
335
+
336
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
337
+ attn_output = attn_output.transpose(1, 2)
338
+ attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)
339
+
340
+ attn_output = self.out_proj(attn_output)
341
+
342
+ return attn_output, attn_weights_reshaped
343
+
344
+
345
+ # Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->CLIPSeg
346
+ class CLIPSegMLP(nn.Module):
347
+ def __init__(self, config):
348
+ super().__init__()
349
+ self.config = config
350
+ self.activation_fn = ACT2FN[config.hidden_act]
351
+ self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
352
+ self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
353
+
354
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
355
+ hidden_states = self.fc1(hidden_states)
356
+ hidden_states = self.activation_fn(hidden_states)
357
+ hidden_states = self.fc2(hidden_states)
358
+ return hidden_states
359
+
360
+
361
+ # Copied from transformers.models.clip.modeling_clip.CLIPEncoderLayer with CLIP->CLIPSeg
362
+ class CLIPSegEncoderLayer(nn.Module):
363
+ def __init__(self, config: CLIPSegConfig):
364
+ super().__init__()
365
+ self.embed_dim = config.hidden_size
366
+ self.self_attn = CLIPSegAttention(config)
367
+ self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
368
+ self.mlp = CLIPSegMLP(config)
369
+ self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
370
+
371
+ def forward(
372
+ self,
373
+ hidden_states: torch.Tensor,
374
+ attention_mask: torch.Tensor,
375
+ causal_attention_mask: torch.Tensor,
376
+ output_attentions: Optional[bool] = False,
377
+ ) -> Tuple[torch.FloatTensor]:
378
+ """
379
+ Args:
380
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
381
+ attention_mask (`torch.FloatTensor`): attention mask of size
382
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
384
+ output_attentions (`bool`, *optional*):
385
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
386
+ returned tensors for more detail.
387
+ """
388
+ residual = hidden_states
389
+
390
+ hidden_states = self.layer_norm1(hidden_states)
391
+ hidden_states, attn_weights = self.self_attn(
392
+ hidden_states=hidden_states,
393
+ attention_mask=attention_mask,
394
+ causal_attention_mask=causal_attention_mask,
395
+ output_attentions=output_attentions,
396
+ )
397
+ hidden_states = residual + hidden_states
398
+
399
+ residual = hidden_states
400
+ hidden_states = self.layer_norm2(hidden_states)
401
+ hidden_states = self.mlp(hidden_states)
402
+ hidden_states = residual + hidden_states
403
+
404
+ outputs = (hidden_states,)
405
+
406
+ if output_attentions:
407
+ outputs += (attn_weights,)
408
+
409
+ return outputs
410
+
411
+
412
+ class CLIPSegPreTrainedModel(PreTrainedModel):
413
+ """
414
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
415
+ models.
416
+ """
417
+
418
+ config_class = CLIPSegConfig
419
+ base_model_prefix = "clip"
420
+ supports_gradient_checkpointing = True
421
+
422
+ def _init_weights(self, module):
423
+ """Initialize the weights"""
424
+ factor = self.config.initializer_factor
425
+ if isinstance(module, CLIPSegTextEmbeddings):
426
+ module.token_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02)
427
+ module.position_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02)
428
+ elif isinstance(module, CLIPSegVisionEmbeddings):
429
+ factor = self.config.initializer_factor
430
+ nn.init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim**-0.5 * factor)
431
+ nn.init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor)
432
+ nn.init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor)
433
+ elif isinstance(module, CLIPSegAttention):
434
+ factor = self.config.initializer_factor
435
+ in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
436
+ out_proj_std = (module.embed_dim**-0.5) * factor
437
+ nn.init.normal_(module.q_proj.weight, std=in_proj_std)
438
+ nn.init.normal_(module.k_proj.weight, std=in_proj_std)
439
+ nn.init.normal_(module.v_proj.weight, std=in_proj_std)
440
+ nn.init.normal_(module.out_proj.weight, std=out_proj_std)
441
+ elif isinstance(module, CLIPSegMLP):
442
+ factor = self.config.initializer_factor
443
+ in_proj_std = (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
444
+ fc_std = (2 * module.config.hidden_size) ** -0.5 * factor
445
+ nn.init.normal_(module.fc1.weight, std=fc_std)
446
+ nn.init.normal_(module.fc2.weight, std=in_proj_std)
447
+ elif isinstance(module, CLIPSegModel):
448
+ nn.init.normal_(
449
+ module.text_projection.weight,
450
+ std=module.text_embed_dim**-0.5 * self.config.initializer_factor,
451
+ )
452
+ nn.init.normal_(
453
+ module.visual_projection.weight,
454
+ std=module.vision_embed_dim**-0.5 * self.config.initializer_factor,
455
+ )
456
+
457
+ if isinstance(module, nn.LayerNorm):
458
+ module.bias.data.zero_()
459
+ module.weight.data.fill_(1.0)
460
+ if isinstance(module, nn.Linear) and module.bias is not None:
461
+ module.bias.data.zero_()
462
+
463
+
464
+ CLIPSEG_START_DOCSTRING = r"""
465
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
466
+ as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
467
+ behavior.
468
+
469
+ Parameters:
470
+ config ([`CLIPSegConfig`]): Model configuration class with all the parameters of the model.
471
+ Initializing with a config file does not load the weights associated with the model, only the
472
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
473
+ """
474
+
475
+ CLIPSEG_TEXT_INPUTS_DOCSTRING = r"""
476
+ Args:
477
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
478
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
479
+ it.
480
+
481
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
482
+ [`PreTrainedTokenizer.__call__`] for details.
483
+
484
+ [What are input IDs?](../glossary#input-ids)
485
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
486
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
487
+
488
+ - 1 for tokens that are **not masked**,
489
+ - 0 for tokens that are **masked**.
490
+
491
+ [What are attention masks?](../glossary#attention-mask)
492
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
493
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
494
+ config.max_position_embeddings - 1]`.
495
+
496
+ [What are position IDs?](../glossary#position-ids)
497
+ output_attentions (`bool`, *optional*):
498
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
499
+ tensors for more detail.
500
+ output_hidden_states (`bool`, *optional*):
501
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
502
+ more detail.
503
+ return_dict (`bool`, *optional*):
504
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
505
+ """
506
+
507
+ CLIPSEG_VISION_INPUTS_DOCSTRING = r"""
508
+ Args:
509
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
510
+ Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
511
+ [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
512
+ output_attentions (`bool`, *optional*):
513
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
514
+ tensors for more detail.
515
+ output_hidden_states (`bool`, *optional*):
516
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
517
+ more detail.
518
+ return_dict (`bool`, *optional*):
519
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
520
+ """
521
+
522
+ CLIPSEG_INPUTS_DOCSTRING = r"""
523
+ Args:
524
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
525
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
526
+ it.
527
+
528
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
529
+ [`PreTrainedTokenizer.__call__`] for details.
530
+
531
+ [What are input IDs?](../glossary#input-ids)
532
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
533
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
534
+
535
+ - 1 for tokens that are **not masked**,
536
+ - 0 for tokens that are **masked**.
537
+
538
+ [What are attention masks?](../glossary#attention-mask)
539
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
540
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
541
+ config.max_position_embeddings - 1]`.
542
+
543
+ [What are position IDs?](../glossary#position-ids)
544
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
545
+ Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
546
+ [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
547
+ return_loss (`bool`, *optional*):
548
+ Whether or not to return the contrastive loss.
549
+ output_attentions (`bool`, *optional*):
550
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
551
+ tensors for more detail.
552
+ output_hidden_states (`bool`, *optional*):
553
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
554
+ more detail.
555
+ return_dict (`bool`, *optional*):
556
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
557
+ """
558
+
559
+
560
+ # Copied from transformers.models.clip.modeling_clip.CLIPEncoder with CLIP->CLIPSeg
561
+ class CLIPSegEncoder(nn.Module):
562
+ """
563
+ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
564
+ [`CLIPSegEncoderLayer`].
565
+
566
+ Args:
567
+ config: CLIPSegConfig
568
+ """
569
+
570
+ def __init__(self, config: CLIPSegConfig):
571
+ super().__init__()
572
+ self.config = config
573
+ self.layers = nn.ModuleList([CLIPSegEncoderLayer(config) for _ in range(config.num_hidden_layers)])
574
+ self.gradient_checkpointing = False
575
+
576
+ def forward(
577
+ self,
578
+ inputs_embeds,
579
+ attention_mask: Optional[torch.Tensor] = None,
580
+ causal_attention_mask: Optional[torch.Tensor] = None,
581
+ output_attentions: Optional[bool] = None,
582
+ output_hidden_states: Optional[bool] = None,
583
+ return_dict: Optional[bool] = None,
584
+ ) -> Union[Tuple, BaseModelOutput]:
585
+ r"""
586
+ Args:
587
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
588
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
589
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
590
+ than the model's internal embedding lookup matrix.
591
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
592
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
593
+
594
+ - 1 for tokens that are **not masked**,
595
+ - 0 for tokens that are **masked**.
596
+
597
+ [What are attention masks?](../glossary#attention-mask)
598
+ causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
599
+ Causal mask for the text model. Mask values selected in `[0, 1]`:
600
+
601
+ - 1 for tokens that are **not masked**,
602
+ - 0 for tokens that are **masked**.
603
+
604
+ [What are attention masks?](../glossary#attention-mask)
605
+ output_attentions (`bool`, *optional*):
606
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
607
+ returned tensors for more detail.
608
+ output_hidden_states (`bool`, *optional*):
609
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
610
+ for more detail.
611
+ return_dict (`bool`, *optional*):
612
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
613
+ """
614
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
615
+ output_hidden_states = (
616
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
617
+ )
618
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
619
+
620
+ encoder_states = () if output_hidden_states else None
621
+ all_attentions = () if output_attentions else None
622
+
623
+ hidden_states = inputs_embeds
624
+ for idx, encoder_layer in enumerate(self.layers):
625
+ if output_hidden_states:
626
+ encoder_states = encoder_states + (hidden_states,)
627
+ if self.gradient_checkpointing and self.training:
628
+ layer_outputs = self._gradient_checkpointing_func(
629
+ encoder_layer.__call__,
630
+ hidden_states,
631
+ attention_mask,
632
+ causal_attention_mask,
633
+ output_attentions,
634
+ )
635
+ else:
636
+ layer_outputs = encoder_layer(
637
+ hidden_states,
638
+ attention_mask,
639
+ causal_attention_mask,
640
+ output_attentions=output_attentions,
641
+ )
642
+
643
+ hidden_states = layer_outputs[0]
644
+
645
+ if output_attentions:
646
+ all_attentions = all_attentions + (layer_outputs[1],)
647
+
648
+ if output_hidden_states:
649
+ encoder_states = encoder_states + (hidden_states,)
650
+
651
+ if not return_dict:
652
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
653
+ return BaseModelOutput(
654
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
655
+ )
656
+
657
+
658
+ class CLIPSegTextTransformer(nn.Module):
659
+ # Copied from transformers.models.clip.modeling_clip.CLIPTextTransformer.__init__ with CLIP->CLIPSeg
660
+ def __init__(self, config: CLIPSegTextConfig):
661
+ super().__init__()
662
+ self.config = config
663
+ embed_dim = config.hidden_size
664
+ self.embeddings = CLIPSegTextEmbeddings(config)
665
+ self.encoder = CLIPSegEncoder(config)
666
+ self.final_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
667
+
668
+ # For `pooled_output` computation
669
+ self.eos_token_id = config.eos_token_id
670
+
671
+ @add_start_docstrings_to_model_forward(CLIPSEG_TEXT_INPUTS_DOCSTRING)
672
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPSegTextConfig)
673
+ # Copied from transformers.models.clip.modeling_clip.CLIPTextTransformer.forward with clip->clipseg, CLIP->CLIPSeg
674
+ def forward(
675
+ self,
676
+ input_ids: Optional[torch.Tensor] = None,
677
+ attention_mask: Optional[torch.Tensor] = None,
678
+ position_ids: Optional[torch.Tensor] = None,
679
+ output_attentions: Optional[bool] = None,
680
+ output_hidden_states: Optional[bool] = None,
681
+ return_dict: Optional[bool] = None,
682
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
683
+ r"""
684
+ Returns:
685
+
686
+ """
687
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
688
+ output_hidden_states = (
689
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
690
+ )
691
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
692
+
693
+ if input_ids is None:
694
+ raise ValueError("You have to specify input_ids")
695
+
696
+ input_shape = input_ids.size()
697
+ input_ids = input_ids.view(-1, input_shape[-1])
698
+
699
+ hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids)
700
+
701
+ # CLIPSeg's text model uses causal mask, prepare it here.
702
+ # https://github.com/openai/CLIPSeg/blob/cfcffb90e69f37bf2ff1e988237a0fbe41f33c04/clipseg/model.py#L324
703
+ causal_attention_mask = _create_4d_causal_attention_mask(
704
+ input_shape, hidden_states.dtype, device=hidden_states.device
705
+ )
706
+ # expand attention_mask
707
+ if attention_mask is not None:
708
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
709
+ attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype)
710
+
711
+ encoder_outputs = self.encoder(
712
+ inputs_embeds=hidden_states,
713
+ attention_mask=attention_mask,
714
+ causal_attention_mask=causal_attention_mask,
715
+ output_attentions=output_attentions,
716
+ output_hidden_states=output_hidden_states,
717
+ return_dict=return_dict,
718
+ )
719
+
720
+ last_hidden_state = encoder_outputs[0]
721
+ last_hidden_state = self.final_layer_norm(last_hidden_state)
722
+
723
+ if self.eos_token_id == 2:
724
+ # The `eos_token_id` was incorrect before PR #24773: let's keep what has been done here.
725
+ # A CLIPSeg model with such `eos_token_id` in the config can't work correctly with extra new tokens added
726
+ # ------------------------------------------------------------
727
+ # text_embeds.shape = [batch_size, sequence_length, transformer.width]
728
+ # take features from the eot embedding (eot_token is the highest number in each sequence)
729
+ # casting to torch.int for onnx compatibility: argmax doesn't support int64 inputs with opset 14
730
+ pooled_output = last_hidden_state[
731
+ torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device),
732
+ input_ids.to(dtype=torch.int, device=last_hidden_state.device).argmax(dim=-1),
733
+ ]
734
+ else:
735
+ # The config gets the updated `eos_token_id` from PR #24773 (so the use of extra new tokens is possible)
736
+ pooled_output = last_hidden_state[
737
+ torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device),
738
+ # We need to get the first position of the `eos_token_id` value (`pad_token_ids` might be equal to `eos_token_id`)
739
+ (input_ids.to(dtype=torch.int, device=last_hidden_state.device) == self.eos_token_id)
740
+ .int()
741
+ .argmax(dim=-1),
742
+ ]
743
+
744
+ if not return_dict:
745
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
746
+
747
+ return BaseModelOutputWithPooling(
748
+ last_hidden_state=last_hidden_state,
749
+ pooler_output=pooled_output,
750
+ hidden_states=encoder_outputs.hidden_states,
751
+ attentions=encoder_outputs.attentions,
752
+ )
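A toy illustration (made-up token ids; the EOS id is assumed to match CLIP's tokenizer) of how the pooled text embedding is selected in the non-legacy branch above, i.e. the hidden state at the first EOS position of each sequence:

```python
import torch

eos_token_id = 49407  # assumed CLIP EOS id
input_ids = torch.tensor(
    [
        [49406, 320, 1125, 49407, 49407, 49407],  # short caption, padded with EOS
        [49406, 320, 1125, 539, 320, 49407],      # longer caption, EOS at the end
    ]
)
last_hidden_state = torch.randn(2, 6, 4)  # (batch, seq_len, hidden)

# first position where the token equals the EOS id (argmax returns the first maximum)
eos_positions = (input_ids == eos_token_id).int().argmax(dim=-1)  # tensor([3, 5])
pooled_output = last_hidden_state[torch.arange(input_ids.shape[0]), eos_positions]
print(pooled_output.shape)  # torch.Size([2, 4])
```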
753
+
754
+
755
+ class CLIPSegTextModel(CLIPSegPreTrainedModel):
756
+ config_class = CLIPSegTextConfig
757
+
758
+ _no_split_modules = ["CLIPSegTextEmbeddings", "CLIPSegEncoderLayer"]
759
+
760
+ def __init__(self, config: CLIPSegTextConfig):
761
+ super().__init__(config)
762
+ self.text_model = CLIPSegTextTransformer(config)
763
+ # Initialize weights and apply final processing
764
+ self.post_init()
765
+
766
+ def get_input_embeddings(self) -> nn.Module:
767
+ return self.text_model.embeddings.token_embedding
768
+
769
+ def set_input_embeddings(self, value):
770
+ self.text_model.embeddings.token_embedding = value
771
+
772
+ @add_start_docstrings_to_model_forward(CLIPSEG_TEXT_INPUTS_DOCSTRING)
773
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPSegTextConfig)
774
+ def forward(
775
+ self,
776
+ input_ids: Optional[torch.Tensor] = None,
777
+ attention_mask: Optional[torch.Tensor] = None,
778
+ position_ids: Optional[torch.Tensor] = None,
779
+ output_attentions: Optional[bool] = None,
780
+ output_hidden_states: Optional[bool] = None,
781
+ return_dict: Optional[bool] = None,
782
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
783
+ r"""
784
+ Returns:
785
+
786
+ Examples:
787
+
788
+ ```python
789
+ >>> from transformers import AutoTokenizer, CLIPSegTextModel
790
+
791
+ >>> tokenizer = AutoTokenizer.from_pretrained("CIDAS/clipseg-rd64-refined")
792
+ >>> model = CLIPSegTextModel.from_pretrained("CIDAS/clipseg-rd64-refined")
793
+
794
+ >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
795
+
796
+ >>> outputs = model(**inputs)
797
+ >>> last_hidden_state = outputs.last_hidden_state
798
+ >>> pooled_output = outputs.pooler_output # pooled (EOS token) states
799
+ ```"""
800
+ return self.text_model(
801
+ input_ids=input_ids,
802
+ attention_mask=attention_mask,
803
+ position_ids=position_ids,
804
+ output_attentions=output_attentions,
805
+ output_hidden_states=output_hidden_states,
806
+ return_dict=return_dict,
807
+ )
808
+
809
+
810
+ class CLIPSegVisionTransformer(nn.Module):
811
+ # Copied from transformers.models.clip.modeling_clip.CLIPVisionTransformer.__init__ with CLIP->CLIPSeg
812
+ def __init__(self, config: CLIPSegVisionConfig):
813
+ super().__init__()
814
+ self.config = config
815
+ embed_dim = config.hidden_size
816
+
817
+ self.embeddings = CLIPSegVisionEmbeddings(config)
818
+ self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
819
+ self.encoder = CLIPSegEncoder(config)
820
+ self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
821
+
822
+ @add_start_docstrings_to_model_forward(CLIPSEG_VISION_INPUTS_DOCSTRING)
823
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPSegVisionConfig)
824
+ # Copied from transformers.models.clip.modeling_clip.CLIPVisionTransformer.forward
825
+ def forward(
826
+ self,
827
+ pixel_values: Optional[torch.FloatTensor] = None,
828
+ output_attentions: Optional[bool] = None,
829
+ output_hidden_states: Optional[bool] = None,
830
+ return_dict: Optional[bool] = None,
831
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
832
+ r"""
833
+ Returns:
834
+
835
+ """
836
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
837
+ output_hidden_states = (
838
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
839
+ )
840
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
841
+
842
+ if pixel_values is None:
843
+ raise ValueError("You have to specify pixel_values")
844
+
845
+ hidden_states = self.embeddings(pixel_values)
846
+ hidden_states = self.pre_layrnorm(hidden_states)
847
+
848
+ encoder_outputs = self.encoder(
849
+ inputs_embeds=hidden_states,
850
+ output_attentions=output_attentions,
851
+ output_hidden_states=output_hidden_states,
852
+ return_dict=return_dict,
853
+ )
854
+
855
+ last_hidden_state = encoder_outputs[0]
856
+ pooled_output = last_hidden_state[:, 0, :]
857
+ pooled_output = self.post_layernorm(pooled_output)
858
+
859
+ if not return_dict:
860
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
861
+
862
+ return BaseModelOutputWithPooling(
863
+ last_hidden_state=last_hidden_state,
864
+ pooler_output=pooled_output,
865
+ hidden_states=encoder_outputs.hidden_states,
866
+ attentions=encoder_outputs.attentions,
867
+ )
868
+
869
+
870
+ class CLIPSegVisionModel(CLIPSegPreTrainedModel):
871
+ config_class = CLIPSegVisionConfig
872
+ main_input_name = "pixel_values"
873
+
874
+ def __init__(self, config: CLIPSegVisionConfig):
875
+ super().__init__(config)
876
+ self.vision_model = CLIPSegVisionTransformer(config)
877
+ # Initialize weights and apply final processing
878
+ self.post_init()
879
+
880
+ def get_input_embeddings(self) -> nn.Module:
881
+ return self.vision_model.embeddings.patch_embedding
882
+
883
+ @add_start_docstrings_to_model_forward(CLIPSEG_VISION_INPUTS_DOCSTRING)
884
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPSegVisionConfig)
885
+ def forward(
886
+ self,
887
+ pixel_values: Optional[torch.FloatTensor] = None,
888
+ output_attentions: Optional[bool] = None,
889
+ output_hidden_states: Optional[bool] = None,
890
+ return_dict: Optional[bool] = None,
891
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
892
+ r"""
893
+ Returns:
894
+
895
+ Examples:
896
+
897
+ ```python
898
+ >>> from PIL import Image
899
+ >>> import requests
900
+ >>> from transformers import AutoProcessor, CLIPSegVisionModel
901
+
902
+ >>> processor = AutoProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
903
+ >>> model = CLIPSegVisionModel.from_pretrained("CIDAS/clipseg-rd64-refined")
904
+
905
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
906
+ >>> image = Image.open(requests.get(url, stream=True).raw)
907
+
908
+ >>> inputs = processor(images=image, return_tensors="pt")
909
+
910
+ >>> outputs = model(**inputs)
911
+ >>> last_hidden_state = outputs.last_hidden_state
912
+ >>> pooled_output = outputs.pooler_output # pooled CLS states
913
+ ```"""
914
+ return self.vision_model(
915
+ pixel_values=pixel_values,
916
+ output_attentions=output_attentions,
917
+ output_hidden_states=output_hidden_states,
918
+ return_dict=return_dict,
919
+ )
920
+
921
+
922
+ @add_start_docstrings(CLIPSEG_START_DOCSTRING)
923
+ class CLIPSegModel(CLIPSegPreTrainedModel):
924
+ config_class = CLIPSegConfig
925
+
926
+ def __init__(self, config: CLIPSegConfig):
927
+ super().__init__(config)
928
+
929
+ if not isinstance(config.text_config, CLIPSegTextConfig):
930
+ raise ValueError(
931
+ "config.text_config is expected to be of type CLIPSegTextConfig but is of type"
932
+ f" {type(config.text_config)}."
933
+ )
934
+
935
+ if not isinstance(config.vision_config, CLIPSegVisionConfig):
936
+ raise ValueError(
937
+ "config.vision_config is expected to be of type CLIPSegVisionConfig but is of type"
938
+ f" {type(config.vision_config)}."
939
+ )
940
+
941
+ text_config = config.text_config
942
+ vision_config = config.vision_config
943
+
944
+ self.projection_dim = config.projection_dim
945
+ self.text_embed_dim = text_config.hidden_size
946
+ self.vision_embed_dim = vision_config.hidden_size
947
+
948
+ self.text_model = CLIPSegTextTransformer(text_config)
949
+ self.vision_model = CLIPSegVisionTransformer(vision_config)
950
+
951
+ self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False)
952
+ self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False)
953
+ self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value))
954
+
955
+ # Initialize weights and apply final processing
956
+ self.post_init()
957
+
958
+ @add_start_docstrings_to_model_forward(CLIPSEG_TEXT_INPUTS_DOCSTRING)
959
+ def get_text_features(
960
+ self,
961
+ input_ids: Optional[torch.Tensor] = None,
962
+ attention_mask: Optional[torch.Tensor] = None,
963
+ position_ids: Optional[torch.Tensor] = None,
964
+ output_attentions: Optional[bool] = None,
965
+ output_hidden_states: Optional[bool] = None,
966
+ return_dict: Optional[bool] = None,
967
+ ) -> torch.FloatTensor:
968
+ r"""
969
+ Returns:
970
+ text_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The text embeddings obtained by
971
+ applying the projection layer to the pooled output of [`CLIPSegTextModel`].
972
+
973
+ Examples:
974
+
975
+ ```python
976
+ >>> from transformers import AutoTokenizer, CLIPSegModel
977
+
978
+ >>> tokenizer = AutoTokenizer.from_pretrained("CIDAS/clipseg-rd64-refined")
979
+ >>> model = CLIPSegModel.from_pretrained("CIDAS/clipseg-rd64-refined")
980
+
981
+ >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
982
+ >>> text_features = model.get_text_features(**inputs)
983
+ ```"""
984
+ # Use CLIPSEG model's config for some fields (if specified) instead of those of vision & text components.
985
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
986
+ output_hidden_states = (
987
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
988
+ )
989
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
990
+
991
+ text_outputs = self.text_model(
992
+ input_ids=input_ids,
993
+ attention_mask=attention_mask,
994
+ position_ids=position_ids,
995
+ output_attentions=output_attentions,
996
+ output_hidden_states=output_hidden_states,
997
+ return_dict=return_dict,
998
+ )
999
+
1000
+ pooled_output = text_outputs[1]
1001
+ text_features = self.text_projection(pooled_output)
1002
+
1003
+ return text_features
1004
+
1005
+ @add_start_docstrings_to_model_forward(CLIPSEG_VISION_INPUTS_DOCSTRING)
1006
+ def get_image_features(
1007
+ self,
1008
+ pixel_values: Optional[torch.FloatTensor] = None,
1009
+ output_attentions: Optional[bool] = None,
1010
+ output_hidden_states: Optional[bool] = None,
1011
+ return_dict: Optional[bool] = None,
1012
+ ) -> torch.FloatTensor:
1013
+ r"""
1014
+ Returns:
1015
+ image_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The image embeddings obtained by
1016
+ applying the projection layer to the pooled output of [`CLIPSegVisionModel`].
1017
+
1018
+ Examples:
1019
+
1020
+ ```python
1021
+ >>> from PIL import Image
1022
+ >>> import requests
1023
+ >>> from transformers import AutoProcessor, CLIPSegModel
1024
+
1025
+ >>> processor = AutoProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
1026
+ >>> model = CLIPSegModel.from_pretrained("CIDAS/clipseg-rd64-refined")
1027
+
1028
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1029
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1030
+
1031
+ >>> inputs = processor(images=image, return_tensors="pt")
1032
+
1033
+ >>> image_features = model.get_image_features(**inputs)
1034
+ ```"""
1035
+ # Use CLIPSEG model's config for some fields (if specified) instead of those of vision & text components.
1036
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1037
+ output_hidden_states = (
1038
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1039
+ )
1040
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1041
+
1042
+ vision_outputs = self.vision_model(
1043
+ pixel_values=pixel_values,
1044
+ output_attentions=output_attentions,
1045
+ output_hidden_states=output_hidden_states,
1046
+ return_dict=return_dict,
1047
+ )
1048
+
1049
+ pooled_output = vision_outputs[1] # pooled_output
1050
+ image_features = self.visual_projection(pooled_output)
1051
+
1052
+ return image_features
1053
+
1054
+ @add_start_docstrings_to_model_forward(CLIPSEG_INPUTS_DOCSTRING)
1055
+ @replace_return_docstrings(output_type=CLIPSegOutput, config_class=CLIPSegConfig)
1056
+ def forward(
1057
+ self,
1058
+ input_ids: Optional[torch.LongTensor] = None,
1059
+ pixel_values: Optional[torch.FloatTensor] = None,
1060
+ attention_mask: Optional[torch.Tensor] = None,
1061
+ position_ids: Optional[torch.LongTensor] = None,
1062
+ return_loss: Optional[bool] = None,
1063
+ output_attentions: Optional[bool] = None,
1064
+ output_hidden_states: Optional[bool] = None,
1065
+ return_dict: Optional[bool] = None,
1066
+ ) -> Union[Tuple, CLIPSegOutput]:
1067
+ r"""
1068
+ Returns:
1069
+
1070
+ Examples:
1071
+
1072
+ ```python
1073
+ >>> from PIL import Image
1074
+ >>> import requests
1075
+ >>> from transformers import AutoProcessor, CLIPSegModel
1076
+
1077
+ >>> processor = AutoProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
1078
+ >>> model = CLIPSegModel.from_pretrained("CIDAS/clipseg-rd64-refined")
1079
+
1080
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1081
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1082
+
1083
+ >>> inputs = processor(
1084
+ ... text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True
1085
+ ... )
1086
+
1087
+ >>> outputs = model(**inputs)
1088
+ >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score
1089
+ >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities
1090
+ ```"""
1091
+ # Use CLIPSEG model's config for some fields (if specified) instead of those of vision & text components.
1092
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1093
+ output_hidden_states = (
1094
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1095
+ )
1096
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1097
+
1098
+ vision_outputs = self.vision_model(
1099
+ pixel_values=pixel_values,
1100
+ output_attentions=output_attentions,
1101
+ output_hidden_states=output_hidden_states,
1102
+ return_dict=return_dict,
1103
+ )
1104
+
1105
+ text_outputs = self.text_model(
1106
+ input_ids=input_ids,
1107
+ attention_mask=attention_mask,
1108
+ position_ids=position_ids,
1109
+ output_attentions=output_attentions,
1110
+ output_hidden_states=output_hidden_states,
1111
+ return_dict=return_dict,
1112
+ )
1113
+
1114
+ image_embeds = vision_outputs[1]
1115
+ image_embeds = self.visual_projection(image_embeds)
1116
+
1117
+ text_embeds = text_outputs[1]
1118
+ text_embeds = self.text_projection(text_embeds)
1119
+
1120
+ # normalized features
1121
+ image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True)
1122
+ text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)
1123
+
1124
+ # cosine similarity as logits
1125
+ logit_scale = self.logit_scale.exp()
1126
+ logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale
1127
+ logits_per_image = logits_per_text.t()
1128
+
1129
+ loss = None
1130
+ if return_loss:
1131
+ loss = clipseg_loss(logits_per_text)
1132
+
1133
+ if not return_dict:
1134
+ output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
1135
+ return ((loss,) + output) if loss is not None else output
1136
+
1137
+ return CLIPSegOutput(
1138
+ loss=loss,
1139
+ logits_per_image=logits_per_image,
1140
+ logits_per_text=logits_per_text,
1141
+ text_embeds=text_embeds,
1142
+ image_embeds=image_embeds,
1143
+ text_model_output=text_outputs,
1144
+ vision_model_output=vision_outputs,
1145
+ )
1146
+
1147
+
1148
+ class CLIPSegDecoderLayer(nn.Module):
1149
+ """
1150
+ CLIPSeg decoder layer, which is identical to `CLIPSegEncoderLayer`, except that normalization is applied after
1151
+ self-attention/MLP, rather than before.
1152
+ """
1153
+
1154
+ # Copied from transformers.models.clip.modeling_clip.CLIPEncoderLayer.__init__ with CLIP->CLIPSeg
1155
+ def __init__(self, config: CLIPSegConfig):
1156
+ super().__init__()
1157
+ self.embed_dim = config.hidden_size
1158
+ self.self_attn = CLIPSegAttention(config)
1159
+ self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
1160
+ self.mlp = CLIPSegMLP(config)
1161
+ self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
1162
+
1163
+ def forward(
1164
+ self,
1165
+ hidden_states: torch.Tensor,
1166
+ attention_mask: torch.Tensor,
1167
+ causal_attention_mask: torch.Tensor,
1168
+ output_attentions: Optional[bool] = False,
1169
+ ) -> Tuple[torch.FloatTensor]:
1170
+ """
1171
+ Args:
1172
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
1173
+ attention_mask (`torch.FloatTensor`): attention mask of size
1174
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
1176
+ output_attentions (`bool`, *optional*):
1177
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
1178
+ returned tensors for more detail.
1179
+ """
1180
+ residual = hidden_states
1181
+
1182
+ hidden_states, attn_weights = self.self_attn(
1183
+ hidden_states=hidden_states,
1184
+ attention_mask=attention_mask,
1185
+ causal_attention_mask=causal_attention_mask,
1186
+ output_attentions=output_attentions,
1187
+ )
1188
+
1189
+ hidden_states = residual + hidden_states
1190
+ hidden_states = self.layer_norm1(hidden_states)
1191
+
1192
+ residual = hidden_states
1193
+ hidden_states = self.mlp(hidden_states)
1194
+ hidden_states = residual + hidden_states
1195
+ hidden_states = self.layer_norm2(hidden_states)
1196
+
1197
+ outputs = (hidden_states,)
1198
+
1199
+ if output_attentions:
1200
+ outputs += (attn_weights,)
1201
+
1202
+ return outputs
1203
+
1204
+
1205
+ class CLIPSegDecoder(CLIPSegPreTrainedModel):
1206
+ def __init__(self, config: CLIPSegConfig):
1207
+ super().__init__(config)
1208
+
1209
+ self.conditional_layer = config.conditional_layer
1210
+
1211
+ self.film_mul = nn.Linear(config.projection_dim, config.reduce_dim)
1212
+ self.film_add = nn.Linear(config.projection_dim, config.reduce_dim)
1213
+
1214
+ if config.use_complex_transposed_convolution:
1215
+ transposed_kernels = (config.vision_config.patch_size // 4, config.vision_config.patch_size // 4)
1216
+
1217
+ self.transposed_convolution = nn.Sequential(
1218
+ nn.Conv2d(config.reduce_dim, config.reduce_dim, kernel_size=3, padding=1),
1219
+ nn.ReLU(),
1220
+ nn.ConvTranspose2d(
1221
+ config.reduce_dim,
1222
+ config.reduce_dim // 2,
1223
+ kernel_size=transposed_kernels[0],
1224
+ stride=transposed_kernels[0],
1225
+ ),
1226
+ nn.ReLU(),
1227
+ nn.ConvTranspose2d(
1228
+ config.reduce_dim // 2, 1, kernel_size=transposed_kernels[1], stride=transposed_kernels[1]
1229
+ ),
1230
+ )
1231
+ else:
1232
+ self.transposed_convolution = nn.ConvTranspose2d(
1233
+ config.reduce_dim, 1, config.vision_config.patch_size, stride=config.vision_config.patch_size
1234
+ )
1235
+
1236
+ depth = len(config.extract_layers)
1237
+ self.reduces = nn.ModuleList(
1238
+ [nn.Linear(config.vision_config.hidden_size, config.reduce_dim) for _ in range(depth)]
1239
+ )
1240
+
1241
+ decoder_config = copy.deepcopy(config.vision_config)
1242
+ decoder_config.hidden_size = config.reduce_dim
1243
+ decoder_config.num_attention_heads = config.decoder_num_attention_heads
1244
+ decoder_config.intermediate_size = config.decoder_intermediate_size
1245
+ decoder_config.hidden_act = "relu"
1246
+ self.layers = nn.ModuleList([CLIPSegDecoderLayer(decoder_config) for _ in range(len(config.extract_layers))])
1247
+
1248
+ def forward(
1249
+ self,
1250
+ hidden_states: Tuple[torch.Tensor],
1251
+ conditional_embeddings: torch.Tensor,
1252
+ output_attentions: Optional[bool] = None,
1253
+ output_hidden_states: Optional[bool] = None,
1254
+ return_dict: Optional[bool] = True,
1255
+ ):
1256
+ all_hidden_states = () if output_hidden_states else None
1257
+ all_attentions = () if output_attentions else None
1258
+
1259
+ activations = hidden_states[::-1]
1260
+
1261
+ output = None
1262
+ for i, (activation, layer, reduce) in enumerate(zip(activations, self.layers, self.reduces)):
1263
+ if output is not None:
1264
+ output = reduce(activation) + output
1265
+ else:
1266
+ output = reduce(activation)
1267
+
1268
+ if i == self.conditional_layer:
1269
+ output = self.film_mul(conditional_embeddings) * output.permute(1, 0, 2) + self.film_add(
1270
+ conditional_embeddings
1271
+ )
1272
+ output = output.permute(1, 0, 2)
1273
+
1274
+ layer_outputs = layer(
1275
+ output, attention_mask=None, causal_attention_mask=None, output_attentions=output_attentions
1276
+ )
1277
+
1278
+ output = layer_outputs[0]
1279
+
1280
+ if output_hidden_states:
1281
+ all_hidden_states += (output,)
1282
+
1283
+ if output_attentions:
1284
+ all_attentions += (layer_outputs[1],)
1285
+
1286
+ output = output[:, 1:, :].permute(0, 2, 1) # remove cls token and reshape to [batch_size, reduce_dim, seq_len]
1287
+
1288
+ size = int(math.sqrt(output.shape[2]))
1289
+
1290
+ batch_size = conditional_embeddings.shape[0]
1291
+ output = output.view(batch_size, output.shape[1], size, size)
1292
+
1293
+ logits = self.transposed_convolution(output).squeeze(1)
1294
+
1295
+ if not return_dict:
1296
+ return tuple(v for v in [logits, all_hidden_states, all_attentions] if v is not None)
1297
+
1298
+ return CLIPSegDecoderOutput(
1299
+ logits=logits,
1300
+ hidden_states=all_hidden_states,
1301
+ attentions=all_attentions,
1302
+ )
1303
+
1304
+
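The FiLM-style conditioning in `CLIPSegDecoder.forward` (`film_mul` / `film_add`) scales and shifts the reduced vision activations with the conditional embedding at `conditional_layer`. Below is a minimal sketch of that modulation on dummy tensors; the sizes are illustrative assumptions, not the real checkpoint dimensions.

```python
import torch
import torch.nn as nn

# Illustrative sizes only; the real values come from CLIPSegConfig.
projection_dim, reduce_dim, seq_len, batch_size = 64, 32, 485, 2

film_mul = nn.Linear(projection_dim, reduce_dim)
film_add = nn.Linear(projection_dim, reduce_dim)

cond = torch.randn(batch_size, projection_dim)              # conditional embeddings
activation = torch.randn(batch_size, seq_len, reduce_dim)   # reduced vision activation

# Same permute trick as in CLIPSegDecoder.forward: broadcast the per-sample
# scale/shift over the sequence dimension, then restore the original layout.
modulated = film_mul(cond) * activation.permute(1, 0, 2) + film_add(cond)
modulated = modulated.permute(1, 0, 2)
print(modulated.shape)  # torch.Size([2, 485, 32])
```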
1305
+ @add_start_docstrings(
1306
+ """
1307
+ CLIPSeg model with a Transformer-based decoder on top for zero-shot and one-shot image segmentation.
1308
+ """,
1309
+ CLIPSEG_START_DOCSTRING,
1310
+ )
1311
+ class CLIPSegForImageSegmentation(CLIPSegPreTrainedModel):
1312
+ config_class = CLIPSegConfig
1313
+
1314
+ def __init__(self, config: CLIPSegConfig):
1315
+ super().__init__(config)
1316
+
1317
+ self.config = config
1318
+
1319
+ self.clip = CLIPSegModel(config)
1320
+ self.extract_layers = config.extract_layers
1321
+
1322
+ self.decoder = CLIPSegDecoder(config)
1323
+
1324
+ # Initialize weights and apply final processing
1325
+ self.post_init()
1326
+
1327
+ def get_conditional_embeddings(
1328
+ self,
1329
+ batch_size: Optional[int] = None,
1330
+ input_ids: Optional[torch.Tensor] = None,
1331
+ attention_mask: Optional[torch.Tensor] = None,
1332
+ position_ids: Optional[torch.Tensor] = None,
1333
+ conditional_pixel_values: Optional[torch.Tensor] = None,
1334
+ ):
1335
+ if input_ids is not None:
1336
+ # compute conditional embeddings from texts
1337
+ if len(input_ids) != batch_size:
1338
+ raise ValueError("Make sure to pass as many prompt texts as there are query images")
1339
+ with torch.no_grad():
1340
+ conditional_embeddings = self.clip.get_text_features(
1341
+ input_ids, attention_mask=attention_mask, position_ids=position_ids
1342
+ )
1343
+ elif conditional_pixel_values is not None:
1344
+ # compute conditional embeddings from images
1345
+ if len(conditional_pixel_values) != batch_size:
1346
+ raise ValueError("Make sure to pass as many prompt images as there are query images")
1347
+ with torch.no_grad():
1348
+ conditional_embeddings = self.clip.get_image_features(conditional_pixel_values)
1349
+ else:
1350
+ raise ValueError(
1351
+ "Invalid conditional, should be either provided as `input_ids` or `conditional_pixel_values`"
1352
+ )
1353
+
1354
+ return conditional_embeddings
1355
+
1356
+ @add_start_docstrings_to_model_forward(CLIPSEG_INPUTS_DOCSTRING)
1357
+ @replace_return_docstrings(output_type=CLIPSegImageSegmentationOutput, config_class=CLIPSegTextConfig)
1358
+ def forward(
1359
+ self,
1360
+ input_ids: Optional[torch.FloatTensor] = None,
1361
+ pixel_values: Optional[torch.FloatTensor] = None,
1362
+ conditional_pixel_values: Optional[torch.FloatTensor] = None,
1363
+ conditional_embeddings: Optional[torch.FloatTensor] = None,
1364
+ attention_mask: Optional[torch.Tensor] = None,
1365
+ position_ids: Optional[torch.LongTensor] = None,
1366
+ labels: Optional[torch.LongTensor] = None,
1367
+ output_attentions: Optional[bool] = None,
1368
+ output_hidden_states: Optional[bool] = None,
1369
+ return_dict: Optional[bool] = None,
1370
+ ) -> Union[Tuple, CLIPSegOutput]:
1371
+ r"""
1372
+ labels (`torch.FloatTensor` of shape `(batch_size, height, width)`, *optional*):
+ Ground-truth binary segmentation maps used to compute the loss. When provided, a
+ `BCEWithLogitsLoss` is computed between the predicted `logits` and `labels` (see the loss
+ computation in `forward` below).
1376
+
1377
+ Returns:
1378
+
1379
+ Examples:
1380
+
1381
+ ```python
1382
+ >>> from transformers import AutoProcessor, CLIPSegForImageSegmentation
1383
+ >>> from PIL import Image
1384
+ >>> import requests
1385
+
1386
+ >>> processor = AutoProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
1387
+ >>> model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
1388
+
1389
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1390
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1391
+ >>> texts = ["a cat", "a remote", "a blanket"]
1392
+ >>> inputs = processor(text=texts, images=[image] * len(texts), padding=True, return_tensors="pt")
1393
+
1394
+ >>> outputs = model(**inputs)
1395
+
1396
+ >>> logits = outputs.logits
1397
+ >>> print(logits.shape)
1398
+ torch.Size([3, 352, 352])
1399
+ ```"""
1400
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1401
+
1402
+ # step 1: forward the query images through the frozen CLIP vision encoder
1403
+ with torch.no_grad():
1404
+ vision_outputs = self.clip.vision_model(
1405
+ pixel_values=pixel_values,
1406
+ output_attentions=output_attentions,
1407
+ output_hidden_states=True, # we need the intermediate hidden states
1408
+ return_dict=return_dict,
1409
+ )
1410
+ pooled_output = self.clip.visual_projection(vision_outputs[1])
1411
+
1412
+ hidden_states = vision_outputs.hidden_states if return_dict else vision_outputs[2]
1413
+ # we add +1 here as the hidden states also include the initial embeddings
1414
+ activations = [hidden_states[i + 1] for i in self.extract_layers]
1415
+
1416
+ # update vision_outputs
1417
+ if return_dict:
1418
+ vision_outputs = BaseModelOutputWithPooling(
1419
+ last_hidden_state=vision_outputs.last_hidden_state,
1420
+ pooler_output=vision_outputs.pooler_output,
1421
+ hidden_states=vision_outputs.hidden_states if output_hidden_states else None,
1422
+ attentions=vision_outputs.attentions,
1423
+ )
1424
+ else:
1425
+ vision_outputs = (
1426
+ vision_outputs[:2] + vision_outputs[3:] if not output_hidden_states else vision_outputs
1427
+ )
1428
+
1429
+ # step 2: compute conditional embeddings, either from text, images or an own provided embedding
1430
+ if conditional_embeddings is None:
1431
+ conditional_embeddings = self.get_conditional_embeddings(
1432
+ batch_size=pixel_values.shape[0],
1433
+ input_ids=input_ids,
1434
+ attention_mask=attention_mask,
1435
+ position_ids=position_ids,
1436
+ conditional_pixel_values=conditional_pixel_values,
1437
+ )
1438
+ else:
1439
+ if conditional_embeddings.shape[0] != pixel_values.shape[0]:
1440
+ raise ValueError(
1441
+ "Make sure to pass as many conditional embeddings as there are query images in the batch"
1442
+ )
1443
+ if conditional_embeddings.shape[1] != self.config.projection_dim:
1444
+ raise ValueError(
1445
+ "Make sure that the feature dimension of the conditional embeddings matches"
1446
+ " `config.projection_dim`."
1447
+ )
1448
+
1449
+ # step 3: forward both the pooled output and the activations through the lightweight decoder to predict masks
1450
+ decoder_outputs = self.decoder(
1451
+ activations,
1452
+ conditional_embeddings,
1453
+ output_attentions=output_attentions,
1454
+ output_hidden_states=output_hidden_states,
1455
+ return_dict=return_dict,
1456
+ )
1457
+ logits = decoder_outputs.logits if return_dict else decoder_outputs[0]
1458
+
1459
+ loss = None
1460
+ if labels is not None:
1461
+ # move labels to the correct device to enable PP
1462
+ labels = labels.to(logits.device)
1463
+ loss_fn = nn.BCEWithLogitsLoss()
1464
+ loss = loss_fn(logits, labels)
1465
+
1466
+ if not return_dict:
1467
+ output = (logits, conditional_embeddings, pooled_output, vision_outputs, decoder_outputs)
1468
+ return ((loss,) + output) if loss is not None else output
1469
+
1470
+ return CLIPSegImageSegmentationOutput(
1471
+ loss=loss,
1472
+ logits=logits,
1473
+ conditional_embeddings=conditional_embeddings,
1474
+ pooled_output=pooled_output,
1475
+ vision_model_output=vision_outputs,
1476
+ decoder_output=decoder_outputs,
1477
+ )
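In addition to the text-prompt example in the docstring above, `CLIPSegForImageSegmentation` accepts `conditional_pixel_values` for one-shot segmentation, where the prompt is itself an image. A minimal sketch follows; using the query image as its own prompt here is purely for illustration.

```python
import requests
import torch
from PIL import Image
from transformers import AutoProcessor, CLIPSegForImageSegmentation

processor = AutoProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# Condition on a prompt image instead of a text prompt.
inputs = processor(images=image, visual_prompt=image, return_tensors="pt")

with torch.no_grad():
    outputs = model(
        pixel_values=inputs["pixel_values"],
        conditional_pixel_values=inputs["conditional_pixel_values"],
    )
print(outputs.logits.shape)  # one (H, W) logit map per query image
```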
llmeval-env/lib/python3.10/site-packages/transformers/models/clipseg/processing_clipseg.py ADDED
@@ -0,0 +1,161 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Image/Text processor class for CLIPSeg
17
+ """
18
+
19
+ import warnings
20
+
21
+ from ...processing_utils import ProcessorMixin
22
+ from ...tokenization_utils_base import BatchEncoding
23
+
24
+
25
+ class CLIPSegProcessor(ProcessorMixin):
26
+ r"""
27
+ Constructs a CLIPSeg processor which wraps a CLIPSeg image processor and a CLIP tokenizer into a single processor.
28
+
29
+ [`CLIPSegProcessor`] offers all the functionalities of [`ViTImageProcessor`] and [`CLIPTokenizerFast`]. See the
30
+ [`~CLIPSegProcessor.__call__`] and [`~CLIPSegProcessor.decode`] for more information.
31
+
32
+ Args:
33
+ image_processor ([`ViTImageProcessor`], *optional*):
34
+ The image processor is a required input.
35
+ tokenizer ([`CLIPTokenizerFast`], *optional*):
36
+ The tokenizer is a required input.
37
+ """
38
+
39
+ attributes = ["image_processor", "tokenizer"]
40
+ image_processor_class = "ViTImageProcessor"
41
+ tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
42
+
43
+ def __init__(self, image_processor=None, tokenizer=None, **kwargs):
44
+ feature_extractor = None
45
+ if "feature_extractor" in kwargs:
46
+ warnings.warn(
47
+ "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
48
+ " instead.",
49
+ FutureWarning,
50
+ )
51
+ feature_extractor = kwargs.pop("feature_extractor")
52
+
53
+ image_processor = image_processor if image_processor is not None else feature_extractor
54
+ if image_processor is None:
55
+ raise ValueError("You need to specify an `image_processor`.")
56
+ if tokenizer is None:
57
+ raise ValueError("You need to specify a `tokenizer`.")
58
+
59
+ super().__init__(image_processor, tokenizer)
60
+
61
+ def __call__(self, text=None, images=None, visual_prompt=None, return_tensors=None, **kwargs):
62
+ """
63
+ Main method to prepare one or several sequence(s) and image(s) for the model. This method forwards the `text`
64
+ and `kwargs` arguments to CLIPTokenizerFast's [`~CLIPTokenizerFast.__call__`] if `text` is not `None` to encode
65
+ the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
66
+ ViTImageProcessor's [`~ViTImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring of
67
+ the above two methods for more information.
68
+
69
+ Args:
70
+ text (`str`, `List[str]`, `List[List[str]]`):
71
+ The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
72
+ (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
73
+ `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
74
+ images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
75
+ The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
76
+ tensor. Both channels-first and channels-last formats are supported.
77
+ visual_prompt (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
78
+ The visual prompt image or batch of images to be prepared. Each visual prompt image can be a PIL image,
79
+ NumPy array or PyTorch tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape
80
+ (C, H, W), where C is a number of channels, H and W are image height and width.
81
+
82
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
83
+ If set, will return tensors of a particular framework. Acceptable values are:
84
+
85
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
86
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
87
+ - `'np'`: Return NumPy `np.ndarray` objects.
88
+ - `'jax'`: Return JAX `jnp.ndarray` objects.
89
+
90
+ Returns:
91
+ [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
92
+
93
+ - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
94
+ - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
95
+ `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
96
+ `None`).
97
+ - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
98
+ """
99
+ if text is None and visual_prompt is None and images is None:
100
+ raise ValueError("You have to specify either text, visual prompt or images.")
101
+
102
+ if text is not None and visual_prompt is not None:
103
+ raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")
104
+
105
+ if text is not None:
106
+ encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
107
+
108
+ if visual_prompt is not None:
109
+ prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)
110
+
111
+ if images is not None:
112
+ image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
113
+
114
+ if visual_prompt is not None and images is not None:
115
+ encoding = {
116
+ "pixel_values": image_features.pixel_values,
117
+ "conditional_pixel_values": prompt_features.pixel_values,
118
+ }
119
+ return encoding
120
+ elif text is not None and images is not None:
121
+ encoding["pixel_values"] = image_features.pixel_values
122
+ return encoding
123
+ elif text is not None:
124
+ return encoding
125
+ elif visual_prompt is not None:
126
+ encoding = {
127
+ "conditional_pixel_values": prompt_features.pixel_values,
128
+ }
129
+ return encoding
130
+ else:
131
+ return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
132
+
133
+ def batch_decode(self, *args, **kwargs):
134
+ """
135
+ This method forwards all its arguments to CLIPTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
136
+ refer to the docstring of this method for more information.
137
+ """
138
+ return self.tokenizer.batch_decode(*args, **kwargs)
139
+
140
+ def decode(self, *args, **kwargs):
141
+ """
142
+ This method forwards all its arguments to CLIPTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
143
+ the docstring of this method for more information.
144
+ """
145
+ return self.tokenizer.decode(*args, **kwargs)
146
+
147
+ @property
148
+ def feature_extractor_class(self):
149
+ warnings.warn(
150
+ "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
151
+ FutureWarning,
152
+ )
153
+ return self.image_processor_class
154
+
155
+ @property
156
+ def feature_extractor(self):
157
+ warnings.warn(
158
+ "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
159
+ FutureWarning,
160
+ )
161
+ return self.image_processor
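`CLIPSegProcessor.__call__` returns different structures depending on which of `text`, `images`, and `visual_prompt` are passed. Here is a short sketch of the two most common combinations; the blank placeholder image is only for illustration.

```python
from PIL import Image
from transformers import CLIPSegProcessor

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
image = Image.new("RGB", (352, 352))  # placeholder query image

# Text prompts + query images -> input_ids, attention_mask and pixel_values
text_inputs = processor(
    text=["a cat", "a dog"], images=[image, image], padding=True, return_tensors="pt"
)
print(sorted(text_inputs.keys()))

# Visual prompt + query images -> pixel_values and conditional_pixel_values
visual_inputs = processor(images=image, visual_prompt=image, return_tensors="pt")
print(sorted(visual_inputs.keys()))
```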
llmeval-env/lib/python3.10/site-packages/transformers/models/dialogpt/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/transformers/models/dialogpt/convert_dialogpt_original_pytorch_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,46 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import argparse
16
+ import os
17
+
18
+ import torch
19
+
20
+ from transformers.utils import WEIGHTS_NAME
21
+
22
+
23
+ DIALOGPT_MODELS = ["small", "medium", "large"]
24
+
25
+ OLD_KEY = "lm_head.decoder.weight"
26
+ NEW_KEY = "lm_head.weight"
27
+
28
+
29
+ def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
30
+ d = torch.load(checkpoint_path)
31
+ d[NEW_KEY] = d.pop(OLD_KEY)
32
+ os.makedirs(pytorch_dump_folder_path, exist_ok=True)
33
+ torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))
34
+
35
+
36
+ if __name__ == "__main__":
37
+ parser = argparse.ArgumentParser()
38
+ parser.add_argument("--dialogpt_path", default=".", type=str)
39
+ args = parser.parse_args()
40
+ for MODEL in DIALOGPT_MODELS:
41
+ checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
42
+ pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
43
+ convert_dialogpt_checkpoint(
44
+ checkpoint_path,
45
+ pytorch_dump_folder_path,
46
+ )
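The conversion above only renames `lm_head.decoder.weight` to `lm_head.weight` and re-saves the state dict. A minimal sanity check after running the script, assuming the default `./DialoGPT-{size}` output folders from `__main__`:

```python
import os

import torch
from transformers.utils import WEIGHTS_NAME

# Load the converted state dict and confirm the LM head key was renamed.
state_dict = torch.load(os.path.join("./DialoGPT-small", WEIGHTS_NAME), map_location="cpu")
assert "lm_head.weight" in state_dict
assert "lm_head.decoder.weight" not in state_dict
```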
llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_sw3/__init__.py ADDED
@@ -0,0 +1,43 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
18
+
19
+
20
+ _import_structure = {}
21
+
22
+ try:
23
+ if not is_sentencepiece_available():
24
+ raise OptionalDependencyNotAvailable()
25
+ except OptionalDependencyNotAvailable:
26
+ pass
27
+ else:
28
+ _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]
29
+
30
+
31
+ if TYPE_CHECKING:
32
+ try:
33
+ if not is_sentencepiece_available():
34
+ raise OptionalDependencyNotAvailable()
35
+ except OptionalDependencyNotAvailable:
36
+ pass
37
+ else:
38
+ from .tokenization_gpt_sw3 import GPTSw3Tokenizer
39
+
40
+ else:
41
+ import sys
42
+
43
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_sw3/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (700 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_sw3/__pycache__/convert_megatron_to_pytorch.cpython-310.pyc ADDED
Binary file (5.31 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_sw3/__pycache__/tokenization_gpt_sw3.cpython-310.pyc ADDED
Binary file (12.3 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_sw3/convert_megatron_to_pytorch.py ADDED
@@ -0,0 +1,197 @@
1
+ # Copyright 2022 The HuggingFace Inc. team and the AI-Sweden team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """ Convert GPT-SW3 megatron checkpoints to pytorch"""
15
+
16
+ import argparse
17
+ import os
18
+ from os.path import isfile
19
+
20
+ import torch
21
+
22
+ from transformers import GPT2Config
23
+
24
+
25
+ def recursive_print(name, val, spaces=0):
26
+ # Format the message.
27
+ if name is None:
28
+ msg = None
29
+ else:
30
+ fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
31
+ msg = fmt.format(name)
32
+
33
+ # Print and recurse (if needed).
34
+ if isinstance(val, dict):
35
+ if msg is not None:
36
+ print(msg)
37
+ for k in val.keys():
38
+ recursive_print(k, val[k], spaces + 2)
39
+ elif isinstance(val, torch.Tensor):
40
+ print(msg, ":", val.size())
41
+ else:
42
+ print(msg, ":", val)
43
+
44
+
45
+ def fix_query_key_value_ordering(param, num_splits, num_heads, hidden_size):
46
+ # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
47
+ # for compatibility with later versions of NVIDIA Megatron-LM.
48
+ # The inverse operation is performed inside Megatron-LM to read checkpoints:
49
+ # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
50
+ # If param is the weight tensor of the self-attention block, the returned tensor
51
+ # will have to be transposed one more time to be read by HuggingFace GPT2.
52
+ input_shape = param.size()
53
+ # other versions store [num_heads * num_splits * hidden_size, :]
54
+ saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
55
+ param = param.view(*saved_shape)
56
+ param = param.transpose(0, 1).contiguous()
57
+ param = param.view(*input_shape)
58
+ return param
59
+
60
+
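To make the permutation in `fix_query_key_value_ordering` concrete, here is a toy example (2 heads, 3 splits for Q/K/V, head size 3, illustrative sizes only): a fused weight stored in Megatron's `[num_heads * num_splits * hidden_size, :]` layout is regrouped into the `[num_splits * num_heads * hidden_size, :]` layout expected downstream. It relies on the function defined just above.

```python
import torch

# Toy dimensions, not real model sizes.
num_heads, num_splits, hidden_size, in_dim = 2, 3, 3, 4
param = torch.arange(num_heads * num_splits * hidden_size * in_dim, dtype=torch.float32)
param = param.view(num_heads * num_splits * hidden_size, in_dim)

# Uses fix_query_key_value_ordering defined above.
reordered = fix_query_key_value_ordering(param, num_splits, num_heads, hidden_size)
print(reordered.shape)  # torch.Size([18, 4]); rows regrouped as (split, head, hidden)
```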
61
+ def convert_megatron_checkpoint(sd_megatron, config):
62
+ """
63
+ Converts a Megatron checkpoint to a HuggingFace GPT-SW3 checkpoint.
64
+ """
65
+ n_positions = config.n_positions
66
+ layers = config.n_layer
67
+ vocab_size = config.vocab_size
68
+ heads = config.n_head
69
+ hidden_size_per_head = config.n_embd // config.n_head
70
+
71
+ word_embeddings = sd_megatron["model.language_model.embedding.word_embeddings.weight"][:vocab_size, :]
72
+ sd_hf = {
73
+ "transformer.wte.weight": word_embeddings,
74
+ "transformer.wpe.weight": sd_megatron["model.language_model.embedding.position_embeddings.weight"],
75
+ "transformer.ln_f.weight": sd_megatron["model.language_model.encoder.final_layernorm.weight"],
76
+ "transformer.ln_f.bias": sd_megatron["model.language_model.encoder.final_layernorm.bias"],
77
+ }
78
+
79
+ pf = "model.language_model.encoder.layers."
80
+ for i in range(layers):
81
+ causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.bool))
82
+ causal_mask = causal_mask.view(1, 1, n_positions, n_positions)
83
+ sd_hf[f"transformer.h.{i}.attn.bias"] = causal_mask
84
+ sd_hf[f"transformer.h.{i}.attn.masked_bias"] = torch.tensor(-1e4, dtype=torch.bfloat16)
85
+
86
+ sd_hf[f"transformer.h.{i}.ln_1.weight"] = sd_megatron[f"{pf}{i}.input_layernorm.weight"]
87
+ sd_hf[f"transformer.h.{i}.ln_1.bias"] = sd_megatron[f"{pf}{i}.input_layernorm.bias"]
88
+
89
+ val1 = sd_megatron[f"{pf}{i}.self_attention.query_key_value.weight"]
90
+ val1 = fix_query_key_value_ordering(val1, 3, heads, hidden_size_per_head)
91
+ sd_hf[f"transformer.h.{i}.attn.c_attn.weight"] = val1.transpose(0, 1).contiguous()
92
+
93
+ val2 = sd_megatron[f"{pf}{i}.self_attention.query_key_value.bias"]
94
+ val2 = fix_query_key_value_ordering(val2, 3, heads, hidden_size_per_head)
95
+ sd_hf[f"transformer.h.{i}.attn.c_attn.bias"] = val2
96
+
97
+ sd_hf[f"transformer.h.{i}.attn.c_proj.weight"] = sd_megatron[f"{pf}{i}.self_attention.dense.weight"].transpose(
98
+ 0, 1
99
+ )
100
+ sd_hf[f"transformer.h.{i}.attn.c_proj.bias"] = sd_megatron[f"{pf}{i}.self_attention.dense.bias"]
101
+ sd_hf[f"transformer.h.{i}.ln_2.weight"] = sd_megatron[f"{pf}{i}.post_attention_layernorm.weight"]
102
+ sd_hf[f"transformer.h.{i}.ln_2.bias"] = sd_megatron[f"{pf}{i}.post_attention_layernorm.bias"]
103
+ sd_hf[f"transformer.h.{i}.mlp.c_fc.weight"] = sd_megatron[f"{pf}{i}.mlp.dense_h_to_4h.weight"].transpose(0, 1)
104
+ sd_hf[f"transformer.h.{i}.mlp.c_fc.bias"] = sd_megatron[f"{pf}{i}.mlp.dense_h_to_4h.bias"]
105
+ sd_hf[f"transformer.h.{i}.mlp.c_proj.weight"] = sd_megatron[f"{pf}{i}.mlp.dense_4h_to_h.weight"].transpose(
106
+ 0, 1
107
+ )
108
+ sd_hf[f"transformer.h.{i}.mlp.c_proj.bias"] = sd_megatron[f"{pf}{i}.mlp.dense_4h_to_h.bias"]
109
+
110
+ # For the LM head, Transformers expects the weight matrix to be tied to the word embeddings.
111
+ sd_hf["lm_head.weight"] = word_embeddings
112
+
113
+ return sd_hf
114
+
115
+
116
+ def copy_config(config_hf, config_megatron):
117
+ """Copy the config from Megatron to hf."""
118
+ config_hf.vocab_size = 64000
119
+ config_hf.n_positions = config_megatron["encoder_seq_length"]
120
+ config_hf.n_embd = config_megatron["hidden_size"]
121
+ config_hf.n_layer = config_megatron["num_layers"]
122
+ config_hf.n_head = config_megatron["num_attention_heads"]
123
+ config_hf.n_inner = config_megatron["ffn_hidden_size"]
124
+ config_hf.activation_function = "gelu"
125
+ config_hf.resid_pdrop = 0.1
126
+ config_hf.embd_pdrop = 0.1
127
+ config_hf.attn_pdrop = 0.1
128
+ config_hf.layer_norm_epsilon = config_megatron["layernorm_epsilon"] # 1e-5
129
+ config_hf.initializer_range = config_megatron["init_method_std"] # 0.02
130
+ config_hf.apply_query_key_layer_scaling = config_megatron["apply_query_key_layer_scaling"] # True
131
+ config_hf.normalize_attention_scores = True
132
+ config_hf.use_cache = True
133
+
134
+ # This identifies the 6.7B (7B) model which uses a different tokenizer
135
+ if config_megatron["hidden_size"] == 4096:
136
+ config_hf.bos_token_id = 1 # <|endoftext|>
137
+ config_hf.eos_token_id = 1 # <|endoftext|>
138
+ config_hf.pad_token_id = 0 # <unk>
139
+ else:
140
+ config_hf.bos_token_id = 2 # <s>
141
+ config_hf.eos_token_id = 3 # <|endoftext|>
142
+ config_hf.pad_token_id = 0 # <pad>
143
+
144
+ return config_hf
145
+
146
+
147
+ def main(args):
148
+ print(args)
149
+
150
+ checkpoint_path = args.checkpoint_path
151
+ save_path = args.save_path
152
+ if not isfile(checkpoint_path):
153
+ raise FileNotFoundError(f"ERROR! could not find file {checkpoint_path}")
154
+
155
+ # Load the model.
156
+ checkpoint = torch.load(checkpoint_path, map_location="cpu")
157
+
158
+ # Load the config.
159
+ config_megatron = checkpoint["hyper_parameters"]["cfg"]
160
+ config_hf = GPT2Config()
161
+ config_hf = copy_config(config_hf=config_hf, config_megatron=config_megatron)
162
+ config_hf.architectures = ["GPT2LMHeadModel"]
163
+
164
+ sd_megatron = checkpoint["state_dict"]
165
+
166
+ # Convert.
167
+ print("Converting")
168
+ sd_hf = convert_megatron_checkpoint(sd_megatron, config_hf)
169
+
170
+ # Print the structure of converted state dict.
171
+ if args.print_checkpoint_structure:
172
+ recursive_print(None, sd_hf)
173
+
174
+ config_hf.tokenizer_class = "GPTSw3Tokenizer"
175
+
176
+ # Store the config to file.
177
+ print("Saving config")
178
+ config_hf.save_pretrained(save_path)
179
+
180
+ # Store the state_dict to file.
181
+ output_checkpoint_file = os.path.join(save_path, "pytorch_model.bin")
182
+ print(f'Saving checkpoint to "{output_checkpoint_file}"')
183
+ torch.save(sd_hf, output_checkpoint_file)
184
+
185
+
186
+ if __name__ == "__main__":
187
+ parser = argparse.ArgumentParser()
188
+ parser.add_argument(
189
+ "--checkpoint_path",
190
+ type=str,
191
+ required=True,
192
+ help="e.g. megatron_gpt--val_loss=2.42-step=38000-consumed_samples=54720000",
193
+ )
194
+ parser.add_argument("--save_path", type=str, required=True, help="e.g. /home/user/gpt-sw3/hf")
195
+ parser.add_argument("--print-checkpoint-structure", action="store_true")
196
+ _args = parser.parse_args()
197
+ main(_args)
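Because `copy_config` sets `architectures = ["GPT2LMHeadModel"]` and `tokenizer_class = "GPTSw3Tokenizer"`, the converted folder can be loaded with the standard GPT-2 model class. A minimal sketch; the path is whatever was passed as `--save_path`, and the tokenizer's `spiece.model` must be copied into that folder separately since the converter only writes the config and state dict.

```python
from transformers import GPT2LMHeadModel

# Path passed as --save_path when running the conversion script.
model = GPT2LMHeadModel.from_pretrained("/home/user/gpt-sw3/hf")
print(model.config.tokenizer_class)  # "GPTSw3Tokenizer"
```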
llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_sw3/tokenization_gpt_sw3.py ADDED
@@ -0,0 +1,318 @@
1
+ """The tokenizer used by the GPT-SW3 models."""
2
+
3
+ import os
4
+ import re
5
+ import unicodedata
6
+ from shutil import copyfile
7
+ from typing import Any, Dict, List, Optional, Tuple, Union
8
+
9
+ import sentencepiece as spm
10
+
11
+ from ...tokenization_utils import PreTrainedTokenizer
12
+ from ...utils import is_torch_available, logging
13
+
14
+
15
+ if is_torch_available():
16
+ import torch
17
+
18
+
19
+ logger = logging.get_logger(__name__)
20
+ VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
21
+
22
+
23
+ class GPTSw3Tokenizer(PreTrainedTokenizer):
24
+ """
25
+ Construct an GPTSw3 tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).
26
+
27
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
28
+ this superclass for more information regarding those methods.
29
+
30
+ Example usage:
31
+ ```python
32
+ >>> from transformers import GPTSw3Tokenizer
33
+
34
+ >>> tokenizer = GPTSw3Tokenizer.from_pretrained("AI-Sweden-Models/gpt-sw3-126m")
35
+ >>> tokenizer("Svenska är kul!")["input_ids"]
36
+ [1814, 377, 3617, 63504]
37
+ ```
38
+
39
+ Args:
40
+ vocab_file (`str`):
41
+ [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
42
+ contains the vocabulary necessary to instantiate a tokenizer.
43
+ do_lower_case (`bool`, *optional*, defaults to `False`):
44
+ Whether or not to lowercase the input when tokenizing.
45
+ remove_space (`bool`, *optional*, defaults to `False`):
46
+ Whether or not to strip the text when tokenizing (removing excess spaces before and after the string).
47
+ keep_accents (`bool`, *optional*, defaults to `False`):
48
+ Whether or not to keep accents when tokenizing.
49
+ pad_token (`str`, *optional*):
50
+ The token used for padding, for example when batching sequences of different lengths. If not provided, will
51
+ default to '<pad>' or '<unk>' depending on model size.
52
+ unk_token (`str`, *optional*):
53
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
54
+ token instead. If not provided, will default to '<unk>'.
55
+ eos_token (`str`, *optional*):
56
+ The end of sequence token seen during pretraining. If not provided, will default to '<|endoftext|>'
57
+ bos_token (`str`, *optional*):
58
+ The beginning of sequence token that can be used for downstream task, was not seen during pretraining. If
59
+ not provided, will default to '<s>' or '<|endoftext|>', depending on model size.
60
+ sp_model_kwargs (`dict`, *optional*):
61
+ Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
62
+ SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
63
+ to set:
64
+
65
+ - `enable_sampling`: Enable subword regularization.
66
+ - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
67
+
68
+ - `nbest_size = {0,1}`: No sampling is performed.
69
+ - `nbest_size > 1`: samples from the nbest_size results.
70
+ - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
71
+ using forward-filtering-and-backward-sampling algorithm.
72
+
73
+ - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
74
+ BPE-dropout.
75
+
76
+ Attributes:
77
+ sp_model (`SentencePieceProcessor`):
78
+ The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
79
+ whitespaces (`set`):
80
+ The whitespaces that are replaced in the whitespace normalization in preprocessing.
81
+ non_printing_characters_re (`Pattern`):
82
+ The compiled regular expression to remove non-printing characters in preprocessing.
83
+ """
84
+
85
+ vocab_files_names = VOCAB_FILES_NAMES
86
+ model_input_names = ["input_ids", "attention_mask"]
87
+
88
+ def __init__(
89
+ self,
90
+ vocab_file,
91
+ do_lower_case=False,
92
+ remove_space=False,
93
+ keep_accents=False,
94
+ pad_token=None,
95
+ unk_token=None,
96
+ eos_token=None,
97
+ bos_token=None,
98
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
99
+ **kwargs,
100
+ ) -> None:
101
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
102
+
103
+ name_or_path = kwargs.get("name_or_path")
104
+ if name_or_path is None:
105
+ logger.warning(
106
+ "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
107
+ " you are testing the model, this can safely be ignored"
108
+ )
109
+ name_or_path = "None"
110
+
111
+ # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
112
+ eos_token = "<|endoftext|>" if eos_token is None else eos_token
113
+ unk_token = "<unk>" if unk_token is None else unk_token
114
+ if "gpt-sw3-7b" in name_or_path:
115
+ pad_token = unk_token if pad_token is None else pad_token
116
+ bos_token = eos_token if bos_token is None else bos_token
117
+ else:
118
+ pad_token = "<pad>" if pad_token is None else pad_token
119
+ bos_token = "<s>" if bos_token is None else bos_token
120
+
121
+ self.do_lower_case = do_lower_case
122
+ self.remove_space = remove_space
123
+ self.keep_accents = keep_accents
124
+ self.vocab_file = vocab_file
125
+
126
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
127
+ self.sp_model.Load(vocab_file)
128
+
129
+ # Used for whitespace normalization in input texts
130
+ # fmt : off
131
+ self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", "„"}
132
+ # fmt : on
133
+
134
+ # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
135
+ self.non_printing_characters_re = re.compile(
136
+ f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
137
+ )
138
+
139
+ super().__init__(
140
+ do_lower_case=do_lower_case,
141
+ remove_space=remove_space,
142
+ keep_accents=keep_accents,
143
+ bos_token=bos_token,
144
+ eos_token=eos_token,
145
+ unk_token=unk_token,
146
+ pad_token=pad_token,
147
+ sp_model_kwargs=self.sp_model_kwargs,
148
+ **kwargs,
149
+ )
150
+
151
+ # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.__getstate__
152
+ def __getstate__(self):
153
+ state = self.__dict__.copy()
154
+ state["sp_model"] = None
155
+ return state
156
+
157
+ # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.__setstate__
158
+ def __setstate__(self, d):
159
+ self.__dict__ = d
160
+
161
+ # for backward compatibility
162
+ if not hasattr(self, "sp_model_kwargs"):
163
+ self.sp_model_kwargs = {}
164
+
165
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
166
+ self.sp_model.Load(self.vocab_file)
167
+
168
+ @property
169
+ # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
170
+ def vocab_size(self) -> int:
171
+ return len(self.sp_model)
172
+
173
+ def preprocess_text(self, text: str) -> str:
174
+ """
175
+ Returns the preprocessed text. This procedure is identical to what was used when training the tokenizer.
176
+ """
177
+
178
+ # Remove non-printing characters
179
+ text = self.non_printing_characters_re.sub("", text)
180
+
181
+ # Normalize whitespaces
182
+ text = "".join([char if char not in self.whitespaces else " " for char in text])
183
+
184
+ # NFC Unicode normalization
185
+ text = unicodedata.normalize("NFC", text)
186
+ return text
187
+
188
+ def _tokenize(self, text: str, **kwargs) -> List[str]:
189
+ text = self.preprocess_text(text)
190
+ return self.sp_model.encode(text, out_type=str)
191
+
192
+ def _convert_token_to_id(self, token: str) -> int:
193
+ """Converts a token (str) to an id (int) using the vocab."""
194
+ return self.sp_model.PieceToId(token)
195
+
196
+ def _convert_id_to_token(self, index: int) -> str:
197
+ """Converts an index (int) to a token (str) using the vocab."""
198
+ return self.sp_model.IdToPiece(index)
199
+
200
+ @staticmethod
201
+ def clean_up_tokenization(out_string: str) -> str:
202
+ """Returns the input string, this function is overridden to remove the default clean up."""
203
+ return out_string
204
+
205
+ def convert_tokens_to_string(self, tokens: List[str]) -> str:
206
+ """Converts a sequence of tokens (strings) to a single string. Special tokens remain intact."""
207
+ current_sub_tokens = []
208
+ out_string = ""
209
+ prev_is_special = False
210
+ for token in tokens:
211
+ # make sure that special tokens are not decoded using sentencepiece model
212
+ if token in self.all_special_tokens:
213
+ # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
214
+ if not prev_is_special:
215
+ out_string += " "
216
+
217
+ out_string += self.sp_model.decode(current_sub_tokens) + token
218
+ prev_is_special = True
219
+ current_sub_tokens = []
220
+ else:
221
+ current_sub_tokens.append(token)
222
+ prev_is_special = False
223
+ out_string += self.sp_model.decode(current_sub_tokens)
224
+
225
+ return out_string
226
+
227
+ # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.get_vocab
228
+ def get_vocab(self) -> Dict[str, int]:
229
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
230
+ vocab.update(self.added_tokens_encoder)
231
+ return vocab
232
+
233
+ # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.save_vocabulary
234
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
235
+ if not os.path.isdir(save_directory):
236
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
237
+ return
238
+ out_vocab_file = os.path.join(
239
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
240
+ )
241
+
242
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
243
+ copyfile(self.vocab_file, out_vocab_file)
244
+ elif not os.path.isfile(self.vocab_file):
245
+ with open(out_vocab_file, "wb") as fi:
246
+ content_spiece_model = self.sp_model.serialized_model_proto()
247
+ fi.write(content_spiece_model)
248
+
249
+ return (out_vocab_file,)
250
+
251
+ def encode_fast(
252
+ self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False
253
+ ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
254
+ """
255
+ Encodes a text or batch of texts to token ids using preprocessing and the raw SP tokenizer. This has reduced
256
+ functionality but is often much faster.
257
+
258
+ Does NOT handle special tokens correctly, these can manually be added as ids afterwards.
259
+
260
+ Does NOT support padding, these can manually be added as ids afterwards.
261
+
262
+ Use default HuggingFace tokenization methods for full functionality.
263
+
264
+ Args:
265
+ text (`str` or `List[str]`): One or several text(s) to convert to token ids.
266
+ return_tensors (`str` or `bool`): Returns PyTorch tensors if set to True or "pt"
267
+
268
+ Returns:
269
+ `List[int]`, `List[List[int]]`, or `torch.Tensor`: The encoded text(s) as token ids.
270
+ """
271
+
272
+ if isinstance(text, str):
273
+ text = self.preprocess_text(text)
274
+ token_ids = self.sp_model.encode(text)
275
+ else:
276
+ text = [self.preprocess_text(t) for t in text]
277
+ token_ids = self.sp_model.encode(text)
278
+
279
+ if return_tensors is True or return_tensors == "pt":
280
+ token_ids = torch.tensor(token_ids)
281
+
282
+ return token_ids
283
+
284
+ def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
285
+ """
286
+ Encodes a text or batch of texts to token ids using preprocessing and the raw SP tokenizer. This has reduced
287
+ functionality but is often much faster.
288
+
289
+ Args:
290
+ token_ids (`int` or `List[int]`): Encoded token or text as token id(s).
291
+
292
+ Returns:
293
+ `str`: Decoded text
294
+ """
295
+
296
+ return self.sp_model.decode(token_ids)
297
+
298
+ @property
299
+ def default_chat_template(self):
300
+ """
301
+ This chat template formats messages like an instant messenger chat log, with "User:" and "Bot:" strings
302
+ preceding messages. BOS tokens are added between all messages.
303
+ """
304
+ logger.warning_once(
305
+ "\nNo chat template is defined for this tokenizer - using the default template "
306
+ f"for the {self.__class__.__name__} class. If the default is not appropriate for "
307
+ "your model, please set `tokenizer.chat_template` to an appropriate template. "
308
+ "See https://huggingface.co/docs/transformers/main/chat_templating for more information.\n"
309
+ )
310
+ return (
311
+ "{{ eos_token }}{{ bos_token }}"
312
+ "{% for message in messages %}"
313
+ "{% if message['role'] == 'user' %}{{ 'User: ' + message['content']}}"
314
+ "{% else %}{{ 'Bot: ' + message['content']}}{% endif %}"
315
+ "{{ message['text'] }}{{ bos_token }}"
316
+ "{% endfor %}"
317
+ "Bot:"
318
+ )
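A short sketch of the fast SentencePiece paths above. As the docstrings note, `encode_fast` skips special-token handling and padding, so it is not a drop-in replacement for the regular `__call__`.

```python
from transformers import GPTSw3Tokenizer

tokenizer = GPTSw3Tokenizer.from_pretrained("AI-Sweden-Models/gpt-sw3-126m")

ids = tokenizer.encode_fast("Svenska är kul!")        # plain list of token ids
batch = tokenizer.encode_fast(["Hej!", "Hej då!"])    # list of lists, no padding
text = tokenizer.decode_fast(ids)                     # back to a string
print(ids, text)
```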
llmeval-env/lib/python3.10/site-packages/transformers/models/graphormer/__init__.py ADDED
@@ -0,0 +1,57 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
17
+
18
+
19
+ _import_structure = {
20
+ "configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
21
+ }
22
+
23
+ try:
24
+ if not is_torch_available():
25
+ raise OptionalDependencyNotAvailable()
26
+ except OptionalDependencyNotAvailable:
27
+ pass
28
+ else:
29
+ _import_structure["modeling_graphormer"] = [
30
+ "GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
31
+ "GraphormerForGraphClassification",
32
+ "GraphormerModel",
33
+ "GraphormerPreTrainedModel",
34
+ ]
35
+
36
+
37
+ if TYPE_CHECKING:
38
+ from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
39
+
40
+ try:
41
+ if not is_torch_available():
42
+ raise OptionalDependencyNotAvailable()
43
+ except OptionalDependencyNotAvailable:
44
+ pass
45
+ else:
46
+ from .modeling_graphormer import (
47
+ GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
48
+ GraphormerForGraphClassification,
49
+ GraphormerModel,
50
+ GraphormerPreTrainedModel,
51
+ )
52
+
53
+
54
+ else:
55
+ import sys
56
+
57
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
llmeval-env/lib/python3.10/site-packages/transformers/models/graphormer/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (996 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/graphormer/__pycache__/collating_graphormer.cpython-310.pyc ADDED
Binary file (4.75 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/graphormer/__pycache__/configuration_graphormer.cpython-310.pyc ADDED
Binary file (9.14 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/graphormer/__pycache__/modeling_graphormer.cpython-310.pyc ADDED
Binary file (25.4 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/graphormer/algos_graphormer.pyx ADDED
@@ -0,0 +1,107 @@
1
+ # Copyright (c) Microsoft Corporation and HuggingFace
2
+ # Licensed under the MIT License.
3
+
4
+ import cython
5
+
6
+ cimport numpy
7
+ from cython.parallel cimport parallel, prange
8
+
9
+ import numpy as np
10
+
11
+
12
+ # Reduce this number if matrices are too big for large graphs
13
+ UNREACHABLE_NODE_DISTANCE = 510
14
+
15
+ def floyd_warshall(adjacency_matrix):
16
+ """
17
+ Applies the Floyd-Warshall algorithm to the adjacency matrix, to compute the
18
+ shortest paths distance between all nodes, up to UNREACHABLE_NODE_DISTANCE.
19
+ """
20
+ (nrows, ncols) = adjacency_matrix.shape
21
+ assert nrows == ncols
22
+ cdef unsigned int n = nrows
23
+
24
+ adj_mat_copy = adjacency_matrix.astype(np.int32, order='C', casting='safe', copy=True)
25
+ assert adj_mat_copy.flags['C_CONTIGUOUS']
26
+ cdef numpy.ndarray[numpy.int32_t, ndim=2, mode='c'] M = adj_mat_copy
27
+ cdef numpy.ndarray[numpy.int32_t, ndim=2, mode='c'] path = -1 * np.ones([n, n], dtype=np.int32)
28
+
29
+ cdef unsigned int i, j, k
30
+ cdef numpy.int32_t M_ij, M_ik, cost_ikkj
31
+ cdef numpy.int32_t* M_ptr = &M[0,0]
32
+ cdef numpy.int32_t* M_i_ptr
33
+ cdef numpy.int32_t* M_k_ptr
34
+
35
+ # set unreachable nodes distance to UNREACHABLE_NODE_DISTANCE
36
+ for i in range(n):
37
+ for j in range(n):
38
+ if i == j:
39
+ M[i][j] = 0
40
+ elif M[i][j] == 0:
41
+ M[i][j] = UNREACHABLE_NODE_DISTANCE
42
+
43
+ # Floyd-Warshall relaxation over intermediate nodes k
44
+ for k in range(n):
45
+ M_k_ptr = M_ptr + n*k
46
+ for i in range(n):
47
+ M_i_ptr = M_ptr + n*i
48
+ M_ik = M_i_ptr[k]
49
+ for j in range(n):
50
+ cost_ikkj = M_ik + M_k_ptr[j]
51
+ M_ij = M_i_ptr[j]
52
+ if M_ij > cost_ikkj:
53
+ M_i_ptr[j] = cost_ikkj
54
+ path[i][j] = k
55
+
56
+ # set unreachable path to UNREACHABLE_NODE_DISTANCE
57
+ for i in range(n):
58
+ for j in range(n):
59
+ if M[i][j] >= UNREACHABLE_NODE_DISTANCE:
60
+ path[i][j] = UNREACHABLE_NODE_DISTANCE
61
+ M[i][j] = UNREACHABLE_NODE_DISTANCE
62
+
63
+ return M, path
64
+
65
+
66
+ def get_all_edges(path, i, j):
67
+ """
68
+ Recursive function to compute all possible paths between two nodes from the graph adjacency matrix.
69
+ """
70
+ cdef int k = path[i][j]
71
+ if k == -1:
72
+ return []
73
+ else:
74
+ return get_all_edges(path, i, k) + [k] + get_all_edges(path, k, j)
75
+
76
+
77
+ def gen_edge_input(max_dist, path, edge_feat):
78
+ """
79
+ Generates the full edge feature and adjacency matrix.
80
+ Shape: num_nodes * num_nodes * max_distance_between_nodes * num_edge_features
81
+ Dim 1 is the input node, dim 2 the output node of the edge, dim 3 the depth of the edge, dim 4 the feature
82
+ """
83
+ (nrows, ncols) = path.shape
84
+ assert nrows == ncols
85
+ cdef unsigned int n = nrows
86
+ cdef unsigned int max_dist_copy = max_dist
87
+
88
+ path_copy = path.astype(long, order='C', casting='safe', copy=True)
89
+ edge_feat_copy = edge_feat.astype(long, order='C', casting='safe', copy=True)
90
+ assert path_copy.flags['C_CONTIGUOUS']
91
+ assert edge_feat_copy.flags['C_CONTIGUOUS']
92
+
93
+ cdef numpy.ndarray[numpy.int32_t, ndim=4, mode='c'] edge_fea_all = -1 * np.ones([n, n, max_dist_copy, edge_feat.shape[-1]], dtype=np.int32)
94
+ cdef unsigned int i, j, k, num_path, cur
95
+
96
+ for i in range(n):
97
+ for j in range(n):
98
+ if i == j:
99
+ continue
100
+ if path_copy[i][j] == UNREACHABLE_NODE_DISTANCE:
101
+ continue
102
+ path = [i] + get_all_edges(path_copy, i, j) + [j]
103
+ num_path = len(path) - 1
104
+ for k in range(num_path):
105
+ edge_fea_all[i, j, k, :] = edge_feat_copy[path[k], path[k+1], :]
106
+
107
+ return edge_fea_all
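For reference, a small usage sketch (assumptions: Cython and a C compiler are available, mirroring how `collating_graphormer.py` below compiles this file on the fly with `pyximport`).
```python
# Sketch: compile the .pyx above on the fly and run Floyd-Warshall on a 3-node path graph.
import numpy as np
import pyximport

pyximport.install(setup_args={"include_dirs": np.get_include()})
from transformers.models.graphormer import algos_graphormer  # noqa: E402

# Boolean adjacency matrix for the path 0 - 1 - 2 (same dtype the collator passes in).
adj = np.array(
    [[0, 1, 0],
     [1, 0, 1],
     [0, 1, 0]],
    dtype=bool,
)
dist, path = algos_graphormer.floyd_warshall(adj)
print(dist[0, 2])  # 2: two hops via node 1
print(path[0, 2])  # 1: the intermediate node recorded for that shortest path
```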
llmeval-env/lib/python3.10/site-packages/transformers/models/graphormer/collating_graphormer.py ADDED
@@ -0,0 +1,134 @@
1
+ # Copyright (c) Microsoft Corporation and HuggingFace
2
+ # Licensed under the MIT License.
3
+
4
+ from typing import Any, Dict, List, Mapping
5
+
6
+ import numpy as np
7
+ import torch
8
+
9
+ from ...utils import is_cython_available, requires_backends
10
+
11
+
12
+ if is_cython_available():
13
+ import pyximport
14
+
15
+ pyximport.install(setup_args={"include_dirs": np.get_include()})
16
+ from . import algos_graphormer # noqa E402
17
+
18
+
19
+ def convert_to_single_emb(x, offset: int = 512):
20
+ feature_num = x.shape[1] if len(x.shape) > 1 else 1
21
+ feature_offset = 1 + np.arange(0, feature_num * offset, offset, dtype=np.int64)
22
+ x = x + feature_offset
23
+ return x
24
+
25
+
26
+ def preprocess_item(item, keep_features=True):
27
+ requires_backends(preprocess_item, ["cython"])
28
+
29
+ if keep_features and "edge_attr" in item.keys(): # edge_attr
30
+ edge_attr = np.asarray(item["edge_attr"], dtype=np.int64)
31
+ else:
32
+ edge_attr = np.ones((len(item["edge_index"][0]), 1), dtype=np.int64) # same embedding for all
33
+
34
+ if keep_features and "node_feat" in item.keys(): # input_nodes
35
+ node_feature = np.asarray(item["node_feat"], dtype=np.int64)
36
+ else:
37
+ node_feature = np.ones((item["num_nodes"], 1), dtype=np.int64) # same embedding for all
38
+
39
+ edge_index = np.asarray(item["edge_index"], dtype=np.int64)
40
+
41
+ input_nodes = convert_to_single_emb(node_feature) + 1
42
+ num_nodes = item["num_nodes"]
43
+
44
+ if len(edge_attr.shape) == 1:
45
+ edge_attr = edge_attr[:, None]
46
+ attn_edge_type = np.zeros([num_nodes, num_nodes, edge_attr.shape[-1]], dtype=np.int64)
47
+ attn_edge_type[edge_index[0], edge_index[1]] = convert_to_single_emb(edge_attr) + 1
48
+
49
+ # node adj matrix [num_nodes, num_nodes] bool
50
+ adj = np.zeros([num_nodes, num_nodes], dtype=bool)
51
+ adj[edge_index[0], edge_index[1]] = True
52
+
53
+ shortest_path_result, path = algos_graphormer.floyd_warshall(adj)
54
+ max_dist = np.amax(shortest_path_result)
55
+
56
+ input_edges = algos_graphormer.gen_edge_input(max_dist, path, attn_edge_type)
57
+ attn_bias = np.zeros([num_nodes + 1, num_nodes + 1], dtype=np.single) # with graph token
58
+
59
+ # combine
60
+ item["input_nodes"] = input_nodes + 1 # we shift all indices by one for padding
61
+ item["attn_bias"] = attn_bias
62
+ item["attn_edge_type"] = attn_edge_type
63
+ item["spatial_pos"] = shortest_path_result.astype(np.int64) + 1 # we shift all indices by one for padding
64
+ item["in_degree"] = np.sum(adj, axis=1).reshape(-1) + 1 # we shift all indices by one for padding
65
+ item["out_degree"] = item["in_degree"] # for undirected graph
66
+ item["input_edges"] = input_edges + 1 # we shift all indices by one for padding
67
+ if "labels" not in item:
68
+ item["labels"] = item["y"]
69
+
70
+ return item
71
+
72
+
73
+ class GraphormerDataCollator:
74
+ def __init__(self, spatial_pos_max=20, on_the_fly_processing=False):
75
+ if not is_cython_available():
76
+ raise ImportError("Graphormer preprocessing needs Cython (pyximport)")
77
+
78
+ self.spatial_pos_max = spatial_pos_max
79
+ self.on_the_fly_processing = on_the_fly_processing
80
+
81
+ def __call__(self, features: List[dict]) -> Dict[str, Any]:
82
+ if self.on_the_fly_processing:
83
+ features = [preprocess_item(i) for i in features]
84
+
85
+ if not isinstance(features[0], Mapping):
86
+ features = [vars(f) for f in features]
87
+ batch = {}
88
+
89
+ max_node_num = max(len(i["input_nodes"]) for i in features)
90
+ node_feat_size = len(features[0]["input_nodes"][0])
91
+ edge_feat_size = len(features[0]["attn_edge_type"][0][0])
92
+ max_dist = max(len(i["input_edges"][0][0]) for i in features)
93
+ edge_input_size = len(features[0]["input_edges"][0][0][0])
94
+ batch_size = len(features)
95
+
96
+ batch["attn_bias"] = torch.zeros(batch_size, max_node_num + 1, max_node_num + 1, dtype=torch.float)
97
+ batch["attn_edge_type"] = torch.zeros(batch_size, max_node_num, max_node_num, edge_feat_size, dtype=torch.long)
98
+ batch["spatial_pos"] = torch.zeros(batch_size, max_node_num, max_node_num, dtype=torch.long)
99
+ batch["in_degree"] = torch.zeros(batch_size, max_node_num, dtype=torch.long)
100
+ batch["input_nodes"] = torch.zeros(batch_size, max_node_num, node_feat_size, dtype=torch.long)
101
+ batch["input_edges"] = torch.zeros(
102
+ batch_size, max_node_num, max_node_num, max_dist, edge_input_size, dtype=torch.long
103
+ )
104
+
105
+ for ix, f in enumerate(features):
106
+ for k in ["attn_bias", "attn_edge_type", "spatial_pos", "in_degree", "input_nodes", "input_edges"]:
107
+ f[k] = torch.tensor(f[k])
108
+
109
+ if len(f["attn_bias"][1:, 1:][f["spatial_pos"] >= self.spatial_pos_max]) > 0:
110
+ f["attn_bias"][1:, 1:][f["spatial_pos"] >= self.spatial_pos_max] = float("-inf")
111
+
112
+ batch["attn_bias"][ix, : f["attn_bias"].shape[0], : f["attn_bias"].shape[1]] = f["attn_bias"]
113
+ batch["attn_edge_type"][ix, : f["attn_edge_type"].shape[0], : f["attn_edge_type"].shape[1], :] = f[
114
+ "attn_edge_type"
115
+ ]
116
+ batch["spatial_pos"][ix, : f["spatial_pos"].shape[0], : f["spatial_pos"].shape[1]] = f["spatial_pos"]
117
+ batch["in_degree"][ix, : f["in_degree"].shape[0]] = f["in_degree"]
118
+ batch["input_nodes"][ix, : f["input_nodes"].shape[0], :] = f["input_nodes"]
119
+ batch["input_edges"][
120
+ ix, : f["input_edges"].shape[0], : f["input_edges"].shape[1], : f["input_edges"].shape[2], :
121
+ ] = f["input_edges"]
122
+
123
+ batch["out_degree"] = batch["in_degree"]
124
+
125
+ sample = features[0]["labels"]
126
+ if len(sample) == 1: # one task
127
+ if isinstance(sample[0], float): # regression
128
+ batch["labels"] = torch.from_numpy(np.concatenate([i["labels"] for i in features]))
129
+ else: # binary classification
130
+ batch["labels"] = torch.from_numpy(np.concatenate([i["labels"] for i in features]))
131
+ else: # multi task classification, left to float to keep the NaNs
132
+ batch["labels"] = torch.from_numpy(np.stack([i["labels"] for i in features], axis=0))
133
+
134
+ return batch
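A toy end-to-end sketch of the preprocessing plus collation path above (an illustration only; field names follow what `preprocess_item` reads: `edge_index`, `num_nodes`, `y`; Cython must be installed for the `pyximport` path).
```python
# Sketch: preprocess one small undirected graph and collate a batch of two copies.
from transformers.models.graphormer.collating_graphormer import (
    GraphormerDataCollator,
    preprocess_item,
)

toy_graph = {
    "edge_index": [[0, 1, 1, 2], [1, 0, 2, 1]],  # path 0 - 1 - 2, both directions
    "num_nodes": 3,
    "y": [1],  # single binary label per graph
}

collator = GraphormerDataCollator()
# dict() copies keep the original toy_graph untouched, since preprocess_item adds keys in place.
batch = collator([preprocess_item(dict(toy_graph)), preprocess_item(dict(toy_graph))])
print({k: tuple(v.shape) for k, v in batch.items()})
```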
llmeval-env/lib/python3.10/site-packages/transformers/models/graphormer/configuration_graphormer.py ADDED
@@ -0,0 +1,218 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Microsoft, clefourrier and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Graphormer model configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...utils import logging
19
+
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+
24
+ from ..deprecated._archive_maps import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
25
+
26
+
27
+ class GraphormerConfig(PretrainedConfig):
28
+ r"""
29
+ This is the configuration class to store the configuration of a [`~GraphormerModel`]. It is used to instantiate an
30
+ Graphormer model according to the specified arguments, defining the model architecture. Instantiating a
31
+ configuration with the defaults will yield a similar configuration to that of the Graphormer
32
+ [graphormer-base-pcqm4mv1](https://huggingface.co/graphormer-base-pcqm4mv1) architecture.
33
+
34
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
35
+ documentation from [`PretrainedConfig`] for more information.
36
+
37
+
38
+ Args:
39
+ num_classes (`int`, *optional*, defaults to 1):
40
+ Number of target classes or labels, set to n for binary classification of n tasks.
41
+ num_atoms (`int`, *optional*, defaults to 512*9):
42
+ Number of node types in the graphs.
43
+ num_edges (`int`, *optional*, defaults to 512*3):
44
+ Number of edge types in the graph.
45
+ num_in_degree (`int`, *optional*, defaults to 512):
46
+ Number of in-degree types in the input graphs.
47
+ num_out_degree (`int`, *optional*, defaults to 512):
48
+ Number of out-degree types in the input graphs.
49
+ num_edge_dis (`int`, *optional*, defaults to 128):
50
+ Number of edge distance (edge_dis) values in the input graphs.
51
+ multi_hop_max_dist (`int`, *optional*, defaults to 20):
52
+ Maximum distance of multi hop edges between two nodes.
53
+ spatial_pos_max (`int`, *optional*, defaults to 1024):
54
+ Maximum distance between nodes in the graph attention bias matrices, used during preprocessing and
55
+ collation.
56
+ edge_type (`str`, *optional*, defaults to `multi_hop`):
57
+ Type of edge relation chosen.
58
+ max_nodes (`int`, *optional*, defaults to 512):
59
+ Maximum number of nodes which can be parsed for the input graphs.
60
+ share_input_output_embed (`bool`, *optional*, defaults to `False`):
61
+ Shares the embedding layer between encoder and decoder - careful, True is not implemented.
62
+ num_hidden_layers (`int`, *optional*, defaults to 12):
63
+ Number of layers.
64
+ embedding_dim (`int`, *optional*, defaults to 768):
65
+ Dimension of the embedding layer in encoder.
66
+ ffn_embedding_dim (`int`, *optional*, defaults to 768):
67
+ Dimension of the "intermediate" (often named feed-forward) layer in encoder.
68
+ num_attention_heads (`int`, *optional*, defaults to 32):
69
+ Number of attention heads in the encoder.
70
+ self_attention (`bool`, *optional*, defaults to `True`):
71
+ Model is self attentive (False not implemented).
72
+ activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
73
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
74
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
75
+ dropout (`float`, *optional*, defaults to 0.1):
76
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
77
+ attention_dropout (`float`, *optional*, defaults to 0.1):
78
+ The dropout probability for the attention weights.
79
+ activation_dropout (`float`, *optional*, defaults to 0.1):
80
+ The dropout probability for the activation of the linear transformer layer.
81
+ layerdrop (`float`, *optional*, defaults to 0.0):
82
+ The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
83
+ for more details.
84
+ bias (`bool`, *optional*, defaults to `True`):
85
+ Uses bias in the attention module - unsupported at the moment.
86
+ embed_scale(`float`, *optional*, defaults to None):
87
+ Scaling factor for the node embeddings.
88
+ num_trans_layers_to_freeze (`int`, *optional*, defaults to 0):
89
+ Number of transformer layers to freeze.
90
+ encoder_normalize_before (`bool`, *optional*, defaults to `False`):
91
+ Normalize features before encoding the graph.
92
+ pre_layernorm (`bool`, *optional*, defaults to `False`):
93
+ Apply layernorm before self attention and the feed forward network. Without this, post layernorm will be
94
+ used.
95
+ apply_graphormer_init (`bool`, *optional*, defaults to `False`):
96
+ Apply a custom graphormer initialisation to the model before training.
97
+ freeze_embeddings (`bool`, *optional*, defaults to `False`):
98
+ Freeze the embedding layer, or train it along the model.
99
+ encoder_normalize_before (`bool`, *optional*, defaults to `False`):
100
+ Apply the layer norm before each encoder block.
101
+ q_noise (`float`, *optional*, defaults to 0.0):
102
+ Amount of quantization noise (see "Training with Quantization Noise for Extreme Model Compression"). (For
103
+ more detail, see fairseq's documentation on quant_noise).
104
+ qn_block_size (`int`, *optional*, defaults to 8):
105
+ Size of the blocks for subsequent quantization with iPQ (see q_noise).
106
+ kdim (`int`, *optional*, defaults to None):
107
+ Dimension of the key in the attention, if different from the other values.
108
+ vdim (`int`, *optional*, defaults to None):
109
+ Dimension of the value in the attention, if different from the other values.
110
+ use_cache (`bool`, *optional*, defaults to `True`):
111
+ Whether or not the model should return the last key/values attentions (not used by all models).
112
+ traceable (`bool`, *optional*, defaults to `False`):
113
+ Changes return value of the encoder's inner_state to stacked tensors.
114
+
115
+ Example:
116
+ ```python
117
+ >>> from transformers import GraphormerForGraphClassification, GraphormerConfig
118
+
119
+ >>> # Initializing a Graphormer graphormer-base-pcqm4mv2 style configuration
120
+ >>> configuration = GraphormerConfig()
121
+
122
+ >>> # Initializing a model from the graphormer-base-pcqm4mv1 style configuration
123
+ >>> model = GraphormerForGraphClassification(configuration)
124
+
125
+ >>> # Accessing the model configuration
126
+ >>> configuration = model.config
127
+ ```
128
+ """
129
+
130
+ model_type = "graphormer"
131
+ keys_to_ignore_at_inference = ["past_key_values"]
132
+
133
+ def __init__(
134
+ self,
135
+ num_classes: int = 1,
136
+ num_atoms: int = 512 * 9,
137
+ num_edges: int = 512 * 3,
138
+ num_in_degree: int = 512,
139
+ num_out_degree: int = 512,
140
+ num_spatial: int = 512,
141
+ num_edge_dis: int = 128,
142
+ multi_hop_max_dist: int = 5, # sometimes is 20
143
+ spatial_pos_max: int = 1024,
144
+ edge_type: str = "multi_hop",
145
+ max_nodes: int = 512,
146
+ share_input_output_embed: bool = False,
147
+ num_hidden_layers: int = 12,
148
+ embedding_dim: int = 768,
149
+ ffn_embedding_dim: int = 768,
150
+ num_attention_heads: int = 32,
151
+ dropout: float = 0.1,
152
+ attention_dropout: float = 0.1,
153
+ activation_dropout: float = 0.1,
154
+ layerdrop: float = 0.0,
155
+ encoder_normalize_before: bool = False,
156
+ pre_layernorm: bool = False,
157
+ apply_graphormer_init: bool = False,
158
+ activation_fn: str = "gelu",
159
+ embed_scale: float = None,
160
+ freeze_embeddings: bool = False,
161
+ num_trans_layers_to_freeze: int = 0,
162
+ traceable: bool = False,
163
+ q_noise: float = 0.0,
164
+ qn_block_size: int = 8,
165
+ kdim: int = None,
166
+ vdim: int = None,
167
+ bias: bool = True,
168
+ self_attention: bool = True,
169
+ pad_token_id=0,
170
+ bos_token_id=1,
171
+ eos_token_id=2,
172
+ **kwargs,
173
+ ):
174
+ self.num_classes = num_classes
175
+ self.num_atoms = num_atoms
176
+ self.num_in_degree = num_in_degree
177
+ self.num_out_degree = num_out_degree
178
+ self.num_edges = num_edges
179
+ self.num_spatial = num_spatial
180
+ self.num_edge_dis = num_edge_dis
181
+ self.edge_type = edge_type
182
+ self.multi_hop_max_dist = multi_hop_max_dist
183
+ self.spatial_pos_max = spatial_pos_max
184
+ self.max_nodes = max_nodes
185
+ self.num_hidden_layers = num_hidden_layers
186
+ self.embedding_dim = embedding_dim
187
+ self.hidden_size = embedding_dim
188
+ self.ffn_embedding_dim = ffn_embedding_dim
189
+ self.num_attention_heads = num_attention_heads
190
+ self.dropout = dropout
191
+ self.attention_dropout = attention_dropout
192
+ self.activation_dropout = activation_dropout
193
+ self.layerdrop = layerdrop
194
+ self.encoder_normalize_before = encoder_normalize_before
195
+ self.pre_layernorm = pre_layernorm
196
+ self.apply_graphormer_init = apply_graphormer_init
197
+ self.activation_fn = activation_fn
198
+ self.embed_scale = embed_scale
199
+ self.freeze_embeddings = freeze_embeddings
200
+ self.num_trans_layers_to_freeze = num_trans_layers_to_freeze
201
+ self.share_input_output_embed = share_input_output_embed
202
+ self.traceable = traceable
203
+ self.q_noise = q_noise
204
+ self.qn_block_size = qn_block_size
205
+
206
+ # These parameters are here for future extensions
207
+ # atm, the model only supports self attention
208
+ self.kdim = kdim
209
+ self.vdim = vdim
210
+ self.self_attention = self_attention
211
+ self.bias = bias
212
+
213
+ super().__init__(
214
+ pad_token_id=pad_token_id,
215
+ bos_token_id=bos_token_id,
216
+ eos_token_id=eos_token_id,
217
+ **kwargs,
218
+ )
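A small configuration sketch built from the arguments above (values are illustrative, not tuned; `embedding_dim` must stay divisible by `num_attention_heads`, as the attention module checks).
```python
# Sketch: a deliberately tiny Graphormer configuration for quick experiments.
from transformers import GraphormerConfig

tiny_config = GraphormerConfig(
    num_classes=2,          # one-task classification with two classes
    num_hidden_layers=2,
    embedding_dim=64,
    ffn_embedding_dim=128,
    num_attention_heads=4,  # 64 / 4 = 16-dimensional heads
)
print(tiny_config.hidden_size)  # 64 - mirrors embedding_dim, as set in __init__ above
```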
llmeval-env/lib/python3.10/site-packages/transformers/models/graphormer/modeling_graphormer.py ADDED
@@ -0,0 +1,911 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Microsoft, clefourrier The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch Graphormer model."""
16
+
17
+ import math
18
+ from typing import Iterable, Iterator, List, Optional, Tuple, Union
19
+
20
+ import torch
21
+ import torch.nn as nn
22
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
23
+
24
+ from ...activations import ACT2FN
25
+ from ...modeling_outputs import (
26
+ BaseModelOutputWithNoAttention,
27
+ SequenceClassifierOutput,
28
+ )
29
+ from ...modeling_utils import PreTrainedModel
30
+ from ...utils import logging
31
+ from .configuration_graphormer import GraphormerConfig
32
+
33
+
34
+ logger = logging.get_logger(__name__)
35
+
36
+ _CHECKPOINT_FOR_DOC = "graphormer-base-pcqm4mv1"
37
+ _CONFIG_FOR_DOC = "GraphormerConfig"
38
+
39
+
40
+ from ..deprecated._archive_maps import GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
41
+
42
+
43
+ def quant_noise(module: nn.Module, p: float, block_size: int):
44
+ """
45
+ From:
46
+ https://github.com/facebookresearch/fairseq/blob/dd0079bde7f678b0cd0715cbd0ae68d661b7226d/fairseq/modules/quant_noise.py
47
+
48
+ Wraps modules and applies quantization noise to the weights for subsequent quantization with Iterative Product
49
+ Quantization as described in "Training with Quantization Noise for Extreme Model Compression"
50
+
51
+ Args:
52
+ - module: nn.Module
53
+ - p: amount of Quantization Noise
54
+ - block_size: size of the blocks for subsequent quantization with iPQ
55
+
56
+ Remarks:
57
+ - Module weights must have the right sizes wrt the block size
58
+ - Only Linear, Embedding and Conv2d modules are supported for the moment
59
+ - For more detail on how to quantize by blocks with convolutional weights, see "And the Bit Goes Down:
60
+ Revisiting the Quantization of Neural Networks"
61
+ - We implement the simplest form of noise here as stated in the paper which consists in randomly dropping
62
+ blocks
63
+ """
64
+
65
+ # if no quantization noise, don't register hook
66
+ if p <= 0:
67
+ return module
68
+
69
+ # supported modules
70
+ if not isinstance(module, (nn.Linear, nn.Embedding, nn.Conv2d)):
71
+ raise NotImplementedError("Module unsupported for quant_noise.")
72
+
73
+ # test whether module.weight has the right sizes wrt block_size
74
+ is_conv = module.weight.ndim == 4
75
+
76
+ # 2D matrix
77
+ if not is_conv:
78
+ if module.weight.size(1) % block_size != 0:
79
+ raise AssertionError("Input features must be a multiple of block sizes")
80
+
81
+ # 4D matrix
82
+ else:
83
+ # 1x1 convolutions
84
+ if module.kernel_size == (1, 1):
85
+ if module.in_channels % block_size != 0:
86
+ raise AssertionError("Input channels must be a multiple of block sizes")
87
+ # regular convolutions
88
+ else:
89
+ k = module.kernel_size[0] * module.kernel_size[1]
90
+ if k % block_size != 0:
91
+ raise AssertionError("Kernel size must be a multiple of block size")
92
+
93
+ def _forward_pre_hook(mod, input):
94
+ # no noise for evaluation
95
+ if mod.training:
96
+ if not is_conv:
97
+ # gather weight and sizes
98
+ weight = mod.weight
99
+ in_features = weight.size(1)
100
+ out_features = weight.size(0)
101
+
102
+ # split weight matrix into blocks and randomly drop selected blocks
103
+ mask = torch.zeros(in_features // block_size * out_features, device=weight.device)
104
+ mask.bernoulli_(p)
105
+ mask = mask.repeat_interleave(block_size, -1).view(-1, in_features)
106
+
107
+ else:
108
+ # gather weight and sizes
109
+ weight = mod.weight
110
+ in_channels = mod.in_channels
111
+ out_channels = mod.out_channels
112
+
113
+ # split weight matrix into blocks and randomly drop selected blocks
114
+ if mod.kernel_size == (1, 1):
115
+ mask = torch.zeros(
116
+ int(in_channels // block_size * out_channels),
117
+ device=weight.device,
118
+ )
119
+ mask.bernoulli_(p)
120
+ mask = mask.repeat_interleave(block_size, -1).view(-1, in_channels)
121
+ else:
122
+ mask = torch.zeros(weight.size(0), weight.size(1), device=weight.device)
123
+ mask.bernoulli_(p)
124
+ mask = mask.unsqueeze(2).unsqueeze(3).repeat(1, 1, mod.kernel_size[0], mod.kernel_size[1])
125
+
126
+ # scale weights and apply mask
127
+ mask = mask.to(torch.bool) # x.bool() is not currently supported in TorchScript
128
+ s = 1 / (1 - p)
129
+ mod.weight.data = s * weight.masked_fill(mask, 0)
130
+
131
+ module.register_forward_pre_hook(_forward_pre_hook)
132
+ return module
133
+
134
+
135
+ class LayerDropModuleList(nn.ModuleList):
136
+ """
137
+ From:
138
+ https://github.com/facebookresearch/fairseq/blob/dd0079bde7f678b0cd0715cbd0ae68d661b7226d/fairseq/modules/layer_drop.py
139
+ A LayerDrop implementation based on [`torch.nn.ModuleList`]. LayerDrop as described in
140
+ https://arxiv.org/abs/1909.11556.
141
+
142
+ We refresh the choice of which layers to drop every time we iterate over the LayerDropModuleList instance. During
143
+ evaluation we always iterate over all layers.
144
+
145
+ Usage:
146
+
147
+ ```python
148
+ layers = LayerDropList(p=0.5, modules=[layer1, layer2, layer3])
149
+ for layer in layers: # this might iterate over layers 1 and 3
150
+ x = layer(x)
151
+ for layer in layers: # this might iterate over all layers
152
+ x = layer(x)
153
+ for layer in layers: # this might not iterate over any layers
154
+ x = layer(x)
155
+ ```
156
+
157
+ Args:
158
+ p (float): probability of dropping out each layer
159
+ modules (iterable, optional): an iterable of modules to add
160
+ """
161
+
162
+ def __init__(self, p: float, modules: Optional[Iterable[nn.Module]] = None):
163
+ super().__init__(modules)
164
+ self.p = p
165
+
166
+ def __iter__(self) -> Iterator[nn.Module]:
167
+ dropout_probs = torch.empty(len(self)).uniform_()
168
+ for i, m in enumerate(super().__iter__()):
169
+ if not self.training or (dropout_probs[i] > self.p):
170
+ yield m
171
+
172
+
173
+ class GraphormerGraphNodeFeature(nn.Module):
174
+ """
175
+ Compute node features for each node in the graph.
176
+ """
177
+
178
+ def __init__(self, config: GraphormerConfig):
179
+ super().__init__()
180
+ self.num_heads = config.num_attention_heads
181
+ self.num_atoms = config.num_atoms
182
+
183
+ self.atom_encoder = nn.Embedding(config.num_atoms + 1, config.hidden_size, padding_idx=config.pad_token_id)
184
+ self.in_degree_encoder = nn.Embedding(
185
+ config.num_in_degree, config.hidden_size, padding_idx=config.pad_token_id
186
+ )
187
+ self.out_degree_encoder = nn.Embedding(
188
+ config.num_out_degree, config.hidden_size, padding_idx=config.pad_token_id
189
+ )
190
+
191
+ self.graph_token = nn.Embedding(1, config.hidden_size)
192
+
193
+ def forward(
194
+ self,
195
+ input_nodes: torch.LongTensor,
196
+ in_degree: torch.LongTensor,
197
+ out_degree: torch.LongTensor,
198
+ ) -> torch.Tensor:
199
+ n_graph, n_node = input_nodes.size()[:2]
200
+
201
+ node_feature = ( # node feature + graph token
202
+ self.atom_encoder(input_nodes).sum(dim=-2) # [n_graph, n_node, n_hidden]
203
+ + self.in_degree_encoder(in_degree)
204
+ + self.out_degree_encoder(out_degree)
205
+ )
206
+
207
+ graph_token_feature = self.graph_token.weight.unsqueeze(0).repeat(n_graph, 1, 1)
208
+
209
+ graph_node_feature = torch.cat([graph_token_feature, node_feature], dim=1)
210
+
211
+ return graph_node_feature
212
+
213
+
214
+ class GraphormerGraphAttnBias(nn.Module):
215
+ """
216
+ Compute attention bias for each head.
217
+ """
218
+
219
+ def __init__(self, config: GraphormerConfig):
220
+ super().__init__()
221
+ self.num_heads = config.num_attention_heads
222
+ self.multi_hop_max_dist = config.multi_hop_max_dist
223
+
224
+ # We do not change edge feature embedding learning, as edge embeddings are represented as a combination of the original features
225
+ # + shortest path
226
+ self.edge_encoder = nn.Embedding(config.num_edges + 1, config.num_attention_heads, padding_idx=0)
227
+
228
+ self.edge_type = config.edge_type
229
+ if self.edge_type == "multi_hop":
230
+ self.edge_dis_encoder = nn.Embedding(
231
+ config.num_edge_dis * config.num_attention_heads * config.num_attention_heads,
232
+ 1,
233
+ )
234
+
235
+ self.spatial_pos_encoder = nn.Embedding(config.num_spatial, config.num_attention_heads, padding_idx=0)
236
+
237
+ self.graph_token_virtual_distance = nn.Embedding(1, config.num_attention_heads)
238
+
239
+ def forward(
240
+ self,
241
+ input_nodes: torch.LongTensor,
242
+ attn_bias: torch.Tensor,
243
+ spatial_pos: torch.LongTensor,
244
+ input_edges: torch.LongTensor,
245
+ attn_edge_type: torch.LongTensor,
246
+ ) -> torch.Tensor:
247
+ n_graph, n_node = input_nodes.size()[:2]
248
+ graph_attn_bias = attn_bias.clone()
249
+ graph_attn_bias = graph_attn_bias.unsqueeze(1).repeat(
250
+ 1, self.num_heads, 1, 1
251
+ ) # [n_graph, n_head, n_node+1, n_node+1]
252
+
253
+ # spatial pos
254
+ # [n_graph, n_node, n_node, n_head] -> [n_graph, n_head, n_node, n_node]
255
+ spatial_pos_bias = self.spatial_pos_encoder(spatial_pos).permute(0, 3, 1, 2)
256
+ graph_attn_bias[:, :, 1:, 1:] = graph_attn_bias[:, :, 1:, 1:] + spatial_pos_bias
257
+
258
+ # reset spatial pos here
259
+ t = self.graph_token_virtual_distance.weight.view(1, self.num_heads, 1)
260
+ graph_attn_bias[:, :, 1:, 0] = graph_attn_bias[:, :, 1:, 0] + t
261
+ graph_attn_bias[:, :, 0, :] = graph_attn_bias[:, :, 0, :] + t
262
+
263
+ # edge feature
264
+ if self.edge_type == "multi_hop":
265
+ spatial_pos_ = spatial_pos.clone()
266
+
267
+ spatial_pos_[spatial_pos_ == 0] = 1 # set pad to 1
268
+ # set 1 to 1, input_nodes > 1 to input_nodes - 1
269
+ spatial_pos_ = torch.where(spatial_pos_ > 1, spatial_pos_ - 1, spatial_pos_)
270
+ if self.multi_hop_max_dist > 0:
271
+ spatial_pos_ = spatial_pos_.clamp(0, self.multi_hop_max_dist)
272
+ input_edges = input_edges[:, :, :, : self.multi_hop_max_dist, :]
273
+ # [n_graph, n_node, n_node, max_dist, n_head]
274
+
275
+ input_edges = self.edge_encoder(input_edges).mean(-2)
276
+ max_dist = input_edges.size(-2)
277
+ edge_input_flat = input_edges.permute(3, 0, 1, 2, 4).reshape(max_dist, -1, self.num_heads)
278
+ edge_input_flat = torch.bmm(
279
+ edge_input_flat,
280
+ self.edge_dis_encoder.weight.reshape(-1, self.num_heads, self.num_heads)[:max_dist, :, :],
281
+ )
282
+ input_edges = edge_input_flat.reshape(max_dist, n_graph, n_node, n_node, self.num_heads).permute(
283
+ 1, 2, 3, 0, 4
284
+ )
285
+ input_edges = (input_edges.sum(-2) / (spatial_pos_.float().unsqueeze(-1))).permute(0, 3, 1, 2)
286
+ else:
287
+ # [n_graph, n_node, n_node, n_head] -> [n_graph, n_head, n_node, n_node]
288
+ input_edges = self.edge_encoder(attn_edge_type).mean(-2).permute(0, 3, 1, 2)
289
+
290
+ graph_attn_bias[:, :, 1:, 1:] = graph_attn_bias[:, :, 1:, 1:] + input_edges
291
+ graph_attn_bias = graph_attn_bias + attn_bias.unsqueeze(1) # reset
292
+
293
+ return graph_attn_bias
294
+
295
+
296
+ class GraphormerMultiheadAttention(nn.Module):
297
+ """Multi-headed attention.
298
+
299
+ See "Attention Is All You Need" for more details.
300
+ """
301
+
302
+ def __init__(self, config: GraphormerConfig):
303
+ super().__init__()
304
+ self.embedding_dim = config.embedding_dim
305
+ self.kdim = config.kdim if config.kdim is not None else config.embedding_dim
306
+ self.vdim = config.vdim if config.vdim is not None else config.embedding_dim
307
+ self.qkv_same_dim = self.kdim == config.embedding_dim and self.vdim == config.embedding_dim
308
+
309
+ self.num_heads = config.num_attention_heads
310
+ self.attention_dropout_module = torch.nn.Dropout(p=config.attention_dropout, inplace=False)
311
+
312
+ self.head_dim = config.embedding_dim // config.num_attention_heads
313
+ if not (self.head_dim * config.num_attention_heads == self.embedding_dim):
314
+ raise AssertionError("The embedding_dim must be divisible by num_heads.")
315
+ self.scaling = self.head_dim**-0.5
316
+
317
+ self.self_attention = True # config.self_attention
318
+ if not (self.self_attention):
319
+ raise NotImplementedError("The Graphormer model only supports self attention for now.")
320
+ if self.self_attention and not self.qkv_same_dim:
321
+ raise AssertionError("Self-attention requires query, key and value to be of the same size.")
322
+
323
+ self.k_proj = quant_noise(
324
+ nn.Linear(self.kdim, config.embedding_dim, bias=config.bias),
325
+ config.q_noise,
326
+ config.qn_block_size,
327
+ )
328
+ self.v_proj = quant_noise(
329
+ nn.Linear(self.vdim, config.embedding_dim, bias=config.bias),
330
+ config.q_noise,
331
+ config.qn_block_size,
332
+ )
333
+ self.q_proj = quant_noise(
334
+ nn.Linear(config.embedding_dim, config.embedding_dim, bias=config.bias),
335
+ config.q_noise,
336
+ config.qn_block_size,
337
+ )
338
+
339
+ self.out_proj = quant_noise(
340
+ nn.Linear(config.embedding_dim, config.embedding_dim, bias=config.bias),
341
+ config.q_noise,
342
+ config.qn_block_size,
343
+ )
344
+
345
+ self.onnx_trace = False
346
+
347
+ def reset_parameters(self):
348
+ if self.qkv_same_dim:
349
+ # Empirically observed the convergence to be much better with
350
+ # the scaled initialization
351
+ nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))
352
+ nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))
353
+ nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))
354
+ else:
355
+ nn.init.xavier_uniform_(self.k_proj.weight)
356
+ nn.init.xavier_uniform_(self.v_proj.weight)
357
+ nn.init.xavier_uniform_(self.q_proj.weight)
358
+
359
+ nn.init.xavier_uniform_(self.out_proj.weight)
360
+ if self.out_proj.bias is not None:
361
+ nn.init.constant_(self.out_proj.bias, 0.0)
362
+
363
+ def forward(
364
+ self,
365
+ query: torch.LongTensor,
366
+ key: Optional[torch.Tensor],
367
+ value: Optional[torch.Tensor],
368
+ attn_bias: Optional[torch.Tensor],
369
+ key_padding_mask: Optional[torch.Tensor] = None,
370
+ need_weights: bool = True,
371
+ attn_mask: Optional[torch.Tensor] = None,
372
+ before_softmax: bool = False,
373
+ need_head_weights: bool = False,
374
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
375
+ """
376
+ Args:
377
+ key_padding_mask (torch.ByteTensor, optional): mask to exclude
378
+ keys that are pads, of shape `(batch, src_len)`, where padding elements are indicated by 1s.
379
+ need_weights (bool, optional): return the attention weights,
380
+ averaged over heads (default: False).
381
+ attn_mask (torch.ByteTensor, optional): typically used to
382
+ implement causal attention, where the mask prevents the attention from looking forward in time
383
+ (default: None).
384
+ before_softmax (bool, optional): return the raw attention
385
+ weights and values before the attention softmax.
386
+ need_head_weights (bool, optional): return the attention
387
+ weights for each head. Implies *need_weights*. Default: return the average attention weights over all
388
+ heads.
389
+ """
390
+ if need_head_weights:
391
+ need_weights = True
392
+
393
+ tgt_len, bsz, embedding_dim = query.size()
394
+ src_len = tgt_len
395
+ if not (embedding_dim == self.embedding_dim):
396
+ raise AssertionError(
397
+ f"The query embedding dimension {embedding_dim} is not equal to the expected embedding_dim"
398
+ f" {self.embedding_dim}."
399
+ )
400
+ if not (list(query.size()) == [tgt_len, bsz, embedding_dim]):
401
+ raise AssertionError("Query size incorrect in Graphormer, compared to model dimensions.")
402
+
403
+ if key is not None:
404
+ src_len, key_bsz, _ = key.size()
405
+ if not torch.jit.is_scripting():
406
+ if (key_bsz != bsz) or (value is None) or not (src_len, bsz == value.shape[:2]):
407
+ raise AssertionError(
408
+ "The batch shape does not match the key or value shapes provided to the attention."
409
+ )
410
+
411
+ q = self.q_proj(query)
412
+ k = self.k_proj(query)
413
+ v = self.v_proj(query)
414
+
415
+ q *= self.scaling
416
+
417
+ q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim).transpose(0, 1)
418
+ if k is not None:
419
+ k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)
420
+ if v is not None:
421
+ v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)
422
+
423
+ if (k is None) or not (k.size(1) == src_len):
424
+ raise AssertionError("The shape of the key generated in the attention is incorrect")
425
+
426
+ # This is part of a workaround to get around fork/join parallelism
427
+ # not supporting Optional types.
428
+ if key_padding_mask is not None and key_padding_mask.dim() == 0:
429
+ key_padding_mask = None
430
+
431
+ if key_padding_mask is not None:
432
+ if key_padding_mask.size(0) != bsz or key_padding_mask.size(1) != src_len:
433
+ raise AssertionError(
434
+ "The shape of the generated padding mask for the key does not match expected dimensions."
435
+ )
436
+ attn_weights = torch.bmm(q, k.transpose(1, 2))
437
+ attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz)
438
+
439
+ if list(attn_weights.size()) != [bsz * self.num_heads, tgt_len, src_len]:
440
+ raise AssertionError("The attention weights generated do not match the expected dimensions.")
441
+
442
+ if attn_bias is not None:
443
+ attn_weights += attn_bias.view(bsz * self.num_heads, tgt_len, src_len)
444
+
445
+ if attn_mask is not None:
446
+ attn_mask = attn_mask.unsqueeze(0)
447
+ attn_weights += attn_mask
448
+
449
+ if key_padding_mask is not None:
450
+ # don't attend to padding symbols
451
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
452
+ attn_weights = attn_weights.masked_fill(
453
+ key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool), float("-inf")
454
+ )
455
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
456
+
457
+ if before_softmax:
458
+ return attn_weights, v
459
+
460
+ attn_weights_float = torch.nn.functional.softmax(attn_weights, dim=-1)
461
+ attn_weights = attn_weights_float.type_as(attn_weights)
462
+ attn_probs = self.attention_dropout_module(attn_weights)
463
+
464
+ if v is None:
465
+ raise AssertionError("No value generated")
466
+ attn = torch.bmm(attn_probs, v)
467
+ if list(attn.size()) != [bsz * self.num_heads, tgt_len, self.head_dim]:
468
+ raise AssertionError("The attention generated do not match the expected dimensions.")
469
+
470
+ attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embedding_dim)
471
+ attn: torch.Tensor = self.out_proj(attn)
472
+
473
+ attn_weights = None
474
+ if need_weights:
475
+ attn_weights = attn_weights_float.contiguous().view(bsz, self.num_heads, tgt_len, src_len).transpose(1, 0)
476
+ if not need_head_weights:
477
+ # average attention weights over heads
478
+ attn_weights = attn_weights.mean(dim=0)
479
+
480
+ return attn, attn_weights
481
+
482
+ def apply_sparse_mask(self, attn_weights: torch.Tensor, tgt_len: int, src_len: int, bsz: int) -> torch.Tensor:
483
+ return attn_weights
484
+
485
+
486
+ class GraphormerGraphEncoderLayer(nn.Module):
487
+ def __init__(self, config: GraphormerConfig) -> None:
488
+ super().__init__()
489
+
490
+ # Initialize parameters
491
+ self.embedding_dim = config.embedding_dim
492
+ self.num_attention_heads = config.num_attention_heads
493
+ self.q_noise = config.q_noise
494
+ self.qn_block_size = config.qn_block_size
495
+ self.pre_layernorm = config.pre_layernorm
496
+
497
+ self.dropout_module = torch.nn.Dropout(p=config.dropout, inplace=False)
498
+
499
+ self.activation_dropout_module = torch.nn.Dropout(p=config.activation_dropout, inplace=False)
500
+
501
+ # Initialize blocks
502
+ self.activation_fn = ACT2FN[config.activation_fn]
503
+ self.self_attn = GraphormerMultiheadAttention(config)
504
+
505
+ # layer norm associated with the self attention layer
506
+ self.self_attn_layer_norm = nn.LayerNorm(self.embedding_dim)
507
+
508
+ self.fc1 = self.build_fc(
509
+ self.embedding_dim,
510
+ config.ffn_embedding_dim,
511
+ q_noise=config.q_noise,
512
+ qn_block_size=config.qn_block_size,
513
+ )
514
+ self.fc2 = self.build_fc(
515
+ config.ffn_embedding_dim,
516
+ self.embedding_dim,
517
+ q_noise=config.q_noise,
518
+ qn_block_size=config.qn_block_size,
519
+ )
520
+
521
+ # layer norm associated with the position wise feed-forward NN
522
+ self.final_layer_norm = nn.LayerNorm(self.embedding_dim)
523
+
524
+ def build_fc(
525
+ self, input_dim: int, output_dim: int, q_noise: float, qn_block_size: int
526
+ ) -> Union[nn.Module, nn.Linear, nn.Embedding, nn.Conv2d]:
527
+ return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size)
528
+
529
+ def forward(
530
+ self,
531
+ input_nodes: torch.Tensor,
532
+ self_attn_bias: Optional[torch.Tensor] = None,
533
+ self_attn_mask: Optional[torch.Tensor] = None,
534
+ self_attn_padding_mask: Optional[torch.Tensor] = None,
535
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
536
+ """
537
+ nn.LayerNorm is applied either before or after the self-attention/ffn modules similar to the original
538
+ Transformer implementation.
539
+ """
540
+ residual = input_nodes
541
+ if self.pre_layernorm:
542
+ input_nodes = self.self_attn_layer_norm(input_nodes)
543
+
544
+ input_nodes, attn = self.self_attn(
545
+ query=input_nodes,
546
+ key=input_nodes,
547
+ value=input_nodes,
548
+ attn_bias=self_attn_bias,
549
+ key_padding_mask=self_attn_padding_mask,
550
+ need_weights=False,
551
+ attn_mask=self_attn_mask,
552
+ )
553
+ input_nodes = self.dropout_module(input_nodes)
554
+ input_nodes = residual + input_nodes
555
+ if not self.pre_layernorm:
556
+ input_nodes = self.self_attn_layer_norm(input_nodes)
557
+
558
+ residual = input_nodes
559
+ if self.pre_layernorm:
560
+ input_nodes = self.final_layer_norm(input_nodes)
561
+ input_nodes = self.activation_fn(self.fc1(input_nodes))
562
+ input_nodes = self.activation_dropout_module(input_nodes)
563
+ input_nodes = self.fc2(input_nodes)
564
+ input_nodes = self.dropout_module(input_nodes)
565
+ input_nodes = residual + input_nodes
566
+ if not self.pre_layernorm:
567
+ input_nodes = self.final_layer_norm(input_nodes)
568
+
569
+ return input_nodes, attn
570
+
571
+
572
+ class GraphormerGraphEncoder(nn.Module):
573
+ def __init__(self, config: GraphormerConfig):
574
+ super().__init__()
575
+
576
+ self.dropout_module = torch.nn.Dropout(p=config.dropout, inplace=False)
577
+ self.layerdrop = config.layerdrop
578
+ self.embedding_dim = config.embedding_dim
579
+ self.apply_graphormer_init = config.apply_graphormer_init
580
+ self.traceable = config.traceable
581
+
582
+ self.graph_node_feature = GraphormerGraphNodeFeature(config)
583
+ self.graph_attn_bias = GraphormerGraphAttnBias(config)
584
+
585
+ self.embed_scale = config.embed_scale
586
+
587
+ if config.q_noise > 0:
588
+ self.quant_noise = quant_noise(
589
+ nn.Linear(self.embedding_dim, self.embedding_dim, bias=False),
590
+ config.q_noise,
591
+ config.qn_block_size,
592
+ )
593
+ else:
594
+ self.quant_noise = None
595
+
596
+ if config.encoder_normalize_before:
597
+ self.emb_layer_norm = nn.LayerNorm(self.embedding_dim)
598
+ else:
599
+ self.emb_layer_norm = None
600
+
601
+ if config.pre_layernorm:
602
+ self.final_layer_norm = nn.LayerNorm(self.embedding_dim)
603
+
604
+ if self.layerdrop > 0.0:
605
+ self.layers = LayerDropModuleList(p=self.layerdrop)
606
+ else:
607
+ self.layers = nn.ModuleList([])
608
+ self.layers.extend([GraphormerGraphEncoderLayer(config) for _ in range(config.num_hidden_layers)])
609
+
610
+ # Apply initialization of model params after building the model
611
+ if config.freeze_embeddings:
612
+ raise NotImplementedError("Freezing embeddings is not implemented yet.")
613
+
614
+ for layer in range(config.num_trans_layers_to_freeze):
615
+ m = self.layers[layer]
616
+ if m is not None:
617
+ for p in m.parameters():
618
+ p.requires_grad = False
619
+
620
+ def forward(
621
+ self,
622
+ input_nodes: torch.LongTensor,
623
+ input_edges: torch.LongTensor,
624
+ attn_bias: torch.Tensor,
625
+ in_degree: torch.LongTensor,
626
+ out_degree: torch.LongTensor,
627
+ spatial_pos: torch.LongTensor,
628
+ attn_edge_type: torch.LongTensor,
629
+ perturb=None,
630
+ last_state_only: bool = False,
631
+ token_embeddings: Optional[torch.Tensor] = None,
632
+ attn_mask: Optional[torch.Tensor] = None,
633
+ ) -> Tuple[Union[torch.Tensor, List[torch.LongTensor]], torch.Tensor]:
634
+ # compute padding mask. This is needed for multi-head attention
635
+ data_x = input_nodes
636
+ n_graph, n_node = data_x.size()[:2]
637
+ padding_mask = (data_x[:, :, 0]).eq(0)
638
+ padding_mask_cls = torch.zeros(n_graph, 1, device=padding_mask.device, dtype=padding_mask.dtype)
639
+ padding_mask = torch.cat((padding_mask_cls, padding_mask), dim=1)
640
+
641
+ attn_bias = self.graph_attn_bias(input_nodes, attn_bias, spatial_pos, input_edges, attn_edge_type)
642
+
643
+ if token_embeddings is not None:
644
+ input_nodes = token_embeddings
645
+ else:
646
+ input_nodes = self.graph_node_feature(input_nodes, in_degree, out_degree)
647
+
648
+ if perturb is not None:
649
+ input_nodes[:, 1:, :] += perturb
650
+
651
+ if self.embed_scale is not None:
652
+ input_nodes = input_nodes * self.embed_scale
653
+
654
+ if self.quant_noise is not None:
655
+ input_nodes = self.quant_noise(input_nodes)
656
+
657
+ if self.emb_layer_norm is not None:
658
+ input_nodes = self.emb_layer_norm(input_nodes)
659
+
660
+ input_nodes = self.dropout_module(input_nodes)
661
+
662
+ input_nodes = input_nodes.transpose(0, 1)
663
+
664
+ inner_states = []
665
+ if not last_state_only:
666
+ inner_states.append(input_nodes)
667
+
668
+ for layer in self.layers:
669
+ input_nodes, _ = layer(
670
+ input_nodes,
671
+ self_attn_padding_mask=padding_mask,
672
+ self_attn_mask=attn_mask,
673
+ self_attn_bias=attn_bias,
674
+ )
675
+ if not last_state_only:
676
+ inner_states.append(input_nodes)
677
+
678
+ graph_rep = input_nodes[0, :, :]
679
+
680
+ if last_state_only:
681
+ inner_states = [input_nodes]
682
+
683
+ if self.traceable:
684
+ return torch.stack(inner_states), graph_rep
685
+ else:
686
+ return inner_states, graph_rep
687
+
688
+
689
+ class GraphormerDecoderHead(nn.Module):
690
+ def __init__(self, embedding_dim: int, num_classes: int):
691
+ super().__init__()
692
+ """num_classes should be 1 for regression, or the number of classes for classification"""
693
+ self.lm_output_learned_bias = nn.Parameter(torch.zeros(1))
694
+ self.classifier = nn.Linear(embedding_dim, num_classes, bias=False)
695
+ self.num_classes = num_classes
696
+
697
+ def forward(self, input_nodes: torch.Tensor, **unused) -> torch.Tensor:
698
+ input_nodes = self.classifier(input_nodes)
699
+ input_nodes = input_nodes + self.lm_output_learned_bias
700
+ return input_nodes
701
+
702
+
703
+ class GraphormerPreTrainedModel(PreTrainedModel):
704
+ """
705
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
706
+ models.
707
+ """
708
+
709
+ config_class = GraphormerConfig
710
+ base_model_prefix = "graphormer"
711
+ main_input_name_nodes = "input_nodes"
712
+ main_input_name_edges = "input_edges"
713
+
714
+ def normal_(self, data: torch.Tensor):
715
+ # with FSDP, module params will be on CUDA, so we cast them back to CPU
716
+ # so that the RNG is consistent with and without FSDP
717
+ data.copy_(data.cpu().normal_(mean=0.0, std=0.02).to(data.device))
718
+
719
+ def init_graphormer_params(self, module: Union[nn.Linear, nn.Embedding, GraphormerMultiheadAttention]):
720
+ """
721
+ Initialize the weights specific to the Graphormer Model.
722
+ """
723
+ if isinstance(module, nn.Linear):
724
+ self.normal_(module.weight.data)
725
+ if module.bias is not None:
726
+ module.bias.data.zero_()
727
+ if isinstance(module, nn.Embedding):
728
+ self.normal_(module.weight.data)
729
+ if module.padding_idx is not None:
730
+ module.weight.data[module.padding_idx].zero_()
731
+ if isinstance(module, GraphormerMultiheadAttention):
732
+ self.normal_(module.q_proj.weight.data)
733
+ self.normal_(module.k_proj.weight.data)
734
+ self.normal_(module.v_proj.weight.data)
735
+
736
+ def _init_weights(
737
+ self,
738
+ module: Union[
739
+ nn.Linear, nn.Conv2d, nn.Embedding, nn.LayerNorm, GraphormerMultiheadAttention, GraphormerGraphEncoder
740
+ ],
741
+ ):
742
+ """
743
+ Initialize the weights
744
+ """
745
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
746
+ # We might be missing part of the Linear init, dependent on the layer num
747
+ module.weight.data.normal_(mean=0.0, std=0.02)
748
+ if module.bias is not None:
749
+ module.bias.data.zero_()
750
+ elif isinstance(module, nn.Embedding):
751
+ module.weight.data.normal_(mean=0.0, std=0.02)
752
+ if module.padding_idx is not None:
753
+ module.weight.data[module.padding_idx].zero_()
754
+ elif isinstance(module, GraphormerMultiheadAttention):
755
+ module.q_proj.weight.data.normal_(mean=0.0, std=0.02)
756
+ module.k_proj.weight.data.normal_(mean=0.0, std=0.02)
757
+ module.v_proj.weight.data.normal_(mean=0.0, std=0.02)
758
+ module.reset_parameters()
759
+ elif isinstance(module, nn.LayerNorm):
760
+ module.bias.data.zero_()
761
+ module.weight.data.fill_(1.0)
762
+ elif isinstance(module, GraphormerGraphEncoder):
763
+ if module.apply_graphormer_init:
764
+ module.apply(self.init_graphormer_params)
765
+
766
+ elif isinstance(module, nn.LayerNorm):
767
+ module.bias.data.zero_()
768
+ module.weight.data.fill_(1.0)
769
+
770
+
771
+ class GraphormerModel(GraphormerPreTrainedModel):
772
+ """The Graphormer model is a graph-encoder model.
773
+
774
+ It goes from a graph to its representation. If you want to use the model for a downstream classification task, use
775
+ GraphormerForGraphClassification instead. For any other downstream task, feel free to add a new class, or combine
776
+ this model with a downstream model of your choice, following the example in GraphormerForGraphClassification.
777
+ """
778
+
779
+ def __init__(self, config: GraphormerConfig):
780
+ super().__init__(config)
781
+ self.max_nodes = config.max_nodes
782
+
783
+ self.graph_encoder = GraphormerGraphEncoder(config)
784
+
785
+ self.share_input_output_embed = config.share_input_output_embed
786
+ self.lm_output_learned_bias = None
787
+
788
+ # Remove head is set to true during fine-tuning
789
+ self.load_softmax = not getattr(config, "remove_head", False)
790
+
791
+ self.lm_head_transform_weight = nn.Linear(config.embedding_dim, config.embedding_dim)
792
+ self.activation_fn = ACT2FN[config.activation_fn]
793
+ self.layer_norm = nn.LayerNorm(config.embedding_dim)
794
+
795
+ self.post_init()
796
+
797
+ def reset_output_layer_parameters(self):
798
+ self.lm_output_learned_bias = nn.Parameter(torch.zeros(1))
799
+
800
+ def forward(
801
+ self,
802
+ input_nodes: torch.LongTensor,
803
+ input_edges: torch.LongTensor,
804
+ attn_bias: torch.Tensor,
805
+ in_degree: torch.LongTensor,
806
+ out_degree: torch.LongTensor,
807
+ spatial_pos: torch.LongTensor,
808
+ attn_edge_type: torch.LongTensor,
809
+ perturb: Optional[torch.FloatTensor] = None,
810
+ masked_tokens: None = None,
811
+ return_dict: Optional[bool] = None,
812
+ **unused,
813
+ ) -> Union[Tuple[torch.LongTensor], BaseModelOutputWithNoAttention]:
814
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
815
+
816
+ inner_states, graph_rep = self.graph_encoder(
817
+ input_nodes, input_edges, attn_bias, in_degree, out_degree, spatial_pos, attn_edge_type, perturb=perturb
818
+ )
819
+
820
+ # last inner state, then revert Batch and Graph len
821
+ input_nodes = inner_states[-1].transpose(0, 1)
822
+
823
+ # project masked tokens only
824
+ if masked_tokens is not None:
825
+ raise NotImplementedError
826
+
827
+ input_nodes = self.layer_norm(self.activation_fn(self.lm_head_transform_weight(input_nodes)))
828
+
829
+ # project back to size of vocabulary
830
+ if self.share_input_output_embed and hasattr(self.graph_encoder.embed_tokens, "weight"):
831
+ input_nodes = torch.nn.functional.linear(input_nodes, self.graph_encoder.embed_tokens.weight)
832
+
833
+ if not return_dict:
834
+ return tuple(x for x in [input_nodes, inner_states] if x is not None)
835
+ return BaseModelOutputWithNoAttention(last_hidden_state=input_nodes, hidden_states=inner_states)
836
+
837
+ def max_nodes(self):
838
+ """Maximum output length supported by the encoder."""
839
+ return self.max_nodes
840
+
841
+
842
+ class GraphormerForGraphClassification(GraphormerPreTrainedModel):
843
+ """
844
+ This model can be used for graph-level classification or regression tasks.
845
+
846
+ It can be trained on
847
+ - regression (by setting config.num_classes to 1); there should be one float-type label per graph
848
+ - one task classification (by setting config.num_classes to the number of classes); there should be one integer
849
+ label per graph
850
+ - binary multi-task classification (by setting config.num_classes to the number of labels); there should be a list
851
+ of integer labels for each graph.
852
+ """
853
+
854
+ def __init__(self, config: GraphormerConfig):
855
+ super().__init__(config)
856
+ self.encoder = GraphormerModel(config)
857
+ self.embedding_dim = config.embedding_dim
858
+ self.num_classes = config.num_classes
859
+ self.classifier = GraphormerDecoderHead(self.embedding_dim, self.num_classes)
860
+ self.is_encoder_decoder = True
861
+
862
+ # Initialize weights and apply final processing
863
+ self.post_init()
864
+
865
+ def forward(
866
+ self,
867
+ input_nodes: torch.LongTensor,
868
+ input_edges: torch.LongTensor,
869
+ attn_bias: torch.Tensor,
870
+ in_degree: torch.LongTensor,
871
+ out_degree: torch.LongTensor,
872
+ spatial_pos: torch.LongTensor,
873
+ attn_edge_type: torch.LongTensor,
874
+ labels: Optional[torch.LongTensor] = None,
875
+ return_dict: Optional[bool] = None,
876
+ **unused,
877
+ ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
878
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
879
+
880
+ encoder_outputs = self.encoder(
881
+ input_nodes,
882
+ input_edges,
883
+ attn_bias,
884
+ in_degree,
885
+ out_degree,
886
+ spatial_pos,
887
+ attn_edge_type,
888
+ return_dict=True,
889
+ )
890
+ outputs, hidden_states = encoder_outputs["last_hidden_state"], encoder_outputs["hidden_states"]
891
+
892
+ head_outputs = self.classifier(outputs)
893
+ logits = head_outputs[:, 0, :].contiguous()
894
+
895
+ loss = None
896
+ if labels is not None:
897
+ mask = ~torch.isnan(labels)
898
+
899
+ if self.num_classes == 1: # regression
900
+ loss_fct = MSELoss()
901
+ loss = loss_fct(logits[mask].squeeze(), labels[mask].squeeze().float())
902
+ elif self.num_classes > 1 and len(labels.shape) == 1: # One task classification
903
+ loss_fct = CrossEntropyLoss()
904
+ loss = loss_fct(logits[mask].view(-1, self.num_classes), labels[mask].view(-1))
905
+ else: # Binary multi-task classification
906
+ loss_fct = BCEWithLogitsLoss(reduction="sum")
907
+ loss = loss_fct(logits[mask], labels[mask])
908
+
909
+ if not return_dict:
910
+ return tuple(x for x in [loss, logits, hidden_states] if x is not None)
911
+ return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=hidden_states, attentions=None)
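A note on the loss selection above: GraphormerForGraphClassification picks its loss purely from `config.num_classes` and the shape of `labels`. Below is a minimal standalone sketch of those three branches with dummy tensors (plain PyTorch, no Graphormer inputs; the batch size and class count are illustrative assumptions, and the NaN-masking step from the real forward is omitted).

import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

batch_size, num_classes = 4, 3

# Regression (num_classes == 1): one float label per graph.
logits = torch.randn(batch_size, 1)
labels = torch.randn(batch_size)
loss = MSELoss()(logits.squeeze(), labels.float())

# Single-task classification (num_classes > 1, 1-D labels): one class index per graph.
logits = torch.randn(batch_size, num_classes)
labels = torch.randint(0, num_classes, (batch_size,))
loss = CrossEntropyLoss()(logits.view(-1, num_classes), labels.view(-1))

# Binary multi-task classification: one 0/1 label per task per graph.
logits = torch.randn(batch_size, num_classes)
labels = torch.randint(0, 2, (batch_size, num_classes)).float()
loss = BCEWithLogitsLoss(reduction="sum")(logits, labels)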
llmeval-env/lib/python3.10/site-packages/transformers/models/idefics/__init__.py ADDED
@@ -0,0 +1,73 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
17
+
18
+
19
+ _import_structure = {"configuration_idefics": ["IDEFICS_PRETRAINED_CONFIG_ARCHIVE_MAP", "IdeficsConfig"]}
20
+
21
+ try:
22
+ if not is_vision_available():
23
+ raise OptionalDependencyNotAvailable()
24
+ except OptionalDependencyNotAvailable:
25
+ pass
26
+ else:
27
+ _import_structure["image_processing_idefics"] = ["IdeficsImageProcessor"]
28
+
29
+ try:
30
+ if not is_torch_available():
31
+ raise OptionalDependencyNotAvailable()
32
+ except OptionalDependencyNotAvailable:
33
+ pass
34
+ else:
35
+ _import_structure["modeling_idefics"] = [
36
+ "IDEFICS_PRETRAINED_MODEL_ARCHIVE_LIST",
37
+ "IdeficsForVisionText2Text",
38
+ "IdeficsModel",
39
+ "IdeficsPreTrainedModel",
40
+ ]
41
+ _import_structure["processing_idefics"] = ["IdeficsProcessor"]
42
+
43
+
44
+ if TYPE_CHECKING:
45
+ from .configuration_idefics import IDEFICS_PRETRAINED_CONFIG_ARCHIVE_MAP, IdeficsConfig
46
+
47
+ try:
48
+ if not is_vision_available():
49
+ raise OptionalDependencyNotAvailable()
50
+ except OptionalDependencyNotAvailable:
51
+ pass
52
+ else:
53
+ from .image_processing_idefics import IdeficsImageProcessor
54
+
55
+ try:
56
+ if not is_torch_available():
57
+ raise OptionalDependencyNotAvailable()
58
+ except OptionalDependencyNotAvailable:
59
+ pass
60
+ else:
61
+ from .modeling_idefics import (
62
+ IDEFICS_PRETRAINED_MODEL_ARCHIVE_LIST,
63
+ IdeficsForVisionText2Text,
64
+ IdeficsModel,
65
+ IdeficsPreTrainedModel,
66
+ )
67
+ from .processing_idefics import IdeficsProcessor
68
+
69
+
70
+ else:
71
+ import sys
72
+
73
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
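The `_LazyModule` swap at the end of this `__init__.py` is what keeps importing the package cheap: nothing under `idefics` is actually imported until an attribute is accessed, and the `TYPE_CHECKING` branch only exists so static type checkers see the real symbols. A hedged usage sketch (assumes `transformers` is installed, and `torch` for the modeling class):

from transformers.models import idefics

config_cls = idefics.IdeficsConfig   # first access triggers import of configuration_idefics
model_cls = idefics.IdeficsModel     # triggers import of modeling_idefics (requires torch)
print(config_cls.__name__, model_cls.__name__)

If an optional backend is missing, the guards above leave those entries out of `_import_structure`, so the lazy module never attempts to import them.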
llmeval-env/lib/python3.10/site-packages/transformers/models/idefics/__pycache__/image_processing_idefics.cpython-310.pyc ADDED
Binary file (6.65 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/idefics/__pycache__/modeling_idefics.cpython-310.pyc ADDED
Binary file (49.4 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/idefics/__pycache__/processing_idefics.cpython-310.pyc ADDED
Binary file (13.5 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/idefics/__pycache__/vision.cpython-310.pyc ADDED
Binary file (15.8 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/idefics/perceiver.py ADDED
@@ -0,0 +1,188 @@
1
+ # This code was adapted from https://github.com/lucidrains/flamingo-pytorch licensed under the MIT License.
2
+ #
3
+ # MIT License
4
+ #
5
+ # Copyright (c) 2020 The Google AI Language Team Authors, The HuggingFace Inc. team and github/lonePatient
6
+ #
7
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
8
+ # of this software and associated documentation files (the "Software"), to deal
9
+ # in the Software without restriction, including without limitation the rights
10
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11
+ # copies of the Software, and to permit persons to whom the Software is
12
+ # furnished to do so, subject to the following conditions:
13
+ #
14
+ # The above copyright notice and this permission notice shall be included in all
15
+ # copies or substantial portions of the Software.
16
+ #
17
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
20
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23
+ # SOFTWARE.
24
+
25
+
26
+ """
27
+
28
+ Generic interface to various configurations of the Perceiver Resampler, that simply takes in a series of (potentially
29
+ time-indexed) contextual embeddings, and "resamples" (compresses) them down to a pre-specified number of latents! Note
30
+ that the Perceiver in general resamples based solely off the *long-range* context; there's a nice opportunity here to
31
+ prime the Perceiver Resampler with say a single layer's worth of language embeddings (the target domain), and use that
32
+ to softly "retrieve & compress" what we need --> this would be a novel contribution we should explore.
33
+
34
+ References:
35
+ - DeepMind's Flamingo: https://www.deepmind.com/blog/tackling-multiple-tasks-with-a-single-visual-language-model
36
+ - Code borrowed w/ love from: https://github.com/lucidrains/flamingo-pytorch
37
+
38
+ """
39
+ from typing import Optional, Tuple
40
+
41
+ import torch
42
+ import torch.nn as nn
43
+
44
+ from .configuration_idefics import IdeficsConfig
45
+
46
+
47
+ class IdeficsPerceiverResampler(nn.Module):
48
+ def __init__(
49
+ self, config: IdeficsConfig, embed_dim: int, depth: int, n_heads: int, head_dim: int, n_latents: int
50
+ ) -> None:
51
+ """
52
+ Instantiates a Perceiver Resampler that operates over a sequence of embeddings (say from a ResNet or ViT or
53
+ MAE) of a given dimension, performs `depth` blocks of cross-attention with a fixed `n_latents` inputs, then
54
+ returns a Tensor of shape [bsz, n_latents, embed_dim]. `embed_dim` is both the dimensionality of the embeddings
55
+ fed to the Perceiver Resampler and the dimensionality of the latent embeddings it *returns* (it could be e.g. the
56
+ ViT embed_dim, the ResNet pool dim, and so on).
57
+
58
+ Args:
59
+ config (`IdeficsConfig`): config object
60
+ embed_dim (`int`): The size of each embedding vector
61
+ depth (`int`): Depth of the Perceiver Resampler (Transformer w/ cross attention). Should be shallow (< 3).
62
+ n_heads (`int`): Number of heads in each Transformer block (for multi-headed self-attention).
63
+ head_dim (`int`): Dimensionality of each head projection in the Transformer block.
64
+ n_latents (`int`):
65
+ Number of latent embeddings to resample ("compress") the input sequence to (usually < 128).
66
+
67
+ """
68
+ super().__init__()
69
+ self.embed_dim, self.n_heads, self.head_dim, self.n_latents = embed_dim, n_heads, head_dim, n_latents
70
+ self.qk_layer_norms = config.perceiver_config.qk_layer_norms_perceiver
71
+
72
+ # Create Latents for Perceiver
73
+ self.latents = nn.Parameter(torch.randn(self.n_latents, self.embed_dim), requires_grad=True)
74
+
75
+ self.intermediate_dim = (
76
+ self.embed_dim * 4
77
+ if not hasattr(config.vision_config, "embed_dim")
78
+ else config.vision_config.embed_dim * 4
79
+ )
80
+ # Create Transformer Blocks
81
+ self.blocks = nn.ModuleList(
82
+ [
83
+ nn.ModuleList(
84
+ [
85
+ IdeficsPerceiverAttention(self.embed_dim, self.n_heads, self.head_dim, self.qk_layer_norms),
86
+ IdeficsMLP(self.intermediate_dim, config),
87
+ ]
88
+ )
89
+ for _ in range(depth)
90
+ ]
91
+ )
92
+ self.layer_norm = nn.LayerNorm(self.embed_dim)
93
+
94
+ def forward(self, context: torch.Tensor) -> torch.Tensor:
95
+ """Resample arbitrary length context & *compress* down to self.n_latents latent embeddings"""
96
+ # einsum.repeat(self.latents, "seq embed -> bsz seq embed", bsz=context.shape[0])
97
+ latents = self.latents.repeat(context.shape[0], 1, 1)
98
+
99
+ # Feed through Perceiver Attention blocks...
100
+ for attn, ff in self.blocks:
101
+ latents = attn(context, latents) + latents
102
+ latents = ff(latents) + latents
103
+
104
+ return self.layer_norm(latents)
105
+
106
+
107
+ class IdeficsPerceiverAttention(nn.Module):
108
+ def __init__(self, embed_dim: int, n_heads: int, head_dim: int, qk_layer_norms: bool) -> None:
109
+ """Perceiver Cross-Attention Module --> let long-form inputs be `context`, resampled embeddings be `latents`"""
110
+ super().__init__()
111
+ self.embed_dim, self.n_heads, self.head_dim = embed_dim, n_heads, head_dim
112
+ self.qk_layer_norms = qk_layer_norms
113
+ # Normalization & Scaling
114
+ self.context_layer_norm = nn.LayerNorm(self.embed_dim)
115
+ self.latents_layer_norm = nn.LayerNorm(self.embed_dim)
116
+ if self.qk_layer_norms:
117
+ self.q_layer_norm = nn.LayerNorm(self.head_dim)
118
+ self.k_layer_norm = nn.LayerNorm(self.head_dim)
119
+
120
+ self.qk_scale = self.head_dim**-0.5
121
+
122
+ # Q, K, V Projection (no bias -- detail from Perceiver/Flamingo Papers).
123
+ self.q_proj = nn.Linear(self.embed_dim, self.n_heads * self.head_dim, bias=False)
124
+ self.k_proj = nn.Linear(self.embed_dim, self.n_heads * self.head_dim, bias=False)
125
+ self.v_proj = nn.Linear(self.embed_dim, self.n_heads * self.head_dim, bias=False)
126
+
127
+ self.output_proj = nn.Linear(self.n_heads * self.head_dim, embed_dim, bias=False)
128
+
129
+ def forward(self, context: torch.Tensor, latents: torch.Tensor) -> torch.Tensor:
130
+ """
131
+ Runs Perceiver Self-Attention, with special (context, latents) appended along the `seq` dimension!
132
+
133
+ Args:
134
+ context (`torch.Tensor`):
135
+ Tensor of shape `[bsz, seq, embed_dim]` representing long-form context to resample.
136
+ latents (`torch.Tensor`):
137
+ Tensor of shape `[bsz, n_latents, embed_dim]` representing fixed length latents to compress to.
138
+
139
+ Returns:
140
+ `torch.Tensor`: Tensor of shape `[bsz, n_latents, embed_dim]` representing attention over latents w/ cross
141
+ from context.
142
+ """
143
+ context = self.context_layer_norm(context)
144
+ latents = self.latents_layer_norm(latents)
145
+ batch_size, seq_length, embed_dim = context.shape[:3]
146
+
147
+ # Query, Key, Value Projections --> Note that in Flamingo, latents are *concatenated* with context prior to attn!
148
+ # Note: This results in queries w/ `seq = n_latents`, and keys, values with `seq = len(context) + n_latents`
149
+ q = self.q_proj(latents)
150
+ k = self.k_proj(torch.cat([context, latents], dim=-2))
151
+ v = self.v_proj(torch.cat([context, latents], dim=-2))
152
+
153
+ # Multiheaded Self-Attention w/ stable softmax (subtract per-row max -- `amax` -- before softmax call)
154
+ # =>> `attn` should be a 2D matrix of shape [n_latents x (context + n_latents)]
155
+ # einsum.rearrange(x, "bsz seq (heads embed) -> bsz heads seq embed", heads=self.n_heads)
156
+ q, k, v = [x.reshape(batch_size, x.shape[1], self.n_heads, self.head_dim).transpose(1, 2) for x in (q, k, v)]
157
+
158
+ if self.qk_layer_norms:
159
+ q = self.q_layer_norm(q)
160
+ k = self.k_layer_norm(k)
161
+
162
+ scores = torch.einsum("... i d, ... j d -> ... i j", q * self.qk_scale, k)
163
+ stabilized_scores = scores - (scores.amax(dim=-1, keepdim=True).detach())
164
+ attn = stabilized_scores.softmax(dim=-1)
165
+
166
+ # Attend & project back to output...
167
+ resampled = torch.einsum("... i j, ... j d -> ... i d", attn, v)
168
+ # einsum.rearrange(resampled, "bsz heads seq embed -> bsz seq (heads embed)", heads=self.n_heads)
169
+ return self.output_proj(resampled.transpose(1, 2).flatten(-2))
170
+
171
+
172
+ class IdeficsMLP(nn.Module):
173
+ def __init__(self, intermediate_size, config: IdeficsConfig):
174
+ """Simple MLP block with intermediate_size and embedding size"""
175
+ super().__init__()
176
+ self.embed_dim = config.vision_config.embed_dim
177
+ self.ln = nn.LayerNorm(self.embed_dim)
178
+ self.fc = nn.Linear(self.embed_dim, intermediate_size, bias=False)
179
+ self.act = nn.ReLU()
180
+ self.c_proj = nn.Linear(intermediate_size, self.embed_dim, bias=False)
181
+
182
+ def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]]) -> torch.FloatTensor:
183
+ hidden_states = self.ln(hidden_states)
184
+ hidden_states = self.fc(hidden_states)
185
+ hidden_states = self.act(hidden_states)
186
+ hidden_states = self.c_proj(hidden_states)
187
+
188
+ return hidden_states
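The shape contract of the cross-attention above is easy to check in isolation: queries come from the latents, keys and values from the context concatenated with the latents, and the output has one vector per latent. A hedged sketch (the sizes are illustrative assumptions; assumes torch is installed):

import torch
from transformers.models.idefics.perceiver import IdeficsPerceiverAttention

attn = IdeficsPerceiverAttention(embed_dim=64, n_heads=4, head_dim=16, qk_layer_norms=False)
context = torch.randn(2, 10, 64)   # [bsz, seq, embed_dim] long-form inputs
latents = torch.randn(2, 8, 64)    # [bsz, n_latents, embed_dim] learned latents
out = attn(context, latents)
print(out.shape)                   # torch.Size([2, 8, 64]) -> one output vector per latent

Stacking `depth` of these blocks, each followed by an IdeficsMLP with residual connections, is exactly what IdeficsPerceiverResampler.forward does before the final layer norm.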
llmeval-env/lib/python3.10/site-packages/transformers/models/instructblip/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.2 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/instructblip/__pycache__/configuration_instructblip.cpython-310.pyc ADDED
Binary file (14.8 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/instructblip/__pycache__/processing_instructblip.cpython-310.pyc ADDED
Binary file (5.58 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/longformer/__init__.py ADDED
@@ -0,0 +1,135 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import (
18
+ OptionalDependencyNotAvailable,
19
+ _LazyModule,
20
+ is_tf_available,
21
+ is_tokenizers_available,
22
+ is_torch_available,
23
+ )
24
+
25
+
26
+ _import_structure = {
27
+ "configuration_longformer": [
28
+ "LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
29
+ "LongformerConfig",
30
+ "LongformerOnnxConfig",
31
+ ],
32
+ "tokenization_longformer": ["LongformerTokenizer"],
33
+ }
34
+
35
+ try:
36
+ if not is_tokenizers_available():
37
+ raise OptionalDependencyNotAvailable()
38
+ except OptionalDependencyNotAvailable:
39
+ pass
40
+ else:
41
+ _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]
42
+
43
+ try:
44
+ if not is_torch_available():
45
+ raise OptionalDependencyNotAvailable()
46
+ except OptionalDependencyNotAvailable:
47
+ pass
48
+ else:
49
+ _import_structure["modeling_longformer"] = [
50
+ "LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
51
+ "LongformerForMaskedLM",
52
+ "LongformerForMultipleChoice",
53
+ "LongformerForQuestionAnswering",
54
+ "LongformerForSequenceClassification",
55
+ "LongformerForTokenClassification",
56
+ "LongformerModel",
57
+ "LongformerPreTrainedModel",
58
+ "LongformerSelfAttention",
59
+ ]
60
+
61
+ try:
62
+ if not is_tf_available():
63
+ raise OptionalDependencyNotAvailable()
64
+ except OptionalDependencyNotAvailable:
65
+ pass
66
+ else:
67
+ _import_structure["modeling_tf_longformer"] = [
68
+ "TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
69
+ "TFLongformerForMaskedLM",
70
+ "TFLongformerForMultipleChoice",
71
+ "TFLongformerForQuestionAnswering",
72
+ "TFLongformerForSequenceClassification",
73
+ "TFLongformerForTokenClassification",
74
+ "TFLongformerModel",
75
+ "TFLongformerPreTrainedModel",
76
+ "TFLongformerSelfAttention",
77
+ ]
78
+
79
+
80
+ if TYPE_CHECKING:
81
+ from .configuration_longformer import (
82
+ LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
83
+ LongformerConfig,
84
+ LongformerOnnxConfig,
85
+ )
86
+ from .tokenization_longformer import LongformerTokenizer
87
+
88
+ try:
89
+ if not is_tokenizers_available():
90
+ raise OptionalDependencyNotAvailable()
91
+ except OptionalDependencyNotAvailable:
92
+ pass
93
+ else:
94
+ from .tokenization_longformer_fast import LongformerTokenizerFast
95
+
96
+ try:
97
+ if not is_torch_available():
98
+ raise OptionalDependencyNotAvailable()
99
+ except OptionalDependencyNotAvailable:
100
+ pass
101
+ else:
102
+ from .modeling_longformer import (
103
+ LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
104
+ LongformerForMaskedLM,
105
+ LongformerForMultipleChoice,
106
+ LongformerForQuestionAnswering,
107
+ LongformerForSequenceClassification,
108
+ LongformerForTokenClassification,
109
+ LongformerModel,
110
+ LongformerPreTrainedModel,
111
+ LongformerSelfAttention,
112
+ )
113
+
114
+ try:
115
+ if not is_tf_available():
116
+ raise OptionalDependencyNotAvailable()
117
+ except OptionalDependencyNotAvailable:
118
+ pass
119
+ else:
120
+ from .modeling_tf_longformer import (
121
+ TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
122
+ TFLongformerForMaskedLM,
123
+ TFLongformerForMultipleChoice,
124
+ TFLongformerForQuestionAnswering,
125
+ TFLongformerForSequenceClassification,
126
+ TFLongformerForTokenClassification,
127
+ TFLongformerModel,
128
+ TFLongformerPreTrainedModel,
129
+ TFLongformerSelfAttention,
130
+ )
131
+
132
+ else:
133
+ import sys
134
+
135
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
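The try/except `OptionalDependencyNotAvailable` guards above are how this `__init__.py` decides which backends to register; the same `is_*_available()` helpers can be used from user code to pick a backend up front. A hedged sketch (uses the public `allenai/longformer-base-4096` checkpoint; assumes `transformers` is installed):

from transformers.utils import is_tf_available, is_torch_available

if is_torch_available():
    from transformers import LongformerModel as Model
elif is_tf_available():
    from transformers import TFLongformerModel as Model
else:
    raise RuntimeError("Neither PyTorch nor TensorFlow is installed.")

model = Model.from_pretrained("allenai/longformer-base-4096")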
llmeval-env/lib/python3.10/site-packages/transformers/models/longformer/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.04 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/longformer/__pycache__/configuration_longformer.cpython-310.pyc ADDED
Binary file (8.25 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/longformer/__pycache__/convert_longformer_original_pytorch_lightning_to_pytorch.cpython-310.pyc ADDED
Binary file (2.29 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/longformer/__pycache__/modeling_longformer.cpython-310.pyc ADDED
Binary file (77.5 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/longformer/__pycache__/modeling_tf_longformer.cpython-310.pyc ADDED
Binary file (84.1 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/longformer/__pycache__/tokenization_longformer.cpython-310.pyc ADDED
Binary file (15.3 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/longformer/__pycache__/tokenization_longformer_fast.cpython-310.pyc ADDED
Binary file (9.61 kB). View file