applied-ai-018 committed on
Commit 7da0820 · verified · 1 Parent(s): 00f7b68

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/transformers/models/bertweet/__init__.py +29 -0
  2. env-llmeval/lib/python3.10/site-packages/transformers/models/bertweet/__pycache__/__init__.cpython-310.pyc +0 -0
  3. env-llmeval/lib/python3.10/site-packages/transformers/models/bertweet/__pycache__/tokenization_bertweet.cpython-310.pyc +0 -0
  4. env-llmeval/lib/python3.10/site-packages/transformers/models/bertweet/tokenization_bertweet.py +782 -0
  5. env-llmeval/lib/python3.10/site-packages/transformers/models/chinese_clip/__init__.py +88 -0
  6. env-llmeval/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/__init__.cpython-310.pyc +0 -0
  7. env-llmeval/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/configuration_chinese_clip.cpython-310.pyc +0 -0
  8. env-llmeval/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/convert_chinese_clip_original_pytorch_to_hf.cpython-310.pyc +0 -0
  9. env-llmeval/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/feature_extraction_chinese_clip.cpython-310.pyc +0 -0
  10. env-llmeval/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/image_processing_chinese_clip.cpython-310.pyc +0 -0
  11. env-llmeval/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/modeling_chinese_clip.cpython-310.pyc +0 -0
  12. env-llmeval/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/processing_chinese_clip.cpython-310.pyc +0 -0
  13. env-llmeval/lib/python3.10/site-packages/transformers/models/chinese_clip/configuration_chinese_clip.py +471 -0
  14. env-llmeval/lib/python3.10/site-packages/transformers/models/chinese_clip/convert_chinese_clip_original_pytorch_to_hf.py +134 -0
  15. env-llmeval/lib/python3.10/site-packages/transformers/models/chinese_clip/feature_extraction_chinese_clip.py +33 -0
  16. env-llmeval/lib/python3.10/site-packages/transformers/models/chinese_clip/image_processing_chinese_clip.py +331 -0
  17. env-llmeval/lib/python3.10/site-packages/transformers/models/chinese_clip/modeling_chinese_clip.py +1564 -0
  18. env-llmeval/lib/python3.10/site-packages/transformers/models/chinese_clip/processing_chinese_clip.py +142 -0
  19. env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__init__.py +56 -0
  20. env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__pycache__/__init__.cpython-310.pyc +0 -0
  21. env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__pycache__/configuration_mctct.cpython-310.pyc +0 -0
  22. env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__pycache__/feature_extraction_mctct.cpython-310.pyc +0 -0
  23. env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__pycache__/modeling_mctct.cpython-310.pyc +0 -0
  24. env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__pycache__/processing_mctct.cpython-310.pyc +0 -0
  25. env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mctct/configuration_mctct.py +186 -0
  26. env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mctct/feature_extraction_mctct.py +288 -0
  27. env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mctct/modeling_mctct.py +795 -0
  28. env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mctct/processing_mctct.py +142 -0
  29. env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/__init__.cpython-310.pyc +0 -0
  30. env-llmeval/lib/python3.10/site-packages/transformers/models/gptj/__init__.py +112 -0
  31. env-llmeval/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/__init__.cpython-310.pyc +0 -0
  32. env-llmeval/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/configuration_gptj.cpython-310.pyc +0 -0
  33. env-llmeval/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/modeling_flax_gptj.cpython-310.pyc +0 -0
  34. env-llmeval/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/modeling_gptj.cpython-310.pyc +0 -0
  35. env-llmeval/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/modeling_tf_gptj.cpython-310.pyc +0 -0
  36. env-llmeval/lib/python3.10/site-packages/transformers/models/gptj/configuration_gptj.py +220 -0
  37. env-llmeval/lib/python3.10/site-packages/transformers/models/gptj/modeling_flax_gptj.py +718 -0
  38. env-llmeval/lib/python3.10/site-packages/transformers/models/gptj/modeling_gptj.py +1430 -0
  39. env-llmeval/lib/python3.10/site-packages/transformers/models/gptj/modeling_tf_gptj.py +1104 -0
  40. env-llmeval/lib/python3.10/site-packages/transformers/models/groupvit/__init__.py +97 -0
  41. env-llmeval/lib/python3.10/site-packages/transformers/models/groupvit/configuration_groupvit.py +453 -0
  42. env-llmeval/lib/python3.10/site-packages/transformers/models/groupvit/convert_groupvit_nvlab_to_hf.py +217 -0
  43. env-llmeval/lib/python3.10/site-packages/transformers/models/groupvit/modeling_tf_groupvit.py +2135 -0
  44. env-llmeval/lib/python3.10/site-packages/transformers/models/llama/__init__.py +114 -0
  45. env-llmeval/lib/python3.10/site-packages/transformers/models/llama/__pycache__/__init__.cpython-310.pyc +0 -0
  46. env-llmeval/lib/python3.10/site-packages/transformers/models/llama/__pycache__/configuration_llama.cpython-310.pyc +0 -0
  47. env-llmeval/lib/python3.10/site-packages/transformers/models/llama/__pycache__/convert_llama_weights_to_hf.cpython-310.pyc +0 -0
  48. env-llmeval/lib/python3.10/site-packages/transformers/models/llama/__pycache__/modeling_flax_llama.cpython-310.pyc +0 -0
  49. env-llmeval/lib/python3.10/site-packages/transformers/models/llama/__pycache__/modeling_llama.cpython-310.pyc +0 -0
  50. env-llmeval/lib/python3.10/site-packages/transformers/models/llama/__pycache__/tokenization_llama.cpython-310.pyc +0 -0
env-llmeval/lib/python3.10/site-packages/transformers/models/bertweet/__init__.py ADDED
@@ -0,0 +1,29 @@
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from typing import TYPE_CHECKING
+
+ from ...utils import _LazyModule
+
+
+ _import_structure = {"tokenization_bertweet": ["BertweetTokenizer"]}
+
+
+ if TYPE_CHECKING:
+     from .tokenization_bertweet import BertweetTokenizer
+
+ else:
+     import sys
+
+     sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
env-llmeval/lib/python3.10/site-packages/transformers/models/bertweet/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (510 Bytes).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/bertweet/__pycache__/tokenization_bertweet.cpython-310.pyc ADDED
Binary file (21.1 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/bertweet/tokenization_bertweet.py ADDED
@@ -0,0 +1,782 @@
1
+ # coding=utf-8
2
+ # Copyright (c) 2020, VinAI Research and the HuggingFace Inc. team.
3
+ # Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ Tokenization classes for BERTweet"""
17
+
18
+
19
+ import html
20
+ import os
21
+ import re
22
+ from shutil import copyfile
23
+ from typing import List, Optional, Tuple
24
+
25
+ import regex
26
+
27
+ from ...tokenization_utils import PreTrainedTokenizer
28
+ from ...utils import logging
29
+
30
+
31
+ logger = logging.get_logger(__name__)
32
+
33
+ VOCAB_FILES_NAMES = {
34
+ "vocab_file": "vocab.txt",
35
+ "merges_file": "bpe.codes",
36
+ }
37
+
38
+ PRETRAINED_VOCAB_FILES_MAP = {
39
+ "vocab_file": {
40
+ "vinai/bertweet-base": "https://huggingface.co/vinai/bertweet-base/resolve/main/vocab.txt",
41
+ },
42
+ "merges_file": {
43
+ "vinai/bertweet-base": "https://huggingface.co/vinai/bertweet-base/resolve/main/bpe.codes",
44
+ },
45
+ }
46
+
47
+ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
48
+ "vinai/bertweet-base": 128,
49
+ }
50
+
51
+
52
+ def get_pairs(word):
53
+ """
54
+ Return set of symbol pairs in a word.
55
+
56
+ Word is represented as tuple of symbols (symbols being variable-length strings).
57
+ """
58
+ pairs = set()
59
+ prev_char = word[0]
60
+ for char in word[1:]:
61
+ pairs.add((prev_char, char))
62
+ prev_char = char
63
+
64
+ pairs = set(pairs)
65
+ return pairs
66
+
67
+
68
+ class BertweetTokenizer(PreTrainedTokenizer):
69
+ """
70
+ Constructs a BERTweet tokenizer, using Byte-Pair-Encoding.
71
+
72
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
73
+ this superclass for more information regarding those methods.
74
+
75
+ Args:
76
+ vocab_file (`str`):
77
+ Path to the vocabulary file.
78
+ merges_file (`str`):
79
+ Path to the merges file.
80
+ normalization (`bool`, *optional*, defaults to `False`):
81
+ Whether or not to apply a normalization preprocess.
82
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
83
+ The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.
84
+
85
+ <Tip>
86
+
87
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
88
+ sequence. The token used is the `cls_token`.
89
+
90
+ </Tip>
91
+
92
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
93
+ The end of sequence token.
94
+
95
+ <Tip>
96
+
97
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
98
+ The token used is the `sep_token`.
99
+
100
+ </Tip>
101
+
102
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
103
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
104
+ sequence classification or for a text and a question for question answering. It is also used as the last
105
+ token of a sequence built with special tokens.
106
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
107
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
108
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
109
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
110
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
111
+ token instead.
112
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
113
+ The token used for padding, for example when batching sequences of different lengths.
114
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
115
+ The token used for masking values. This is the token used when training this model with masked language
116
+ modeling. This is the token which the model will try to predict.
117
+ """
118
+
119
+ vocab_files_names = VOCAB_FILES_NAMES
120
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
121
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
122
+
123
+ def __init__(
124
+ self,
125
+ vocab_file,
126
+ merges_file,
127
+ normalization=False,
128
+ bos_token="<s>",
129
+ eos_token="</s>",
130
+ sep_token="</s>",
131
+ cls_token="<s>",
132
+ unk_token="<unk>",
133
+ pad_token="<pad>",
134
+ mask_token="<mask>",
135
+ **kwargs,
136
+ ):
137
+ try:
138
+ from emoji import demojize
139
+
140
+ self.demojizer = demojize
141
+ except ImportError:
142
+ logger.warning(
143
+ "emoji is not installed, thus not converting emoticons or emojis into text. Install emoji: pip3"
144
+ " install emoji==0.6.0"
145
+ )
146
+ self.demojizer = None
147
+
148
+ self.vocab_file = vocab_file
149
+ self.merges_file = merges_file
150
+
151
+ self.encoder = {}
152
+ self.encoder[str(bos_token)] = 0
153
+ self.encoder[str(pad_token)] = 1
154
+ self.encoder[str(eos_token)] = 2
155
+ self.encoder[str(unk_token)] = 3
156
+
157
+ self.add_from_file(vocab_file)
158
+
159
+ self.decoder = {v: k for k, v in self.encoder.items()}
160
+
161
+ with open(merges_file, encoding="utf-8") as merges_handle:
162
+ merges = merges_handle.read().split("\n")[:-1]
163
+ merges = [tuple(merge.split()[:-1]) for merge in merges]
164
+ self.bpe_ranks = dict(zip(merges, range(len(merges))))
165
+ self.cache = {}
166
+
167
+ self.normalization = normalization
168
+ self.tweetPreprocessor = TweetTokenizer()
169
+ self.special_puncts = {"’": "'", "…": "..."}
170
+
171
+ super().__init__(
172
+ normalization=normalization,
173
+ bos_token=bos_token,
174
+ eos_token=eos_token,
175
+ sep_token=sep_token,
176
+ cls_token=cls_token,
177
+ unk_token=unk_token,
178
+ pad_token=pad_token,
179
+ mask_token=mask_token,
180
+ **kwargs,
181
+ )
182
+
183
+ def build_inputs_with_special_tokens(
184
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
185
+ ) -> List[int]:
186
+ """
187
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
188
+ adding special tokens. A BERTweet sequence has the following format:
189
+
190
+ - single sequence: `<s> X </s>`
191
+ - pair of sequences: `<s> A </s></s> B </s>`
192
+
193
+ Args:
194
+ token_ids_0 (`List[int]`):
195
+ List of IDs to which the special tokens will be added.
196
+ token_ids_1 (`List[int]`, *optional*):
197
+ Optional second list of IDs for sequence pairs.
198
+
199
+ Returns:
200
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
201
+ """
202
+
203
+ if token_ids_1 is None:
204
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
205
+ cls = [self.cls_token_id]
206
+ sep = [self.sep_token_id]
207
+ return cls + token_ids_0 + sep + sep + token_ids_1 + sep
208
+
209
+ def get_special_tokens_mask(
210
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
211
+ ) -> List[int]:
212
+ """
213
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
214
+ special tokens using the tokenizer `prepare_for_model` method.
215
+
216
+ Args:
217
+ token_ids_0 (`List[int]`):
218
+ List of IDs.
219
+ token_ids_1 (`List[int]`, *optional*):
220
+ Optional second list of IDs for sequence pairs.
221
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
222
+ Whether or not the token list is already formatted with special tokens for the model.
223
+
224
+ Returns:
225
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
226
+ """
227
+
228
+ if already_has_special_tokens:
229
+ return super().get_special_tokens_mask(
230
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
231
+ )
232
+
233
+ if token_ids_1 is None:
234
+ return [1] + ([0] * len(token_ids_0)) + [1]
235
+ return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
236
+
237
+ def create_token_type_ids_from_sequences(
238
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
239
+ ) -> List[int]:
240
+ """
241
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. BERTweet does
242
+ not make use of token type ids, therefore a list of zeros is returned.
243
+
244
+ Args:
245
+ token_ids_0 (`List[int]`):
246
+ List of IDs.
247
+ token_ids_1 (`List[int]`, *optional*):
248
+ Optional second list of IDs for sequence pairs.
249
+
250
+ Returns:
251
+ `List[int]`: List of zeros.
252
+ """
253
+
254
+ sep = [self.sep_token_id]
255
+ cls = [self.cls_token_id]
256
+
257
+ if token_ids_1 is None:
258
+ return len(cls + token_ids_0 + sep) * [0]
259
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
260
+
261
+ @property
262
+ def vocab_size(self):
263
+ return len(self.encoder)
264
+
265
+ def get_vocab(self):
266
+ return dict(self.encoder, **self.added_tokens_encoder)
267
+
268
+ def bpe(self, token):
269
+ if token in self.cache:
270
+ return self.cache[token]
271
+ word = tuple(token)
272
+ word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
273
+ pairs = get_pairs(word)
274
+
275
+ if not pairs:
276
+ return token
277
+
278
+ while True:
279
+ bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
280
+ if bigram not in self.bpe_ranks:
281
+ break
282
+ first, second = bigram
283
+ new_word = []
284
+ i = 0
285
+ while i < len(word):
286
+ try:
287
+ j = word.index(first, i)
288
+ except ValueError:
289
+ new_word.extend(word[i:])
290
+ break
291
+ else:
292
+ new_word.extend(word[i:j])
293
+ i = j
294
+
295
+ if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
296
+ new_word.append(first + second)
297
+ i += 2
298
+ else:
299
+ new_word.append(word[i])
300
+ i += 1
301
+ new_word = tuple(new_word)
302
+ word = new_word
303
+ if len(word) == 1:
304
+ break
305
+ else:
306
+ pairs = get_pairs(word)
307
+ word = "@@ ".join(word)
308
+ word = word[:-4]
309
+ self.cache[token] = word
310
+ return word
311
+
312
+ def _tokenize(self, text):
313
+ """Tokenize a string."""
314
+ if self.normalization: # Perform Tweet normalization before performing BPE
315
+ text = self.normalizeTweet(text)
316
+
317
+ split_tokens = []
318
+ words = re.findall(r"\S+\n?", text)
319
+ for token in words:
320
+ split_tokens.extend(list(self.bpe(token).split(" ")))
321
+ return split_tokens
322
+
323
+ def normalizeTweet(self, tweet):
324
+ """
325
+ Normalize a raw Tweet
326
+ """
327
+ for punct in self.special_puncts:
328
+ tweet = tweet.replace(punct, self.special_puncts[punct])
329
+
330
+ tokens = self.tweetPreprocessor.tokenize(tweet)
331
+ normTweet = " ".join([self.normalizeToken(token) for token in tokens])
332
+
333
+ normTweet = (
334
+ normTweet.replace("cannot ", "can not ")
335
+ .replace("n't ", " n't ")
336
+ .replace("n 't ", " n't ")
337
+ .replace("ca n't", "can't")
338
+ .replace("ai n't", "ain't")
339
+ )
340
+ normTweet = (
341
+ normTweet.replace("'m ", " 'm ")
342
+ .replace("'re ", " 're ")
343
+ .replace("'s ", " 's ")
344
+ .replace("'ll ", " 'll ")
345
+ .replace("'d ", " 'd ")
346
+ .replace("'ve ", " 've ")
347
+ )
348
+ normTweet = (
349
+ normTweet.replace(" p . m .", " p.m.")
350
+ .replace(" p . m ", " p.m ")
351
+ .replace(" a . m .", " a.m.")
352
+ .replace(" a . m ", " a.m ")
353
+ )
354
+
355
+ return " ".join(normTweet.split())
356
+
357
+ def normalizeToken(self, token):
358
+ """
359
+ Normalize tokens in a Tweet
360
+ """
361
+ lowercased_token = token.lower()
362
+ if token.startswith("@"):
363
+ return "@USER"
364
+ elif lowercased_token.startswith("http") or lowercased_token.startswith("www"):
365
+ return "HTTPURL"
366
+ elif len(token) == 1:
367
+ if token in self.special_puncts:
368
+ return self.special_puncts[token]
369
+ if self.demojizer is not None:
370
+ return self.demojizer(token)
371
+ else:
372
+ return token
373
+ else:
374
+ return token
375
+
376
+ def _convert_token_to_id(self, token):
377
+ """Converts a token (str) in an id using the vocab."""
378
+ return self.encoder.get(token, self.encoder.get(self.unk_token))
379
+
380
+ def _convert_id_to_token(self, index):
381
+ """Converts an index (integer) in a token (str) using the vocab."""
382
+ return self.decoder.get(index, self.unk_token)
383
+
384
+ def convert_tokens_to_string(self, tokens):
385
+ """Converts a sequence of tokens (string) in a single string."""
386
+ out_string = " ".join(tokens).replace("@@ ", "").strip()
387
+ return out_string
388
+
389
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
390
+ if not os.path.isdir(save_directory):
391
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
392
+ return
393
+ out_vocab_file = os.path.join(
394
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
395
+ )
396
+ out_merge_file = os.path.join(
397
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
398
+ )
399
+
400
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
401
+ copyfile(self.vocab_file, out_vocab_file)
402
+ elif not os.path.isfile(self.vocab_file):
403
+ with open(out_vocab_file, "wb") as fi:
404
+ content_spiece_model = self.sp_model.serialized_model_proto()
405
+ fi.write(content_spiece_model)
406
+
407
+ if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
408
+ copyfile(self.merges_file, out_merge_file)
409
+
410
+ return out_vocab_file, out_merge_file
411
+
412
+ # def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
413
+ # filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
414
+ # tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
415
+ # tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
416
+ # return ''.join(tokens_generated_so_far)
417
+
418
+ def add_from_file(self, f):
419
+ """
420
+ Loads a pre-existing dictionary from a text file and adds its symbols to this instance.
421
+ """
422
+ if isinstance(f, str):
423
+ try:
424
+ with open(f, "r", encoding="utf-8") as fd:
425
+ self.add_from_file(fd)
426
+ except FileNotFoundError as fnfe:
427
+ raise fnfe
428
+ except UnicodeError:
429
+ raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
430
+ return
431
+
432
+ lines = f.readlines()
433
+ for lineTmp in lines:
434
+ line = lineTmp.strip()
435
+ idx = line.rfind(" ")
436
+ if idx == -1:
437
+ raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
438
+ word = line[:idx]
439
+ self.encoder[word] = len(self.encoder)
440
+
441
+
442
+ # Natural Language Toolkit: Twitter Tokenizer
443
+ #
444
+ # Copyright (C) 2001-2020 NLTK Project
445
+ # Author: Christopher Potts <[email protected]>
446
+ # Ewan Klein <[email protected]> (modifications)
447
+ # Pierpaolo Pantone <> (modifications)
448
+ # URL: http://nltk.org/
449
+ # For license information, see LICENSE.TXT
450
+ #
451
+
452
+
453
+ """
454
+ Twitter-aware tokenizer, designed to be flexible and easy to adapt to new domains and tasks. The basic logic is this:
455
+
456
+ 1. The tuple regex_strings defines a list of regular expression strings.
457
+
458
+ 2. The regex_strings strings are put, in order, into a compiled regular expression object called word_re.
459
+
460
+ 3. The tokenization is done by word_re.findall(s), where s is the user-supplied string, inside the tokenize() method of
461
+ the class Tokenizer.
462
+
463
+ 4. When instantiating Tokenizer objects, there is a single option: preserve_case. By default, it is set to True. If it
464
+ is set to False, then the tokenizer will lowercase everything except for emoticons.
465
+
466
+ """
467
+
468
+
469
+ ######################################################################
470
+ #
471
+ # import regex # https://github.com/nltk/nltk/issues/2409
472
+ # import html
473
+ #
474
+ ######################################################################
475
+ # The following strings are components in the regular expression
476
+ # that is used for tokenizing. It's important that phone_number
477
+ # appears first in the final regex (since it can contain whitespace).
478
+ # It also could matter that tags comes after emoticons, due to the
479
+ # possibility of having text like
480
+ #
481
+ # <:| and some text >:)
482
+ #
483
+ # Most importantly, the final element should always be last, since it
484
+ # does a last ditch whitespace-based tokenization of whatever is left.
485
+
486
+ # ToDo: Update with http://en.wikipedia.org/wiki/List_of_emoticons ?
487
+
488
+ # This particular element is used in a couple ways, so we define it
489
+ # with a name:
490
+ # docstyle-ignore
491
+ EMOTICONS = r"""
492
+ (?:
493
+ [<>]?
494
+ [:;=8] # eyes
495
+ [\-o\*\']? # optional nose
496
+ [\)\]\(\[dDpP/\:\}\{@\|\\] # mouth
497
+ |
498
+ [\)\]\(\[dDpP/\:\}\{@\|\\] # mouth
499
+ [\-o\*\']? # optional nose
500
+ [:;=8] # eyes
501
+ [<>]?
502
+ |
503
+ <3 # heart
504
+ )"""
505
+
506
+ # URL pattern due to John Gruber, modified by Tom Winzig. See
507
+ # https://gist.github.com/winzig/8894715
508
+ # docstyle-ignore
509
+ URLS = r""" # Capture 1: entire matched URL
510
+ (?:
511
+ https?: # URL protocol and colon
512
+ (?:
513
+ /{1,3} # 1-3 slashes
514
+ | # or
515
+ [a-z0-9%] # Single letter or digit or '%'
516
+ # (Trying not to match e.g. "URI::Escape")
517
+ )
518
+ | # or
519
+ # looks like domain name followed by a slash:
520
+ [a-z0-9.\-]+[.]
521
+ (?:[a-z]{2,13})
522
+ /
523
+ )
524
+ (?: # One or more:
525
+ [^\s()<>{}\[\]]+ # Run of non-space, non-()<>{}[]
526
+ | # or
527
+ \([^\s()]*?\([^\s()]+\)[^\s()]*?\) # balanced parens, one level deep: (...(...)...)
528
+ |
529
+ \([^\s]+?\) # balanced parens, non-recursive: (...)
530
+ )+
531
+ (?: # End with:
532
+ \([^\s()]*?\([^\s()]+\)[^\s()]*?\) # balanced parens, one level deep: (...(...)...)
533
+ |
534
+ \([^\s]+?\) # balanced parens, non-recursive: (...)
535
+ | # or
536
+ [^\s`!()\[\]{};:'".,<>?«»“”‘’] # not a space or one of these punct chars
537
+ )
538
+ | # OR, the following to match naked domains:
539
+ (?:
540
+ (?<!@) # not preceded by a @, avoid matching foo@_gmail.com_
541
+ [a-z0-9]+
542
+ (?:[.\-][a-z0-9]+)*
543
+ [.]
544
+ (?:[a-z]{2,13})
545
+ \b
546
+ /?
547
+ (?!@) # not succeeded by a @,
548
+ # avoid matching "foo.na" in "[email protected]"
549
+ )
550
+ """
551
+
552
+ # docstyle-ignore
553
+ # The components of the tokenizer:
554
+ REGEXPS = (
555
+ URLS,
556
+ # Phone numbers:
557
+ r"""
558
+ (?:
559
+ (?: # (international)
560
+ \+?[01]
561
+ [ *\-.\)]*
562
+ )?
563
+ (?: # (area code)
564
+ [\(]?
565
+ \d{3}
566
+ [ *\-.\)]*
567
+ )?
568
+ \d{3} # exchange
569
+ [ *\-.\)]*
570
+ \d{4} # base
571
+ )""",
572
+ # ASCII Emoticons
573
+ EMOTICONS,
574
+ # HTML tags:
575
+ r"""<[^>\s]+>""",
576
+ # ASCII Arrows
577
+ r"""[\-]+>|<[\-]+""",
578
+ # Twitter username:
579
+ r"""(?:@[\w_]+)""",
580
+ # Twitter hashtags:
581
+ r"""(?:\#+[\w_]+[\w\'_\-]*[\w_]+)""",
582
+ # email addresses
583
+ r"""[\w.+-]+@[\w-]+\.(?:[\w-]\.?)+[\w-]""",
584
+ # docstyle-ignore
585
+ # Remaining word types:
586
+ r"""
587
+ (?:[^\W\d_](?:[^\W\d_]|['\-_])+[^\W\d_]) # Words with apostrophes or dashes.
588
+ |
589
+ (?:[+\-]?\d+[,/.:-]\d+[+\-]?) # Numbers, including fractions, decimals.
590
+ |
591
+ (?:[\w_]+) # Words without apostrophes or dashes.
592
+ |
593
+ (?:\.(?:\s*\.){1,}) # Ellipsis dots.
594
+ |
595
+ (?:\S) # Everything else that isn't whitespace.
596
+ """,
597
+ )
598
+
599
+ ######################################################################
600
+ # This is the core tokenizing regex:
601
+
602
+ WORD_RE = regex.compile(r"""(%s)""" % "|".join(REGEXPS), regex.VERBOSE | regex.I | regex.UNICODE)
603
+
604
+ # WORD_RE performs poorly on these patterns:
605
+ HANG_RE = regex.compile(r"([^a-zA-Z0-9])\1{3,}")
606
+
607
+ # The emoticon string gets its own regex so that we can preserve case for
608
+ # them as needed:
609
+ EMOTICON_RE = regex.compile(EMOTICONS, regex.VERBOSE | regex.I | regex.UNICODE)
610
+
611
+ # These are for regularizing HTML entities to Unicode:
612
+ ENT_RE = regex.compile(r"&(#?(x?))([^&;\s]+);")
613
+
614
+
615
+ ######################################################################
616
+ # Functions for converting html entities
617
+ ######################################################################
618
+
619
+
620
+ def _str_to_unicode(text, encoding=None, errors="strict"):
621
+ if encoding is None:
622
+ encoding = "utf-8"
623
+ if isinstance(text, bytes):
624
+ return text.decode(encoding, errors)
625
+ return text
626
+
627
+
628
+ def _replace_html_entities(text, keep=(), remove_illegal=True, encoding="utf-8"):
629
+ """
630
+ Remove entities from text by converting them to their corresponding unicode character.
631
+
632
+ Args:
633
+ text:
634
+ A unicode string or a byte string encoded in the given *encoding* (which defaults to 'utf-8').
635
+ keep (list):
636
+ List of entity names which should not be replaced. This supports both numeric entities (`&#nnnn;` and
637
+ `&#hhhh;`) and named entities (such as `&nbsp;` or `&gt;`).
638
+ remove_illegal (bool):
639
+ If `True`, entities that can't be converted are removed. Otherwise, entities that can't be converted are
640
+ kept "as is".
641
+
642
+ Returns: A unicode string with the entities removed.
643
+
644
+ See https://github.com/scrapy/w3lib/blob/master/w3lib/html.py
645
+
646
+ Examples:
647
+
648
+ ```python
649
+ >>> from nltk.tokenize.casual import _replace_html_entities
650
+
651
+ >>> _replace_html_entities(b"Price: &pound;100")
652
+ 'Price: \\xa3100'
653
+
654
+ >>> print(_replace_html_entities(b"Price: &pound;100"))
655
+ Price: £100
656
+ ```"""
657
+
658
+ def _convert_entity(match):
659
+ entity_body = match.group(3)
660
+ if match.group(1):
661
+ try:
662
+ if match.group(2):
663
+ number = int(entity_body, 16)
664
+ else:
665
+ number = int(entity_body, 10)
666
+ # Numeric character references in the 80-9F range are typically
667
+ # interpreted by browsers as representing the characters mapped
668
+ # to bytes 80-9F in the Windows-1252 encoding. For more info
669
+ # see: https://en.wikipedia.org/wiki/ISO/IEC_8859-1#Similar_character_sets
670
+ if 0x80 <= number <= 0x9F:
671
+ return bytes((number,)).decode("cp1252")
672
+ except ValueError:
673
+ number = None
674
+ else:
675
+ if entity_body in keep:
676
+ return match.group(0)
677
+ else:
678
+ number = html.entities.name2codepoint.get(entity_body)
679
+ if number is not None:
680
+ try:
681
+ return chr(number)
682
+ except (ValueError, OverflowError):
683
+ pass
684
+
685
+ return "" if remove_illegal else match.group(0)
686
+
687
+ return ENT_RE.sub(_convert_entity, _str_to_unicode(text, encoding))
688
+
689
+
690
+ ######################################################################
691
+
692
+
693
+ class TweetTokenizer:
694
+ r"""
695
+ Examples:
696
+
697
+ ```python
698
+ >>> # Tokenizer for tweets.
699
+ >>> from nltk.tokenize import TweetTokenizer
700
+
701
+ >>> tknzr = TweetTokenizer()
702
+ >>> s0 = "This is a cooool #dummysmiley: :-) :-P <3 and some arrows < > -> <--"
703
+ >>> tknzr.tokenize(s0)
704
+ ['This', 'is', 'a', 'cooool', '#dummysmiley', ':', ':-)', ':-P', '<3', 'and', 'some', 'arrows', '<', '>', '->', '<--']
705
+
706
+ >>> # Examples using *strip_handles* and *reduce_len parameters*:
707
+ >>> tknzr = TweetTokenizer(strip_handles=True, reduce_len=True)
708
+ >>> s1 = "@remy: This is waaaaayyyy too much for you!!!!!!"
709
+ >>> tknzr.tokenize(s1)
710
+ [':', 'This', 'is', 'waaayyy', 'too', 'much', 'for', 'you', '!', '!', '!']
711
+ ```"""
712
+
713
+ def __init__(self, preserve_case=True, reduce_len=False, strip_handles=False):
714
+ self.preserve_case = preserve_case
715
+ self.reduce_len = reduce_len
716
+ self.strip_handles = strip_handles
717
+
718
+ def tokenize(self, text):
719
+ """
720
+ Args:
721
+ text: str
722
+
723
+ Returns: list(str) A tokenized list of strings; concatenating this list returns the original string if
724
+ `preserve_case=False`
725
+ """
726
+ # Fix HTML character entities:
727
+ text = _replace_html_entities(text)
728
+ # Remove username handles
729
+ if self.strip_handles:
730
+ text = remove_handles(text)
731
+ # Normalize word lengthening
732
+ if self.reduce_len:
733
+ text = reduce_lengthening(text)
734
+ # Shorten problematic sequences of characters
735
+ safe_text = HANG_RE.sub(r"\1\1\1", text)
736
+ # Tokenize:
737
+ words = WORD_RE.findall(safe_text)
738
+ # Possibly alter the case, but avoid changing emoticons like :D into :d:
739
+ if not self.preserve_case:
740
+ words = [x if EMOTICON_RE.search(x) else x.lower() for x in words]
741
+ return words
742
+
743
+
744
+ ######################################################################
745
+ # Normalization Functions
746
+ ######################################################################
747
+
748
+
749
+ def reduce_lengthening(text):
750
+ """
751
+ Replace repeated character sequences of length 3 or greater with sequences of length 3.
752
+ """
753
+ pattern = regex.compile(r"(.)\1{2,}")
754
+ return pattern.sub(r"\1\1\1", text)
755
+
756
+
757
+ def remove_handles(text):
758
+ """
759
+ Remove Twitter username handles from text.
760
+ """
761
+ pattern = regex.compile(
762
+ r"(?<![A-Za-z0-9_!@#\$%&*])@(([A-Za-z0-9_]){20}(?!@))|(?<![A-Za-z0-9_!@#\$%&*])@(([A-Za-z0-9_]){1,19})(?![A-Za-z0-9_]*@)"
763
+ )
764
+ # Substitute handles with ' ' to ensure that text on either side of removed handles are tokenized correctly
765
+ return pattern.sub(" ", text)
766
+
767
+
768
+ ######################################################################
769
+ # Tokenization Function
770
+ ######################################################################
771
+
772
+
773
+ def casual_tokenize(text, preserve_case=True, reduce_len=False, strip_handles=False):
774
+ """
775
+ Convenience function for wrapping the tokenizer.
776
+ """
777
+ return TweetTokenizer(preserve_case=preserve_case, reduce_len=reduce_len, strip_handles=strip_handles).tokenize(
778
+ text
779
+ )
780
+
781
+
782
+ ###############################################################################
env-llmeval/lib/python3.10/site-packages/transformers/models/chinese_clip/__init__.py ADDED
@@ -0,0 +1,88 @@
+ # Copyright 2022 The OFA-Sys Team Authors and The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ from typing import TYPE_CHECKING
+
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
+
+
+ _import_structure = {
+     "configuration_chinese_clip": [
+         "CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
+         "ChineseCLIPConfig",
+         "ChineseCLIPOnnxConfig",
+         "ChineseCLIPTextConfig",
+         "ChineseCLIPVisionConfig",
+     ],
+     "processing_chinese_clip": ["ChineseCLIPProcessor"],
+ }
+
+ try:
+     if not is_vision_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
+     _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]
+
+ try:
+     if not is_torch_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_chinese_clip"] = [
+         "CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
+         "ChineseCLIPModel",
+         "ChineseCLIPPreTrainedModel",
+         "ChineseCLIPTextModel",
+         "ChineseCLIPVisionModel",
+     ]
+
+ if TYPE_CHECKING:
+     from .configuration_chinese_clip import (
+         CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
+         ChineseCLIPConfig,
+         ChineseCLIPOnnxConfig,
+         ChineseCLIPTextConfig,
+         ChineseCLIPVisionConfig,
+     )
+     from .processing_chinese_clip import ChineseCLIPProcessor
+
+     try:
+         if not is_vision_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
+
+     try:
+         if not is_torch_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_chinese_clip import (
+             CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
+             ChineseCLIPModel,
+             ChineseCLIPPreTrainedModel,
+             ChineseCLIPTextModel,
+             ChineseCLIPVisionModel,
+         )
+
+ else:
+     import sys
+
+     sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
env-llmeval/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.48 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/configuration_chinese_clip.cpython-310.pyc ADDED
Binary file (17.9 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/convert_chinese_clip_original_pytorch_to_hf.cpython-310.pyc ADDED
Binary file (4.05 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/feature_extraction_chinese_clip.cpython-310.pyc ADDED
Binary file (1.06 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/image_processing_chinese_clip.cpython-310.pyc ADDED
Binary file (13.2 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/modeling_chinese_clip.cpython-310.pyc ADDED
Binary file (48.4 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/processing_chinese_clip.cpython-310.pyc ADDED
Binary file (6.12 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/chinese_clip/configuration_chinese_clip.py ADDED
@@ -0,0 +1,471 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The OFA-Sys Team Authors and The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Chinese-CLIP model configuration"""
16
+
17
+ import os
18
+ from collections import OrderedDict
19
+ from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
20
+
21
+
22
+ if TYPE_CHECKING:
23
+ from ...processing_utils import ProcessorMixin
24
+ from ...utils import TensorType
25
+
26
+ from ...configuration_utils import PretrainedConfig
27
+ from ...onnx import OnnxConfig
28
+ from ...utils import logging
29
+
30
+
31
+ logger = logging.get_logger(__name__)
32
+
33
+ CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
34
+ "OFA-Sys/chinese-clip-vit-base-patch16": (
35
+ "https://huggingface.co/OFA-Sys/chinese-clip-vit-base-patch16/resolve/main/config.json"
36
+ ),
37
+ }
38
+
39
+
40
+ class ChineseCLIPTextConfig(PretrainedConfig):
41
+ r"""
42
+ This is the configuration class to store the configuration of a [`ChineseCLIPModel`]. It is used to instantiate a
43
+ Chinese CLIP model according to the specified arguments, defining the model architecture. Instantiating a
44
+ configuration with the defaults will yield a similar configuration to that of the Chinese CLIP
45
+ [OFA-Sys/chinese-clip-vit-base-patch16](https:
46
+ //huggingface.co/OFA-Sys/chinese-clip-vit-base-patch16) architecture.
47
+
48
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
49
+ documentation from [`PretrainedConfig`] for more information.
50
+
51
+
52
+ Args:
53
+ vocab_size (`int`, *optional*, defaults to 30522):
54
+ Vocabulary size of the CHINESE_CLIP model. Defines the number of different tokens that can be represented
55
+ by the `inputs_ids` passed when calling [`ChineseCLIPModel`].
56
+ hidden_size (`int`, *optional*, defaults to 768):
57
+ Dimensionality of the encoder layers and the pooler layer.
58
+ num_hidden_layers (`int`, *optional*, defaults to 12):
59
+ Number of hidden layers in the Transformer encoder.
60
+ num_attention_heads (`int`, *optional*, defaults to 12):
61
+ Number of attention heads for each attention layer in the Transformer encoder.
62
+ intermediate_size (`int`, *optional*, defaults to 3072):
63
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
64
+ hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
65
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
66
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
67
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
68
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
69
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
70
+ The dropout ratio for the attention probabilities.
71
+ max_position_embeddings (`int`, *optional*, defaults to 512):
72
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
73
+ just in case (e.g., 512 or 1024 or 2048).
74
+ type_vocab_size (`int`, *optional*, defaults to 2):
75
+ The vocabulary size of the `token_type_ids` passed when calling [`ChineseCLIPModel`].
76
+ initializer_range (`float`, *optional*, defaults to 0.02):
77
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
78
+ initializer_factor (`float`, *optional*, defaults to 1.0):
79
+ A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
80
+ testing).
81
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
82
+ The epsilon used by the layer normalization layers.
83
+ pad_token_id (`int`, *optional*, defaults to 0):
84
+ Padding token id.
85
+ position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
86
+ Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
87
+ positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
88
+ [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
89
+ For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
90
+ with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
91
+ use_cache (`bool`, *optional*, defaults to `True`):
92
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
93
+ relevant if `config.is_decoder=True`.
94
+
95
+ Example:
96
+
97
+ ```python
98
+ >>> from transformers import ChineseCLIPTextConfig, ChineseCLIPTextModel
99
+
100
+ >>> # Initializing a ChineseCLIPTextConfig with OFA-Sys/chinese-clip-vit-base-patch16 style configuration
101
+ >>> configuration = ChineseCLIPTextConfig()
102
+
103
+ >>> # Initializing a ChineseCLIPTextModel (with random weights) from the OFA-Sys/chinese-clip-vit-base-patch16 style configuration
104
+ >>> model = ChineseCLIPTextModel(configuration)
105
+
106
+ >>> # Accessing the model configuration
107
+ >>> configuration = model.config
108
+ ```"""
109
+
110
+ model_type = "chinese_clip_text_model"
111
+
112
+ def __init__(
113
+ self,
114
+ vocab_size=30522,
115
+ hidden_size=768,
116
+ num_hidden_layers=12,
117
+ num_attention_heads=12,
118
+ intermediate_size=3072,
119
+ hidden_act="gelu",
120
+ hidden_dropout_prob=0.1,
121
+ attention_probs_dropout_prob=0.1,
122
+ max_position_embeddings=512,
123
+ type_vocab_size=2,
124
+ initializer_range=0.02,
125
+ initializer_factor=1.0,
126
+ layer_norm_eps=1e-12,
127
+ pad_token_id=0,
128
+ position_embedding_type="absolute",
129
+ use_cache=True,
130
+ **kwargs,
131
+ ):
132
+ super().__init__(pad_token_id=pad_token_id, **kwargs)
133
+
134
+ self.vocab_size = vocab_size
135
+ self.hidden_size = hidden_size
136
+ self.num_hidden_layers = num_hidden_layers
137
+ self.num_attention_heads = num_attention_heads
138
+ self.hidden_act = hidden_act
139
+ self.intermediate_size = intermediate_size
140
+ self.hidden_dropout_prob = hidden_dropout_prob
141
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
142
+ self.max_position_embeddings = max_position_embeddings
143
+ self.type_vocab_size = type_vocab_size
144
+ self.initializer_range = initializer_range
145
+ self.initializer_factor = initializer_factor
146
+ self.layer_norm_eps = layer_norm_eps
147
+ self.position_embedding_type = position_embedding_type
148
+ self.use_cache = use_cache
149
+
150
+ @classmethod
151
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
152
+ cls._set_token_in_kwargs(kwargs)
153
+
154
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
155
+
156
+ # get the vision config dict if we are loading from ChineseCLIPConfig
157
+ if config_dict.get("model_type") == "chinese_clip":
158
+ config_dict = config_dict["text_config"]
159
+
160
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
161
+ logger.warning(
162
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
163
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
164
+ )
165
+
166
+ return cls.from_dict(config_dict, **kwargs)
167
+
168
+
169
+ class ChineseCLIPVisionConfig(PretrainedConfig):
170
+ r"""
171
+ This is the configuration class to store the configuration of a [`ChineseCLIPModel`]. It is used to instantiate an
172
+ ChineseCLIP model according to the specified arguments, defining the model architecture. Instantiating a
173
+ configuration with the defaults will yield a similar configuration to that of the ChineseCLIP
174
+ [OFA-Sys/chinese-clip-vit-base-patch16](https://huggingface.co/OFA-Sys/chinese-clip-vit-base-patch16) architecture.
175
+
176
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
177
+ documentation from [`PretrainedConfig`] for more information.
178
+
179
+
180
+ Args:
181
+ hidden_size (`int`, *optional*, defaults to 768):
182
+ Dimensionality of the encoder layers and the pooler layer.
183
+ intermediate_size (`int`, *optional*, defaults to 3072):
184
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
185
+ projection_dim (`int`, *optional*, defaults to 512):
186
+ Dimentionality of text and vision projection layers.
187
+ num_hidden_layers (`int`, *optional*, defaults to 12):
188
+ Number of hidden layers in the Transformer encoder.
189
+ num_attention_heads (`int`, *optional*, defaults to 12):
190
+ Number of attention heads for each attention layer in the Transformer encoder.
191
+ num_channels (`int`, *optional*, defaults to 3):
192
+ The number of input channels.
193
+ image_size (`int`, *optional*, defaults to 224):
194
+ The size (resolution) of each image.
195
+ patch_size (`int`, *optional*, defaults to 32):
196
+ The size (resolution) of each patch.
197
+ hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
198
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
199
+ `"relu"`, `"selu"` and `"gelu_new"` ``"quick_gelu"` are supported.
200
+ layer_norm_eps (`float`, *optional*, defaults to 1e-05):
201
+ The epsilon used by the layer normalization layers.
202
+ attention_dropout (`float`, *optional*, defaults to 0.0):
203
+ The dropout ratio for the attention probabilities.
204
+ initializer_range (`float`, *optional*, defaults to 0.02):
205
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
206
+ initializer_factor (`float`, *optional*, defaults to 1.0):
207
+ A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
208
+ testing).
209
+ Example:
210
+ ```python
211
+ >>> from transformers import ChineseCLIPVisionConfig, ChineseCLIPVisionModel
212
+
213
+ >>> # Initializing a ChineseCLIPVisionConfig with OFA-Sys/chinese-clip-vit-base-patch16 style configuration
214
+ >>> configuration = ChineseCLIPVisionConfig()
215
+
216
+ >>> # Initializing a ChineseCLIPVisionModel (with random weights) from the OFA-Sys/chinese-clip-vit-base-patch16 style configuration
217
+ >>> model = ChineseCLIPVisionModel(configuration)
218
+
219
+ >>> # Accessing the model configuration
220
+ >>> configuration = model.config
221
+ ```"""
222
+
223
+ model_type = "chinese_clip_vision_model"
224
+
225
+ def __init__(
226
+ self,
227
+ hidden_size=768,
228
+ intermediate_size=3072,
229
+ projection_dim=512,
230
+ num_hidden_layers=12,
231
+ num_attention_heads=12,
232
+ num_channels=3,
233
+ image_size=224,
234
+ patch_size=32,
235
+ hidden_act="quick_gelu",
236
+ layer_norm_eps=1e-5,
237
+ attention_dropout=0.0,
238
+ initializer_range=0.02,
239
+ initializer_factor=1.0,
240
+ **kwargs,
241
+ ):
242
+ super().__init__(**kwargs)
243
+
244
+ self.hidden_size = hidden_size
245
+ self.intermediate_size = intermediate_size
246
+ self.projection_dim = projection_dim
247
+ self.num_hidden_layers = num_hidden_layers
248
+ self.num_attention_heads = num_attention_heads
249
+ self.num_channels = num_channels
250
+ self.patch_size = patch_size
251
+ self.image_size = image_size
252
+ self.initializer_range = initializer_range
253
+ self.initializer_factor = initializer_factor
254
+ self.attention_dropout = attention_dropout
255
+ self.layer_norm_eps = layer_norm_eps
256
+ self.hidden_act = hidden_act
257
+
258
+ @classmethod
259
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
260
+ cls._set_token_in_kwargs(kwargs)
261
+
262
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
263
+
264
+ # get the vision config dict if we are loading from ChineseCLIPConfig
265
+ if config_dict.get("model_type") == "chinese_clip":
266
+ config_dict = config_dict["vision_config"]
267
+
268
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
269
+ logger.warning(
270
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
271
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
272
+ )
273
+
274
+ return cls.from_dict(config_dict, **kwargs)
275
+
276
+
277
+ class ChineseCLIPConfig(PretrainedConfig):
278
+ r"""
279
+ [`ChineseCLIPConfig`] is the configuration class to store the configuration of a [`ChineseCLIPModel`]. It is used
280
+ to instantiate Chinese-CLIP model according to the specified arguments, defining the text model and vision model
281
+ configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the
282
+ Chinese-CLIP [OFA-Sys/chinese-clip-vit-base-patch16](https://huggingface.co/OFA-Sys/chinese-clip-vit-base-patch16)
283
+ architecture.
284
+
285
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
286
+ documentation from [`PretrainedConfig`] for more information.
287
+
288
+ Args:
289
+ text_config (`dict`, *optional*):
290
+ Dictionary of configuration options used to initialize [`ChineseCLIPTextConfig`].
291
+ vision_config (`dict`, *optional*):
292
+ Dictionary of configuration options used to initialize [`ChineseCLIPVisionConfig`].
293
+ projection_dim (`int`, *optional*, defaults to 512):
294
+ Dimentionality of text and vision projection layers.
295
+ logit_scale_init_value (`float`, *optional*, defaults to 2.6592):
296
+ The inital value of the *logit_scale* paramter. Default is used as per the original ChineseCLIP
297
+ implementation.
298
+ kwargs (*optional*):
299
+ Dictionary of keyword arguments.
300
+
301
+ Example:
302
+
303
+ ```python
304
+ >>> from transformers import ChineseCLIPConfig, ChineseCLIPModel
305
+
306
+ >>> # Initializing a ChineseCLIPConfig with OFA-Sys/chinese-clip-vit-base-patch16 style configuration
307
+ >>> configuration = ChineseCLIPConfig()
308
+
309
+ >>> # Initializing a ChineseCLIPModel (with random weights) from the OFA-Sys/chinese-clip-vit-base-patch16 style configuration
310
+ >>> model = ChineseCLIPModel(configuration)
311
+
312
+ >>> # Accessing the model configuration
313
+ >>> configuration = model.config
314
+
315
+ >>> # We can also initialize a ChineseCLIPConfig from a ChineseCLIPTextConfig and a ChineseCLIPVisionConfig
316
+
317
+ >>> # Initializing a ChineseCLIPTextConfig and ChineseCLIPVisionConfig configuration
318
+ >>> config_text = ChineseCLIPTextConfig()
319
+ >>> config_vision = ChineseCLIPVisionConfig()
320
+
321
+ >>> config = ChineseCLIPConfig.from_text_vision_configs(config_text, config_vision)
322
+ ```"""
323
+
324
+ model_type = "chinese_clip"
325
+
326
+ def __init__(
327
+ self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592, **kwargs
328
+ ):
329
+ # If `_config_dict` exist, we use them for the backward compatibility.
330
+ # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
331
+ # of confusion!).
332
+ text_config_dict = kwargs.pop("text_config_dict", None)
333
+ vision_config_dict = kwargs.pop("vision_config_dict", None)
334
+
335
+ super().__init__(**kwargs)
336
+
337
+ # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
338
+ # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be the same in most
339
+ # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
340
+ if text_config_dict is not None:
341
+ if text_config is None:
342
+ text_config = {}
343
+
344
+ # This is the complete result when using `text_config_dict`.
345
+ _text_config_dict = ChineseCLIPTextConfig(**text_config_dict).to_dict()
346
+
347
+ # Give a warning if the values exist in both `_text_config_dict` and `text_config` but are different.
348
+ for key, value in _text_config_dict.items():
349
+ if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
350
+ # If specified in `text_config_dict`
351
+ if key in text_config_dict:
352
+ message = (
353
+ f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
354
+ f'The value `text_config_dict["{key}"]` will be used instead.'
355
+ )
356
+ # If inferred from default argument values (just to be super careful)
357
+ else:
358
+ message = (
359
+ f"`text_config_dict` is provided which will be used to initialize `ChineseCLIPTextConfig`. "
360
+ f'The value `text_config["{key}"]` will be overridden.'
361
+ )
362
+ logger.info(message)
363
+
364
+ # Update all values in `text_config` with the ones in `_text_config_dict`.
365
+ text_config.update(_text_config_dict)
366
+
367
+ if vision_config_dict is not None:
368
+ if vision_config is None:
369
+ vision_config = {}
370
+
371
+ # This is the complete result when using `vision_config_dict`.
372
+ _vision_config_dict = ChineseCLIPVisionConfig(**vision_config_dict).to_dict()
373
+ # convert keys to string instead of integer
374
+ if "id2label" in _vision_config_dict:
375
+ _vision_config_dict["id2label"] = {
376
+ str(key): value for key, value in _vision_config_dict["id2label"].items()
377
+ }
378
+
379
+ # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but are different.
380
+ for key, value in _vision_config_dict.items():
381
+ if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
382
+ # If specified in `vision_config_dict`
383
+ if key in vision_config_dict:
384
+ message = (
385
+ f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
386
+ f'values. The value `vision_config_dict["{key}"]` will be used instead.'
387
+ )
388
+ # If inferred from default argument values (just to be super careful)
389
+ else:
390
+ message = (
391
+ f"`vision_config_dict` is provided which will be used to initialize "
392
+ f'`ChineseCLIPVisionConfig`. The value `vision_config["{key}"]` will be overridden.'
393
+ )
394
+ logger.info(message)
395
+
396
+ # Update all values in `vision_config` with the ones in `_vision_config_dict`.
397
+ vision_config.update(_vision_config_dict)
398
+
399
+ if text_config is None:
400
+ text_config = {}
401
+ logger.info("`text_config` is `None`. Initializing the `ChineseCLIPTextConfig` with default values.")
402
+
403
+ if vision_config is None:
404
+ vision_config = {}
405
+ logger.info("`vision_config` is `None`. Initializing the `ChineseCLIPVisionConfig` with default values.")
406
+
407
+ self.text_config = ChineseCLIPTextConfig(**text_config)
408
+ self.vision_config = ChineseCLIPVisionConfig(**vision_config)
409
+
410
+ self.projection_dim = projection_dim
411
+ self.logit_scale_init_value = logit_scale_init_value
412
+ self.initializer_factor = 1.0
413
+ self.initializer_range = 0.02
414
+
415
+ @classmethod
416
+ def from_text_vision_configs(
417
+ cls, text_config: ChineseCLIPTextConfig, vision_config: ChineseCLIPVisionConfig, **kwargs
418
+ ):
419
+ r"""
420
+ Instantiate a [`ChineseCLIPConfig`] (or a derived class) from Chinese-CLIP text model configuration and
421
+ Chinese-CLIP vision model configuration. Returns:
422
+ [`ChineseCLIPConfig`]: An instance of a configuration object
423
+ """
424
+
425
+ return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
426
+
427
+
428
+ class ChineseCLIPOnnxConfig(OnnxConfig):
429
+ @property
430
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
431
+ return OrderedDict(
432
+ [
433
+ ("input_ids", {0: "batch", 1: "sequence"}),
434
+ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
435
+ ("attention_mask", {0: "batch", 1: "sequence"}),
436
+ ]
437
+ )
438
+
439
+ @property
440
+ def outputs(self) -> Mapping[str, Mapping[int, str]]:
441
+ return OrderedDict(
442
+ [
443
+ ("logits_per_image", {0: "batch"}),
444
+ ("logits_per_text", {0: "batch"}),
445
+ ("text_embeds", {0: "batch"}),
446
+ ("image_embeds", {0: "batch"}),
447
+ ]
448
+ )
449
+
450
+ @property
451
+ def atol_for_validation(self) -> float:
452
+ return 1e-4
453
+
454
+ def generate_dummy_inputs(
455
+ self,
456
+ processor: "ProcessorMixin",
457
+ batch_size: int = -1,
458
+ seq_length: int = -1,
459
+ framework: Optional["TensorType"] = None,
460
+ ) -> Mapping[str, Any]:
461
+ text_input_dict = super().generate_dummy_inputs(
462
+ processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework
463
+ )
464
+ image_input_dict = super().generate_dummy_inputs(
465
+ processor.image_processor, batch_size=batch_size, framework=framework
466
+ )
467
+ return {**text_input_dict, **image_input_dict}
468
+
469
+ @property
470
+ def default_onnx_opset(self) -> int:
471
+ return 14
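Before moving on to the conversion script, a hedged usage sketch of the configuration classes defined in this file; it relies only on the defaults and methods shown above (`from_text_vision_configs`, `projection_dim`, `logit_scale_init_value`):

```python
from transformers import ChineseCLIPConfig, ChineseCLIPTextConfig, ChineseCLIPVisionConfig

# Build the two sub-configs with their defaults; the vision values mirror the ones above.
config_text = ChineseCLIPTextConfig()
config_vision = ChineseCLIPVisionConfig(image_size=224, patch_size=32)

# `from_text_vision_configs` serializes both sub-configs into a single composite config.
config = ChineseCLIPConfig.from_text_vision_configs(config_text, config_vision)

print(config.projection_dim)            # 512 (default)
print(config.logit_scale_init_value)    # 2.6592 (default)
print(config.vision_config.patch_size)  # 32
```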
env-llmeval/lib/python3.10/site-packages/transformers/models/chinese_clip/convert_chinese_clip_original_pytorch_to_hf.py ADDED
@@ -0,0 +1,134 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The OFA-Sys Team Authors and The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import argparse
17
+
18
+ import torch
19
+
20
+ from transformers import ChineseCLIPConfig, ChineseCLIPModel
21
+
22
+
23
+ def copy_attn_layer(hf_attn_layer, pt_weights, prefix):
24
+ q_proj, k_proj, v_proj = pt_weights[f"{prefix}.in_proj_weight"].chunk(3, dim=0)
25
+ q_proj_bias, k_proj_bias, v_proj_bias = pt_weights[f"{prefix}.in_proj_bias"].chunk(3, dim=0)
26
+
27
+ out_proj_weights = pt_weights[f"{prefix}.out_proj.weight"]
28
+ out_proj_bias = pt_weights[f"{prefix}.out_proj.bias"]
29
+
30
+ hf_attn_layer.q_proj.weight.data = q_proj
31
+ hf_attn_layer.q_proj.bias.data = q_proj_bias
32
+
33
+ hf_attn_layer.k_proj.weight.data = k_proj
34
+ hf_attn_layer.k_proj.bias.data = k_proj_bias
35
+
36
+ hf_attn_layer.v_proj.weight.data = v_proj
37
+ hf_attn_layer.v_proj.bias.data = v_proj_bias
38
+
39
+ hf_attn_layer.out_proj.weight.data = out_proj_weights
40
+ hf_attn_layer.out_proj.bias.data = out_proj_bias
41
+
42
+
43
+ def copy_mlp(hf_mlp, pt_weights, prefix):
44
+ copy_linear(hf_mlp.fc1, pt_weights, f"{prefix}.c_fc")
45
+ copy_linear(hf_mlp.fc2, pt_weights, f"{prefix}.c_proj")
46
+
47
+
48
+ def copy_linear(hf_linear, pt_weights, prefix):
49
+ hf_linear.weight.data = pt_weights[f"{prefix}.weight"].data
50
+ hf_linear.bias.data = pt_weights[f"{prefix}.bias"].data
51
+
52
+
53
+ def copy_layer(hf_layer, pt_weights, prefix):
54
+ # copy layer norms
55
+ copy_linear(hf_layer.layer_norm1, pt_weights, f"{prefix}.ln_1")
56
+ copy_linear(hf_layer.layer_norm2, pt_weights, f"{prefix}.ln_2")
57
+
58
+ # copy MLP
59
+ copy_mlp(hf_layer.mlp, pt_weights, f"{prefix}.mlp")
60
+
61
+ # copy attn
62
+ copy_attn_layer(hf_layer.self_attn, pt_weights, f"{prefix}.attn")
63
+
64
+
65
+ def copy_layers(hf_layers, pt_weights, prefix):
66
+ for layer_id, hf_layer in enumerate(hf_layers):
67
+ copy_layer(hf_layer, pt_weights, f"{prefix}.{layer_id}")
68
+
69
+
70
+ def copy_text_model_and_projection(hf_model, pt_weights):
71
+ # copy projection
72
+ hf_model.text_projection.weight.data = pt_weights["text_projection"].data.T
73
+
74
+ # copy text encoder
75
+ for name, param in hf_model.text_model.named_parameters():
76
+ param.data = pt_weights[f"bert.{name}"].data
77
+
78
+
79
+ def copy_vision_model_and_projection(hf_model, pt_weights):
80
+ # copy projection
81
+ hf_model.visual_projection.weight.data = pt_weights["visual.proj"].data.T
82
+
83
+ # copy layer norms
84
+ copy_linear(hf_model.vision_model.pre_layrnorm, pt_weights, "visual.ln_pre")
85
+ copy_linear(hf_model.vision_model.post_layernorm, pt_weights, "visual.ln_post")
86
+
87
+ # copy embeddings
88
+ hf_model.vision_model.embeddings.patch_embedding.weight.data = pt_weights["visual.conv1.weight"].data
89
+ hf_model.vision_model.embeddings.class_embedding.data = pt_weights["visual.class_embedding"].data
90
+ hf_model.vision_model.embeddings.position_embedding.weight.data = pt_weights["visual.positional_embedding"].data
91
+
92
+ # copy encoder
93
+ copy_layers(hf_model.vision_model.encoder.layers, pt_weights, "visual.transformer.resblocks")
94
+
95
+
96
+ @torch.no_grad()
97
+ def convert_chinese_clip_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
98
+ """
99
+ Copy/paste/tweak model's weights to transformers design.
100
+ """
101
+
102
+ assert config_path is not None, "Please specify the ChineseCLIP model config of the corresponding model size."
103
+ config = ChineseCLIPConfig.from_pretrained(config_path)
104
+
105
+ hf_model = ChineseCLIPModel(config).eval()
106
+
107
+ pt_weights = torch.load(checkpoint_path, map_location="cpu")["state_dict"]
108
+ pt_weights = {(name[7:] if name.startswith("module.") else name): value for name, value in pt_weights.items()}
109
+
110
+ copy_text_model_and_projection(hf_model, pt_weights)
111
+ copy_vision_model_and_projection(hf_model, pt_weights)
112
+ hf_model.logit_scale.data = pt_weights["logit_scale"].data
113
+
114
+ hf_model.save_pretrained(pytorch_dump_folder_path)
115
+
116
+
117
+ if __name__ == "__main__":
118
+ parser = argparse.ArgumentParser()
119
+ parser.add_argument(
120
+ "--pytorch_dump_folder_path",
121
+ default=None,
122
+ type=str,
123
+ help="Path to the output folder storing converted hf PyTorch model.",
124
+ )
125
+ parser.add_argument(
126
+ "--checkpoint_path", default=None, type=str, help="Path to original github format ChineseCLIP checkpoint."
127
+ )
128
+ parser.add_argument(
129
+ "--config_path", default=None, required=True, type=str, help="Path to hf config.json of model to convert."
130
+ )
131
+ args = parser.parse_args()
132
+
133
+ convert_chinese_clip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
134
+ print("The conversion is finished!")
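As a hedged illustration only, the conversion entry point above can be driven either through the argparse interface or directly in-process; every path below is a placeholder, not a real file:

```python
from transformers import ChineseCLIPModel

# Placeholders -- substitute a real OFA-Sys checkpoint, HF config, and output directory.
convert_chinese_clip_checkpoint(
    checkpoint_path="clip_cn_vit-b-16.pt",         # original Chinese-CLIP checkpoint (state_dict format)
    pytorch_dump_folder_path="./chinese-clip-hf",  # folder that will receive the converted HF weights
    config_path="config.json",                     # HF config.json matching the model size
)

# The converted weights can then be reloaded with the standard HF API.
model = ChineseCLIPModel.from_pretrained("./chinese-clip-hf")
```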
env-llmeval/lib/python3.10/site-packages/transformers/models/chinese_clip/feature_extraction_chinese_clip.py ADDED
@@ -0,0 +1,33 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The OFA-Sys Team Authors and The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Feature extractor class for Chinese-CLIP."""
16
+
17
+ import warnings
18
+
19
+ from ...utils import logging
20
+ from .image_processing_chinese_clip import ChineseCLIPImageProcessor
21
+
22
+
23
+ logger = logging.get_logger(__name__)
24
+
25
+
26
+ class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
27
+ def __init__(self, *args, **kwargs) -> None:
28
+ warnings.warn(
29
+ "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
30
+ " Please use ChineseCLIPImageProcessor instead.",
31
+ FutureWarning,
32
+ )
33
+ super().__init__(*args, **kwargs)
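A short, hedged sketch of what this shim implies for callers (assuming the vision extras, i.e. Pillow, are installed): instantiating the deprecated class emits a `FutureWarning`, and new code should construct `ChineseCLIPImageProcessor` directly.

```python
import warnings

from transformers import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor

# The deprecated class still works, but warns that it will be removed in Transformers v5.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    feature_extractor = ChineseCLIPFeatureExtractor()
assert any(issubclass(w.category, FutureWarning) for w in caught)

# It is only a thin subclass, so switching to the image processor is a drop-in change.
assert isinstance(feature_extractor, ChineseCLIPImageProcessor)
image_processor = ChineseCLIPImageProcessor()
```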
env-llmeval/lib/python3.10/site-packages/transformers/models/chinese_clip/image_processing_chinese_clip.py ADDED
@@ -0,0 +1,331 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The OFA-Sys Team Authors and The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Image processor class for Chinese-CLIP."""
16
+
17
+ from typing import Dict, List, Optional, Union
18
+
19
+ import numpy as np
20
+
21
+ from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
22
+ from ...image_transforms import (
23
+ convert_to_rgb,
24
+ get_resize_output_image_size,
25
+ resize,
26
+ to_channel_dimension_format,
27
+ )
28
+ from ...image_utils import (
29
+ OPENAI_CLIP_MEAN,
30
+ OPENAI_CLIP_STD,
31
+ ChannelDimension,
32
+ ImageInput,
33
+ PILImageResampling,
34
+ infer_channel_dimension_format,
35
+ is_scaled_image,
36
+ make_list_of_images,
37
+ to_numpy_array,
38
+ valid_images,
39
+ validate_kwargs,
40
+ validate_preprocess_arguments,
41
+ )
42
+ from ...utils import TensorType, is_vision_available, logging
43
+
44
+
45
+ logger = logging.get_logger(__name__)
46
+
47
+
48
+ if is_vision_available():
49
+ import PIL
50
+
51
+
52
+ class ChineseCLIPImageProcessor(BaseImageProcessor):
53
+ r"""
54
+ Constructs a Chinese-CLIP image processor.
55
+
56
+ Args:
57
+ do_resize (`bool`, *optional*, defaults to `True`):
58
+ Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
59
+ `do_resize` in the `preprocess` method.
60
+ size (`Dict[str, int]`, *optional*, defaults to `{"shortest_edge": 224}`):
61
+ Size of the image after resizing. The shortest edge of the image is resized to size["shortest_edge"], with
62
+ the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess`
63
+ method.
64
+ resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
65
+ Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
66
+ do_center_crop (`bool`, *optional*, defaults to `True`):
67
+ Whether to center crop the image to the specified `crop_size`. Can be overridden by `do_center_crop` in the
68
+ `preprocess` method.
69
+ crop_size (`Dict[str, int]` *optional*, defaults to 224):
70
+ Size of the output image after applying `center_crop`. Can be overridden by `crop_size` in the `preprocess`
71
+ method.
72
+ do_rescale (`bool`, *optional*, defaults to `True`):
73
+ Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
74
+ the `preprocess` method.
75
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
76
+ Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
77
+ method.
78
+ do_normalize (`bool`, *optional*, defaults to `True`):
79
+ Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method.
80
+ image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
81
+ Mean to use if normalizing the image. This is a float or list of floats the length of the number of
82
+ channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
83
+ image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
84
+ Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
85
+ number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
86
+ Can be overridden by the `image_std` parameter in the `preprocess` method.
87
+ do_convert_rgb (`bool`, *optional*, defaults to `True`):
88
+ Whether to convert the image to RGB.
89
+ """
90
+
91
+ model_input_names = ["pixel_values"]
92
+
93
+ def __init__(
94
+ self,
95
+ do_resize: bool = True,
96
+ size: Dict[str, int] = None,
97
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
98
+ do_center_crop: bool = True,
99
+ crop_size: Dict[str, int] = None,
100
+ do_rescale: bool = True,
101
+ rescale_factor: Union[int, float] = 1 / 255,
102
+ do_normalize: bool = True,
103
+ image_mean: Optional[Union[float, List[float]]] = None,
104
+ image_std: Optional[Union[float, List[float]]] = None,
105
+ do_convert_rgb: bool = True,
106
+ **kwargs,
107
+ ) -> None:
108
+ super().__init__(**kwargs)
109
+ size = size if size is not None else {"shortest_edge": 224}
110
+ size = get_size_dict(size, default_to_square=False)
111
+ crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
112
+ crop_size = get_size_dict(crop_size)
113
+
114
+ self.do_resize = do_resize
115
+ self.size = size
116
+ self.resample = resample
117
+ self.do_center_crop = do_center_crop
118
+ self.crop_size = crop_size
119
+ self.do_rescale = do_rescale
120
+ self.rescale_factor = rescale_factor
121
+ self.do_normalize = do_normalize
122
+ self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
123
+ self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
124
+ self.do_convert_rgb = do_convert_rgb
125
+ self._valid_processor_keys = [
126
+ "images",
127
+ "do_resize",
128
+ "size",
129
+ "resample",
130
+ "do_center_crop",
131
+ "crop_size",
132
+ "do_rescale",
133
+ "rescale_factor",
134
+ "do_normalize",
135
+ "image_mean",
136
+ "image_std",
137
+ "do_convert_rgb",
138
+ "return_tensors",
139
+ "data_format",
140
+ "input_data_format",
141
+ ]
142
+
143
+ def resize(
144
+ self,
145
+ image: np.ndarray,
146
+ size: Dict[str, int],
147
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
148
+ data_format: Optional[Union[str, ChannelDimension]] = None,
149
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
150
+ **kwargs,
151
+ ) -> np.ndarray:
152
+ """
153
+ Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge
154
+ resized to keep the input aspect ratio.
155
+
156
+ Args:
157
+ image (`np.ndarray`):
158
+ Image to resize.
159
+ size (`Dict[str, int]`):
160
+ Size of the output image.
161
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
162
+ Resampling filter to use when resizing the image.
163
+ data_format (`str` or `ChannelDimension`, *optional*):
164
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
165
+ input_data_format (`ChannelDimension` or `str`, *optional*):
166
+ The channel dimension format of the input image. If not provided, it will be inferred from the input
167
+ image.
168
+ """
169
+ size = get_size_dict(size, default_to_square=False)
170
+ output_size = get_resize_output_image_size(
171
+ image, size=(size["height"], size["width"]), default_to_square=False, input_data_format=input_data_format
172
+ )
173
+ return resize(
174
+ image,
175
+ size=output_size,
176
+ resample=resample,
177
+ data_format=data_format,
178
+ input_data_format=input_data_format,
179
+ **kwargs,
180
+ )
181
+
182
+ def preprocess(
183
+ self,
184
+ images: ImageInput,
185
+ do_resize: bool = None,
186
+ size: Dict[str, int] = None,
187
+ resample: PILImageResampling = None,
188
+ do_center_crop: bool = None,
189
+ crop_size: int = None,
190
+ do_rescale: bool = None,
191
+ rescale_factor: float = None,
192
+ do_normalize: bool = None,
193
+ image_mean: Optional[Union[float, List[float]]] = None,
194
+ image_std: Optional[Union[float, List[float]]] = None,
195
+ do_convert_rgb: bool = None,
196
+ return_tensors: Optional[Union[str, TensorType]] = None,
197
+ data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
198
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
199
+ **kwargs,
200
+ ) -> BatchFeature:
201
+ """
202
+ Preprocess an image or batch of images.
203
+
204
+ Args:
205
+ images (`ImageInput`):
206
+ Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
207
+ passing in images with pixel values between 0 and 1, set `do_rescale=False`.
208
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
209
+ Whether to resize the image.
210
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
211
+ Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with
212
+ the longest edge resized to keep the input aspect ratio.
213
+ resample (`int`, *optional*, defaults to `self.resample`):
214
+ Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
215
+ has an effect if `do_resize` is set to `True`.
216
+ do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
217
+ Whether to center crop the image.
218
+ crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
219
+ Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.
220
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
221
+ Whether to rescale the image.
222
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
223
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
224
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
225
+ Whether to normalize the image.
226
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
227
+ Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
228
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
229
+ Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
230
+ `True`.
231
+ do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
232
+ Whether to convert the image to RGB.
233
+ return_tensors (`str` or `TensorType`, *optional*):
234
+ The type of tensors to return. Can be one of:
235
+ - Unset: Return a list of `np.ndarray`.
236
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
237
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
238
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
239
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
240
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
241
+ The channel dimension format for the output image. Can be one of:
242
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
243
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
244
+ - Unset: Use the channel dimension format of the input image.
245
+ input_data_format (`ChannelDimension` or `str`, *optional*):
246
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
247
+ from the input image. Can be one of:
248
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
249
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
250
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
251
+ """
252
+ do_resize = do_resize if do_resize is not None else self.do_resize
253
+ size = size if size is not None else self.size
254
+ size = get_size_dict(size, default_to_square=False)
255
+ resample = resample if resample is not None else self.resample
256
+ do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
257
+ crop_size = crop_size if crop_size is not None else self.crop_size
258
+ crop_size = get_size_dict(crop_size)
259
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
260
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
261
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
262
+ image_mean = image_mean if image_mean is not None else self.image_mean
263
+ image_std = image_std if image_std is not None else self.image_std
264
+ do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
265
+
266
+ images = make_list_of_images(images)
267
+
268
+ validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
269
+
270
+ if not valid_images(images):
271
+ raise ValueError(
272
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
273
+ "torch.Tensor, tf.Tensor or jax.ndarray."
274
+ )
275
+ validate_preprocess_arguments(
276
+ do_rescale=do_rescale,
277
+ rescale_factor=rescale_factor,
278
+ do_normalize=do_normalize,
279
+ image_mean=image_mean,
280
+ image_std=image_std,
281
+ do_center_crop=do_center_crop,
282
+ crop_size=crop_size,
283
+ do_resize=do_resize,
284
+ size=size,
285
+ resample=resample,
286
+ )
287
+ if do_convert_rgb:
288
+ images = [convert_to_rgb(image) for image in images]
289
+
290
+ # All transformations expect numpy arrays.
291
+ images = [to_numpy_array(image) for image in images]
292
+
293
+ if is_scaled_image(images[0]) and do_rescale:
294
+ logger.warning_once(
295
+ "It looks like you are trying to rescale already rescaled images. If the input"
296
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
297
+ )
298
+
299
+ if input_data_format is None:
300
+ # We assume that all images have the same channel dimension format.
301
+ input_data_format = infer_channel_dimension_format(images[0])
302
+
303
+ if do_resize:
304
+ images = [
305
+ self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
306
+ for image in images
307
+ ]
308
+
309
+ if do_center_crop:
310
+ images = [
311
+ self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) for image in images
312
+ ]
313
+
314
+ if do_rescale:
315
+ images = [
316
+ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
317
+ for image in images
318
+ ]
319
+
320
+ if do_normalize:
321
+ images = [
322
+ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
323
+ for image in images
324
+ ]
325
+
326
+ images = [
327
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
328
+ ]
329
+
330
+ data = {"pixel_values": images}
331
+ return BatchFeature(data=data, tensor_type=return_tensors)
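To make the pipeline above concrete, here is a hedged usage sketch (it assumes Pillow and PyTorch are available; the random array and the explicit 224x224 sizes are illustrative choices, not values taken from a released checkpoint):

```python
import numpy as np

from transformers import ChineseCLIPImageProcessor

# Any PIL image or uint8 numpy array in [0, 255] works; this dummy image is 480x640 RGB.
image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)

image_processor = ChineseCLIPImageProcessor(
    size={"height": 224, "width": 224},       # resize target
    crop_size={"height": 224, "width": 224},  # center-crop target
)
inputs = image_processor(images=image, return_tensors="pt")

# The default `data_format=ChannelDimension.FIRST` yields a channels-first batch.
print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])
```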
env-llmeval/lib/python3.10/site-packages/transformers/models/chinese_clip/modeling_chinese_clip.py ADDED
@@ -0,0 +1,1564 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The OFA-Sys Team Authors and The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch Chinese-CLIP model."""
16
+
17
+
18
+ import math
19
+ from dataclasses import dataclass
20
+ from typing import Any, List, Optional, Tuple, Union
21
+
22
+ import torch
23
+ import torch.utils.checkpoint
24
+ from torch import nn
25
+
26
+ from ...activations import ACT2FN
27
+ from ...modeling_outputs import (
28
+ BaseModelOutput,
29
+ BaseModelOutputWithPastAndCrossAttentions,
30
+ BaseModelOutputWithPooling,
31
+ BaseModelOutputWithPoolingAndCrossAttentions,
32
+ )
33
+ from ...modeling_utils import PreTrainedModel
34
+ from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
35
+ from ...utils import (
36
+ ModelOutput,
37
+ add_code_sample_docstrings,
38
+ add_start_docstrings,
39
+ add_start_docstrings_to_model_forward,
40
+ logging,
41
+ replace_return_docstrings,
42
+ )
43
+ from .configuration_chinese_clip import ChineseCLIPConfig, ChineseCLIPTextConfig, ChineseCLIPVisionConfig
44
+
45
+
46
+ logger = logging.get_logger(__name__)
47
+
48
+ _CHECKPOINT_FOR_DOC = "OFA-Sys/chinese-clip-vit-base-patch16"
49
+ _CONFIG_FOR_DOC = "ChineseCLIPConfig"
50
+
51
+ CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST = [
52
+ "OFA-Sys/chinese-clip-vit-base-patch16",
53
+ # See all Chinese-CLIP models at https://huggingface.co/models?filter=chinese_clip
54
+ ]
55
+
56
+
57
+ # https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/CLIP.html
58
+ # Copied from transformers.models.clip.modeling_clip.contrastive_loss
59
+ def contrastive_loss(logits: torch.Tensor) -> torch.Tensor:
60
+ return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device))
61
+
62
+
63
+ def chinese_clip_loss(similarity: torch.Tensor) -> torch.Tensor:
64
+ caption_loss = contrastive_loss(similarity)
65
+ image_loss = contrastive_loss(similarity.t())
66
+ return (caption_loss + image_loss) / 2.0
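A tiny, self-contained numerical check of the symmetric loss above (the toy 3x3 similarity matrix is purely illustrative): when the diagonal dominates, both cross-entropy terms see the matched pair as the correct class and the averaged loss is close to zero.

```python
import torch
from torch import nn


def contrastive_loss(logits: torch.Tensor) -> torch.Tensor:
    # Same definition as above: row i should score highest in column i.
    return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device))


def chinese_clip_loss(similarity: torch.Tensor) -> torch.Tensor:
    # Average the caption->image and image->caption directions.
    return (contrastive_loss(similarity) + contrastive_loss(similarity.t())) / 2.0


similarity = 10.0 * torch.eye(3)              # well-aligned image/text pairs
print(chinese_clip_loss(similarity))          # ~0
print(chinese_clip_loss(similarity.flip(0)))  # much larger: most rows favour the wrong column
```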
67
+
68
+
69
+ @dataclass
70
+ class ChineseCLIPOutput(ModelOutput):
71
+ """
72
+ Args:
73
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
74
+ Contrastive loss for image-text similarity.
75
+ logits_per_image:(`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
76
+ The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
77
+ similarity scores.
78
+ logits_per_text:(`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
79
+ The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
80
+ similarity scores.
81
+ text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
82
+ The text embeddings obtained by applying the projection layer to the pooled output of
83
+ [`ChineseCLIPTextModel`].
84
+ image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
85
+ The image embeddings obtained by applying the projection layer to the pooled output of
86
+ [`ChineseCLIPVisionModel`].
87
+ text_model_output(`BaseModelOutputWithPoolingAndCrossAttentions`):
88
+ The output of the [`ChineseCLIPTextModel`].
89
+ vision_model_output(`BaseModelOutputWithPoolingAndCrossAttentions`):
90
+ The output of the [`ChineseCLIPVisionModel`].
91
+ """
92
+
93
+ loss: Optional[torch.FloatTensor] = None
94
+ logits_per_image: torch.FloatTensor = None
95
+ logits_per_text: torch.FloatTensor = None
96
+ text_embeds: torch.FloatTensor = None
97
+ image_embeds: torch.FloatTensor = None
98
+ text_model_output: BaseModelOutputWithPoolingAndCrossAttentions = None
99
+ vision_model_output: BaseModelOutputWithPoolingAndCrossAttentions = None
100
+
101
+ def to_tuple(self) -> Tuple[Any]:
102
+ return tuple(
103
+ self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
104
+ for k in self.keys()
105
+ )
106
+
107
+
108
+ # Copied from transformers.models.bert.modeling_bert.BertEmbeddings with Bert->ChineseCLIPText
109
+ class ChineseCLIPTextEmbeddings(nn.Module):
110
+ """Construct the embeddings from word, position and token_type embeddings."""
111
+
112
+ def __init__(self, config):
113
+ super().__init__()
114
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
115
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
116
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
117
+
118
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
119
+ # any TensorFlow checkpoint file
120
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
121
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
122
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
123
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
124
+ self.register_buffer(
125
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
126
+ )
127
+ self.register_buffer(
128
+ "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
129
+ )
130
+
131
+ def forward(
132
+ self,
133
+ input_ids: Optional[torch.LongTensor] = None,
134
+ token_type_ids: Optional[torch.LongTensor] = None,
135
+ position_ids: Optional[torch.LongTensor] = None,
136
+ inputs_embeds: Optional[torch.FloatTensor] = None,
137
+ past_key_values_length: int = 0,
138
+ ) -> torch.Tensor:
139
+ if input_ids is not None:
140
+ input_shape = input_ids.size()
141
+ else:
142
+ input_shape = inputs_embeds.size()[:-1]
143
+
144
+ seq_length = input_shape[1]
145
+
146
+ if position_ids is None:
147
+ position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
148
+
149
+ # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs
150
+ # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves
151
+ # issue #5664
152
+ if token_type_ids is None:
153
+ if hasattr(self, "token_type_ids"):
154
+ buffered_token_type_ids = self.token_type_ids[:, :seq_length]
155
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
156
+ token_type_ids = buffered_token_type_ids_expanded
157
+ else:
158
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
159
+
160
+ if inputs_embeds is None:
161
+ inputs_embeds = self.word_embeddings(input_ids)
162
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
163
+
164
+ embeddings = inputs_embeds + token_type_embeddings
165
+ if self.position_embedding_type == "absolute":
166
+ position_embeddings = self.position_embeddings(position_ids)
167
+ embeddings += position_embeddings
168
+ embeddings = self.LayerNorm(embeddings)
169
+ embeddings = self.dropout(embeddings)
170
+ return embeddings
171
+
172
+
173
+ # Copied from transformers.models.clip.modeling_clip.CLIPVisionEmbeddings with CLIP->ChineseCLIP
174
+ class ChineseCLIPVisionEmbeddings(nn.Module):
175
+ def __init__(self, config: ChineseCLIPVisionConfig):
176
+ super().__init__()
177
+ self.config = config
178
+ self.embed_dim = config.hidden_size
179
+ self.image_size = config.image_size
180
+ self.patch_size = config.patch_size
181
+
182
+ self.class_embedding = nn.Parameter(torch.randn(self.embed_dim))
183
+
184
+ self.patch_embedding = nn.Conv2d(
185
+ in_channels=config.num_channels,
186
+ out_channels=self.embed_dim,
187
+ kernel_size=self.patch_size,
188
+ stride=self.patch_size,
189
+ bias=False,
190
+ )
191
+
192
+ self.num_patches = (self.image_size // self.patch_size) ** 2
193
+ self.num_positions = self.num_patches + 1
194
+ self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
195
+ self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False)
196
+
197
+ def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
198
+ batch_size = pixel_values.shape[0]
199
+ target_dtype = self.patch_embedding.weight.dtype
200
+ patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid]
201
+ patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
202
+
203
+ class_embeds = self.class_embedding.expand(batch_size, 1, -1)
204
+ embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
205
+ embeddings = embeddings + self.position_embedding(self.position_ids)
206
+ return embeddings
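A quick, hedged sanity check of the patch arithmetic above, using the vision defaults that appear earlier in this diff (image_size=224, patch_size=32, hence a 7x7 patch grid plus one class token):

```python
import torch

from transformers import ChineseCLIPVisionConfig
from transformers.models.chinese_clip.modeling_chinese_clip import ChineseCLIPVisionEmbeddings

config = ChineseCLIPVisionConfig(image_size=224, patch_size=32)
embeddings = ChineseCLIPVisionEmbeddings(config)

pixel_values = torch.randn(2, 3, 224, 224)  # (batch, channels, height, width)
out = embeddings(pixel_values)

print(embeddings.num_patches)    # 49 == (224 // 32) ** 2
print(embeddings.num_positions)  # 50 == 49 patches + 1 class embedding
print(out.shape)                 # torch.Size([2, 50, 768]) with the default hidden_size
```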
207
+
208
+
209
+ # Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->ChineseCLIPText
210
+ class ChineseCLIPTextSelfAttention(nn.Module):
211
+ def __init__(self, config, position_embedding_type=None):
212
+ super().__init__()
213
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
214
+ raise ValueError(
215
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
216
+ f"heads ({config.num_attention_heads})"
217
+ )
218
+
219
+ self.num_attention_heads = config.num_attention_heads
220
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
221
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
222
+
223
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
224
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
225
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
226
+
227
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
228
+ self.position_embedding_type = position_embedding_type or getattr(
229
+ config, "position_embedding_type", "absolute"
230
+ )
231
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
232
+ self.max_position_embeddings = config.max_position_embeddings
233
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
234
+
235
+ self.is_decoder = config.is_decoder
236
+
237
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
238
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
239
+ x = x.view(new_x_shape)
240
+ return x.permute(0, 2, 1, 3)
241
+
242
+ def forward(
243
+ self,
244
+ hidden_states: torch.Tensor,
245
+ attention_mask: Optional[torch.FloatTensor] = None,
246
+ head_mask: Optional[torch.FloatTensor] = None,
247
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
248
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
249
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
250
+ output_attentions: Optional[bool] = False,
251
+ ) -> Tuple[torch.Tensor]:
252
+ mixed_query_layer = self.query(hidden_states)
253
+
254
+ # If this is instantiated as a cross-attention module, the keys
255
+ # and values come from an encoder; the attention mask needs to be
256
+ # such that the encoder's padding tokens are not attended to.
257
+ is_cross_attention = encoder_hidden_states is not None
258
+
259
+ if is_cross_attention and past_key_value is not None:
260
+ # reuse k,v, cross_attentions
261
+ key_layer = past_key_value[0]
262
+ value_layer = past_key_value[1]
263
+ attention_mask = encoder_attention_mask
264
+ elif is_cross_attention:
265
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
266
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
267
+ attention_mask = encoder_attention_mask
268
+ elif past_key_value is not None:
269
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
270
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
271
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
272
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
273
+ else:
274
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
275
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
276
+
277
+ query_layer = self.transpose_for_scores(mixed_query_layer)
278
+
279
+ use_cache = past_key_value is not None
280
+ if self.is_decoder:
281
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
282
+ # Further calls to cross_attention layer can then reuse all cross-attention
283
+ # key/value_states (first "if" case)
284
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
285
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
286
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
287
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
288
+ past_key_value = (key_layer, value_layer)
289
+
290
+ # Take the dot product between "query" and "key" to get the raw attention scores.
291
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
292
+
293
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
294
+ query_length, key_length = query_layer.shape[2], key_layer.shape[2]
295
+ if use_cache:
296
+ position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
297
+ -1, 1
298
+ )
299
+ else:
300
+ position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
301
+ position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
302
+ distance = position_ids_l - position_ids_r
303
+
304
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
305
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
306
+
307
+ if self.position_embedding_type == "relative_key":
308
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
309
+ attention_scores = attention_scores + relative_position_scores
310
+ elif self.position_embedding_type == "relative_key_query":
311
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
312
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
313
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
314
+
315
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
316
+ if attention_mask is not None:
317
+ # Apply the attention mask (precomputed for all layers in the ChineseCLIPTextModel forward() function)
318
+ attention_scores = attention_scores + attention_mask
319
+
320
+ # Normalize the attention scores to probabilities.
321
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
322
+
323
+ # This is actually dropping out entire tokens to attend to, which might
324
+ # seem a bit unusual, but is taken from the original Transformer paper.
325
+ attention_probs = self.dropout(attention_probs)
326
+
327
+ # Mask heads if we want to
328
+ if head_mask is not None:
329
+ attention_probs = attention_probs * head_mask
330
+
331
+ context_layer = torch.matmul(attention_probs, value_layer)
332
+
333
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
334
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
335
+ context_layer = context_layer.view(new_context_layer_shape)
336
+
337
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
338
+
339
+ if self.is_decoder:
340
+ outputs = outputs + (past_key_value,)
341
+ return outputs
342
+
343
+
344
+ # Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->ChineseCLIPText
345
+ class ChineseCLIPTextSelfOutput(nn.Module):
346
+ def __init__(self, config):
347
+ super().__init__()
348
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
349
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
350
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
351
+
352
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
353
+ hidden_states = self.dense(hidden_states)
354
+ hidden_states = self.dropout(hidden_states)
355
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
356
+ return hidden_states
357
+
358
+
359
+ # Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->ChineseCLIPText
360
+ class ChineseCLIPTextAttention(nn.Module):
361
+ def __init__(self, config, position_embedding_type=None):
362
+ super().__init__()
363
+ self.self = ChineseCLIPTextSelfAttention(config, position_embedding_type=position_embedding_type)
364
+ self.output = ChineseCLIPTextSelfOutput(config)
365
+ self.pruned_heads = set()
366
+
367
+ def prune_heads(self, heads):
368
+ if len(heads) == 0:
369
+ return
370
+ heads, index = find_pruneable_heads_and_indices(
371
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
372
+ )
373
+
374
+ # Prune linear layers
375
+ self.self.query = prune_linear_layer(self.self.query, index)
376
+ self.self.key = prune_linear_layer(self.self.key, index)
377
+ self.self.value = prune_linear_layer(self.self.value, index)
378
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
379
+
380
+ # Update hyper params and store pruned heads
381
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
382
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
383
+ self.pruned_heads = self.pruned_heads.union(heads)
384
+
385
+ def forward(
386
+ self,
387
+ hidden_states: torch.Tensor,
388
+ attention_mask: Optional[torch.FloatTensor] = None,
389
+ head_mask: Optional[torch.FloatTensor] = None,
390
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
391
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
392
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
393
+ output_attentions: Optional[bool] = False,
394
+ ) -> Tuple[torch.Tensor]:
395
+ self_outputs = self.self(
396
+ hidden_states,
397
+ attention_mask,
398
+ head_mask,
399
+ encoder_hidden_states,
400
+ encoder_attention_mask,
401
+ past_key_value,
402
+ output_attentions,
403
+ )
404
+ attention_output = self.output(self_outputs[0], hidden_states)
405
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
406
+ return outputs
407
+
408
+
409
+ class ChineseCLIPVisionAttention(nn.Module):
410
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
411
+
412
+ def __init__(self, config):
413
+ super().__init__()
414
+ self.config = config
415
+ self.embed_dim = config.hidden_size
416
+ self.num_heads = config.num_attention_heads
417
+ self.head_dim = self.embed_dim // self.num_heads
418
+ if self.head_dim * self.num_heads != self.embed_dim:
419
+ raise ValueError(
420
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
421
+ f" {self.num_heads})."
422
+ )
423
+ self.scale = self.head_dim**-0.5
424
+ self.dropout = config.attention_dropout
425
+
426
+ self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
427
+ self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
428
+ self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
429
+ self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)
430
+
431
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
432
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
433
+
434
+ def forward(
435
+ self,
436
+ hidden_states: torch.Tensor,
437
+ output_attentions: Optional[bool] = False,
438
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
439
+ """Input shape: Batch x Time x Channel"""
440
+
441
+ bsz, tgt_len, embed_dim = hidden_states.size()
442
+
443
+ # get query proj
444
+ query_states = self.q_proj(hidden_states) * self.scale
445
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
446
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
447
+
448
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
449
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
450
+ key_states = key_states.view(*proj_shape)
451
+ value_states = value_states.view(*proj_shape)
452
+
453
+ src_len = key_states.size(1)
454
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
455
+
456
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
457
+ raise ValueError(
458
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
459
+ f" {attn_weights.size()}"
460
+ )
461
+
462
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
463
+
464
+ if output_attentions:
465
+ # this operation is a bit awkward, but it's required to
466
+ # make sure that attn_weights keeps its gradient.
467
+ # In order to do so, attn_weights have to be reshaped
468
+ # twice and have to be reused in the following
469
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
470
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
471
+ else:
472
+ attn_weights_reshaped = None
473
+
474
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
475
+
476
+ attn_output = torch.bmm(attn_probs, value_states)
477
+
478
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
479
+ raise ValueError(
480
+ f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
481
+ f" {attn_output.size()}"
482
+ )
483
+
484
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
485
+ attn_output = attn_output.transpose(1, 2)
486
+ attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)
487
+
488
+ attn_output = self.out_proj(attn_output)
489
+
490
+ return attn_output, attn_weights_reshaped
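For completeness, a hedged shape check of the vision attention block above; the tiny config values are hypothetical, chosen only to keep the example fast:

```python
import torch

from transformers import ChineseCLIPVisionConfig
from transformers.models.chinese_clip.modeling_chinese_clip import ChineseCLIPVisionAttention

config = ChineseCLIPVisionConfig(hidden_size=64, num_attention_heads=4, intermediate_size=128)
attention = ChineseCLIPVisionAttention(config)

hidden_states = torch.randn(2, 50, 64)  # (batch, 49 patches + class token, hidden)
attn_output, attn_weights = attention(hidden_states, output_attentions=True)

print(attn_output.shape)   # torch.Size([2, 50, 64])
print(attn_weights.shape)  # torch.Size([2, 4, 50, 50]) -- one attention map per head
```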
491
+
492
+
493
+ # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->ChineseCLIPText
494
+ class ChineseCLIPTextIntermediate(nn.Module):
495
+ def __init__(self, config):
496
+ super().__init__()
497
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
498
+ if isinstance(config.hidden_act, str):
499
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
500
+ else:
501
+ self.intermediate_act_fn = config.hidden_act
502
+
503
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
504
+ hidden_states = self.dense(hidden_states)
505
+ hidden_states = self.intermediate_act_fn(hidden_states)
506
+ return hidden_states
507
+
508
+
509
+ # Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->ChineseCLIPText
510
+ class ChineseCLIPTextOutput(nn.Module):
511
+ def __init__(self, config):
512
+ super().__init__()
513
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
514
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
515
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
516
+
517
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
518
+ hidden_states = self.dense(hidden_states)
519
+ hidden_states = self.dropout(hidden_states)
520
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
521
+ return hidden_states
522
+
523
+
524
+ # Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->ChineseCLIPVision
525
+ class ChineseCLIPVisionMLP(nn.Module):
526
+ def __init__(self, config):
527
+ super().__init__()
528
+ self.config = config
529
+ self.activation_fn = ACT2FN[config.hidden_act]
530
+ self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
531
+ self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
532
+
533
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
534
+ hidden_states = self.fc1(hidden_states)
535
+ hidden_states = self.activation_fn(hidden_states)
536
+ hidden_states = self.fc2(hidden_states)
537
+ return hidden_states
538
+
539
+
540
+ # Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->ChineseCLIPText
541
+ class ChineseCLIPTextLayer(nn.Module):
542
+ def __init__(self, config):
543
+ super().__init__()
544
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
545
+ self.seq_len_dim = 1
546
+ self.attention = ChineseCLIPTextAttention(config)
547
+ self.is_decoder = config.is_decoder
548
+ self.add_cross_attention = config.add_cross_attention
549
+ if self.add_cross_attention:
550
+ if not self.is_decoder:
551
+ raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
552
+ self.crossattention = ChineseCLIPTextAttention(config, position_embedding_type="absolute")
553
+ self.intermediate = ChineseCLIPTextIntermediate(config)
554
+ self.output = ChineseCLIPTextOutput(config)
555
+
556
+ def forward(
557
+ self,
558
+ hidden_states: torch.Tensor,
559
+ attention_mask: Optional[torch.FloatTensor] = None,
560
+ head_mask: Optional[torch.FloatTensor] = None,
561
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
562
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
563
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
564
+ output_attentions: Optional[bool] = False,
565
+ ) -> Tuple[torch.Tensor]:
566
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
567
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
568
+ self_attention_outputs = self.attention(
569
+ hidden_states,
570
+ attention_mask,
571
+ head_mask,
572
+ output_attentions=output_attentions,
573
+ past_key_value=self_attn_past_key_value,
574
+ )
575
+ attention_output = self_attention_outputs[0]
576
+
577
+ # if decoder, the last output is tuple of self-attn cache
578
+ if self.is_decoder:
579
+ outputs = self_attention_outputs[1:-1]
580
+ present_key_value = self_attention_outputs[-1]
581
+ else:
582
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
583
+
584
+ cross_attn_present_key_value = None
585
+ if self.is_decoder and encoder_hidden_states is not None:
586
+ if not hasattr(self, "crossattention"):
587
+ raise ValueError(
588
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
589
+ " by setting `config.add_cross_attention=True`"
590
+ )
591
+
592
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
593
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
594
+ cross_attention_outputs = self.crossattention(
595
+ attention_output,
596
+ attention_mask,
597
+ head_mask,
598
+ encoder_hidden_states,
599
+ encoder_attention_mask,
600
+ cross_attn_past_key_value,
601
+ output_attentions,
602
+ )
603
+ attention_output = cross_attention_outputs[0]
604
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
605
+
606
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
607
+ cross_attn_present_key_value = cross_attention_outputs[-1]
608
+ present_key_value = present_key_value + cross_attn_present_key_value
609
+
610
+ layer_output = apply_chunking_to_forward(
611
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
612
+ )
613
+ outputs = (layer_output,) + outputs
614
+
615
+ # if decoder, return the attn key/values as the last output
616
+ if self.is_decoder:
617
+ outputs = outputs + (present_key_value,)
618
+
619
+ return outputs
620
+
621
+ def feed_forward_chunk(self, attention_output):
622
+ intermediate_output = self.intermediate(attention_output)
623
+ layer_output = self.output(intermediate_output, attention_output)
624
+ return layer_output
625
+
626
+
627
+ class ChineseCLIPVisionLayer(nn.Module):
628
+ def __init__(self, config: ChineseCLIPConfig):
629
+ super().__init__()
630
+ self.embed_dim = config.hidden_size
631
+ self.self_attn = ChineseCLIPVisionAttention(config)
632
+ self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
633
+ self.mlp = ChineseCLIPVisionMLP(config)
634
+ self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
635
+
636
+ def forward(
637
+ self,
638
+ hidden_states: torch.Tensor,
639
+ output_attentions: Optional[bool] = False,
640
+ ) -> Tuple[torch.FloatTensor]:
641
+ """
642
+ Args:
643
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
644
+ output_attentions (`bool`, *optional*):
645
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
646
+ returned tensors for more detail.
647
+ """
648
+ residual = hidden_states
649
+
650
+ hidden_states = self.layer_norm1(hidden_states)
651
+ hidden_states, attn_weights = self.self_attn(
652
+ hidden_states=hidden_states,
653
+ output_attentions=output_attentions,
654
+ )
655
+ hidden_states = residual + hidden_states
656
+
657
+ residual = hidden_states
658
+ hidden_states = self.layer_norm2(hidden_states)
659
+ hidden_states = self.mlp(hidden_states)
660
+ hidden_states = residual + hidden_states
661
+
662
+ outputs = (hidden_states,)
663
+
664
+ if output_attentions:
665
+ outputs += (attn_weights,)
666
+
667
+ return outputs
668
+
669
+
670
+ # Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->ChineseCLIPText
671
+ class ChineseCLIPTextPooler(nn.Module):
672
+ def __init__(self, config):
673
+ super().__init__()
674
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
675
+ self.activation = nn.Tanh()
676
+
677
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
678
+ # We "pool" the model by simply taking the hidden state corresponding
679
+ # to the first token.
680
+ first_token_tensor = hidden_states[:, 0]
681
+ pooled_output = self.dense(first_token_tensor)
682
+ pooled_output = self.activation(pooled_output)
683
+ return pooled_output
684
+
685
+
686
+ class ChineseCLIPPreTrainedModel(PreTrainedModel):
687
+ """
688
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
689
+ models.
690
+ """
691
+
692
+ config_class = ChineseCLIPConfig
693
+ base_model_prefix = "chinese_clip"
694
+ supports_gradient_checkpointing = True
695
+
696
+ def _init_weights(self, module):
697
+ """Initialize the weights"""
698
+ factor = self.config.initializer_factor
699
+ if isinstance(module, ChineseCLIPVisionEmbeddings):
700
+ factor = self.config.initializer_factor
701
+ nn.init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim**-0.5 * factor)
702
+ nn.init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor)
703
+ nn.init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor)
704
+ elif isinstance(module, ChineseCLIPTextEmbeddings):
705
+ nn.init.normal_(module.word_embeddings.weight, mean=0.0, std=self.config.initializer_range)
706
+ nn.init.normal_(module.position_embeddings.weight, mean=0.0, std=self.config.initializer_range)
707
+ nn.init.normal_(module.token_type_embeddings.weight, mean=0.0, std=self.config.initializer_range)
708
+ for embedding in [module.word_embeddings, module.position_embeddings, module.token_type_embeddings]:
709
+ if embedding.padding_idx is not None:
710
+ embedding.weight.data[embedding.padding_idx].zero_()
711
+ elif isinstance(module, ChineseCLIPVisionAttention):
712
+ factor = self.config.initializer_factor
713
+ in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
714
+ out_proj_std = (module.embed_dim**-0.5) * factor
715
+ nn.init.normal_(module.q_proj.weight, std=in_proj_std)
716
+ nn.init.normal_(module.k_proj.weight, std=in_proj_std)
717
+ nn.init.normal_(module.v_proj.weight, std=in_proj_std)
718
+ nn.init.normal_(module.out_proj.weight, std=out_proj_std)
719
+ elif isinstance(module, ChineseCLIPVisionMLP):
720
+ factor = self.config.initializer_factor
721
+ in_proj_std = (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
722
+ fc_std = (2 * module.config.hidden_size) ** -0.5 * factor
723
+ nn.init.normal_(module.fc1.weight, std=fc_std)
724
+ nn.init.normal_(module.fc2.weight, std=in_proj_std)
725
+ elif isinstance(module, ChineseCLIPModel):
726
+ nn.init.normal_(
727
+ module.text_projection.weight,
728
+ std=module.text_embed_dim**-0.5 * self.config.initializer_factor,
729
+ )
730
+ nn.init.normal_(
731
+ module.visual_projection.weight,
732
+ std=module.vision_embed_dim**-0.5 * self.config.initializer_factor,
733
+ )
734
+
735
+ if isinstance(module, nn.LayerNorm):
736
+ module.bias.data.zero_()
737
+ module.weight.data.fill_(1.0)
738
+ if isinstance(module, nn.Linear):
739
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
740
+ if module.bias is not None:
741
+ module.bias.data.zero_()
742
+
743
+
744
+ CHINESE_CLIP_START_DOCSTRING = r"""
745
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
746
+ as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
747
+ behavior.
748
+
749
+ Parameters:
750
+ config ([`ChineseCLIPConfig`]): Model configuration class with all the parameters of the model.
751
+ Initializing with a config file does not load the weights associated with the model, only the
752
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
753
+ """
754
+
755
+ CHINESE_CLIP_TEXT_INPUTS_DOCSTRING = r"""
756
+ Args:
757
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
758
+ Indices of input sequence tokens in the vocabulary.
759
+
760
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
761
+ [`PreTrainedTokenizer.__call__`] for details.
762
+
763
+ [What are input IDs?](../glossary#input-ids)
764
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
765
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
766
+
767
+ - 1 for tokens that are **not masked**,
768
+ - 0 for tokens that are **masked**.
769
+
770
+ [What are attention masks?](../glossary#attention-mask)
771
+ token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
772
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
773
+ 1]`:
774
+
775
+ - 0 corresponds to a *sentence A* token,
776
+ - 1 corresponds to a *sentence B* token.
777
+
778
+ [What are token type IDs?](../glossary#token-type-ids)
779
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
780
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
781
+ config.max_position_embeddings - 1]`.
782
+
783
+ [What are position IDs?](../glossary#position-ids)
784
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
785
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
786
+
787
+ - 1 indicates the head is **not masked**,
788
+ - 0 indicates the head is **masked**.
789
+
790
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
791
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
792
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
793
+ model's internal embedding lookup matrix.
794
+ output_attentions (`bool`, *optional*):
795
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
796
+ tensors for more detail.
797
+ output_hidden_states (`bool`, *optional*):
798
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
799
+ more detail.
800
+ return_dict (`bool`, *optional*):
801
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
802
+ """
803
+
804
+ CHINESE_CLIP_VISION_INPUTS_DOCSTRING = r"""
805
+ Args:
806
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
807
+ Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
808
+ [`AutoImageProcessor`]. See [`ChineseCLIPImageProcessor.__call__`] for details.
809
+ output_attentions (`bool`, *optional*):
810
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
811
+ tensors for more detail.
812
+ output_hidden_states (`bool`, *optional*):
813
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
814
+ more detail.
815
+ return_dict (`bool`, *optional*):
816
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
817
+ """
818
+
819
+ CHINESE_CLIP_INPUTS_DOCSTRING = r"""
820
+ Args:
821
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
822
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
823
+ it.
824
+
825
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
826
+ [`PreTrainedTokenizer.__call__`] for details.
827
+
828
+ [What are input IDs?](../glossary#input-ids)
829
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
830
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
831
+
832
+ - 1 for tokens that are **not masked**,
833
+ - 0 for tokens that are **masked**.
834
+
835
+ [What are attention masks?](../glossary#attention-mask)
836
+ token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
837
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
838
+ 1]`:
839
+
840
+ - 0 corresponds to a *sentence A* token,
841
+ - 1 corresponds to a *sentence B* token.
842
+
843
+ [What are token type IDs?](../glossary#token-type-ids)
844
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
845
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
846
+ config.max_position_embeddings - 1]`.
847
+
848
+ [What are position IDs?](../glossary#position-ids)
849
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
850
+ Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
851
+ [`AutoImageProcessor`]. See [`ChineseCLIPImageProcessor.__call__`] for details.
852
+ return_loss (`bool`, *optional*):
853
+ Whether or not to return the contrastive loss.
854
+ output_attentions (`bool`, *optional*):
855
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
856
+ tensors for more detail.
857
+ output_hidden_states (`bool`, *optional*):
858
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
859
+ more detail.
860
+ return_dict (`bool`, *optional*):
861
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
862
+ """
863
+
864
+
865
+ # Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->ChineseCLIPText
866
+ class ChineseCLIPTextEncoder(nn.Module):
867
+ def __init__(self, config):
868
+ super().__init__()
869
+ self.config = config
870
+ self.layer = nn.ModuleList([ChineseCLIPTextLayer(config) for _ in range(config.num_hidden_layers)])
871
+ self.gradient_checkpointing = False
872
+
873
+ def forward(
874
+ self,
875
+ hidden_states: torch.Tensor,
876
+ attention_mask: Optional[torch.FloatTensor] = None,
877
+ head_mask: Optional[torch.FloatTensor] = None,
878
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
879
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
880
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
881
+ use_cache: Optional[bool] = None,
882
+ output_attentions: Optional[bool] = False,
883
+ output_hidden_states: Optional[bool] = False,
884
+ return_dict: Optional[bool] = True,
885
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
886
+ all_hidden_states = () if output_hidden_states else None
887
+ all_self_attentions = () if output_attentions else None
888
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
889
+
890
+ if self.gradient_checkpointing and self.training:
891
+ if use_cache:
892
+ logger.warning_once(
893
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
894
+ )
895
+ use_cache = False
896
+
897
+ next_decoder_cache = () if use_cache else None
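+ # collects each layer's present key/value states when caching is enabled (decoder use)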
898
+ for i, layer_module in enumerate(self.layer):
899
+ if output_hidden_states:
900
+ all_hidden_states = all_hidden_states + (hidden_states,)
901
+
902
+ layer_head_mask = head_mask[i] if head_mask is not None else None
903
+ past_key_value = past_key_values[i] if past_key_values is not None else None
904
+
905
+ if self.gradient_checkpointing and self.training:
906
+ layer_outputs = self._gradient_checkpointing_func(
907
+ layer_module.__call__,
908
+ hidden_states,
909
+ attention_mask,
910
+ layer_head_mask,
911
+ encoder_hidden_states,
912
+ encoder_attention_mask,
913
+ past_key_value,
914
+ output_attentions,
915
+ )
916
+ else:
917
+ layer_outputs = layer_module(
918
+ hidden_states,
919
+ attention_mask,
920
+ layer_head_mask,
921
+ encoder_hidden_states,
922
+ encoder_attention_mask,
923
+ past_key_value,
924
+ output_attentions,
925
+ )
926
+
927
+ hidden_states = layer_outputs[0]
928
+ if use_cache:
929
+ next_decoder_cache += (layer_outputs[-1],)
930
+ if output_attentions:
931
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
932
+ if self.config.add_cross_attention:
933
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
934
+
935
+ if output_hidden_states:
936
+ all_hidden_states = all_hidden_states + (hidden_states,)
937
+
938
+ if not return_dict:
939
+ return tuple(
940
+ v
941
+ for v in [
942
+ hidden_states,
943
+ next_decoder_cache,
944
+ all_hidden_states,
945
+ all_self_attentions,
946
+ all_cross_attentions,
947
+ ]
948
+ if v is not None
949
+ )
950
+ return BaseModelOutputWithPastAndCrossAttentions(
951
+ last_hidden_state=hidden_states,
952
+ past_key_values=next_decoder_cache,
953
+ hidden_states=all_hidden_states,
954
+ attentions=all_self_attentions,
955
+ cross_attentions=all_cross_attentions,
956
+ )
957
+
958
+
959
+ class ChineseCLIPVisionEncoder(nn.Module):
960
+ """
961
+ Transformer encoder consisting of `config.num_hidden_layers` self-attention layers. Each layer is a
962
+ [`ChineseCLIPVisionLayer`].
963
+
964
+ Args:
965
+ config: ChineseCLIPConfig
966
+ """
967
+
968
+ def __init__(self, config: ChineseCLIPConfig):
969
+ super().__init__()
970
+ self.config = config
971
+ self.layers = nn.ModuleList([ChineseCLIPVisionLayer(config) for _ in range(config.num_hidden_layers)])
972
+ self.gradient_checkpointing = False
973
+
974
+ def forward(
975
+ self,
976
+ inputs_embeds,
977
+ output_attentions: Optional[bool] = None,
978
+ output_hidden_states: Optional[bool] = None,
979
+ return_dict: Optional[bool] = None,
980
+ ) -> Union[Tuple, BaseModelOutput]:
981
+ r"""
982
+ Args:
983
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
984
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
985
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
986
+ than the model's internal embedding lookup matrix.
987
+ output_attentions (`bool`, *optional*):
988
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
989
+ returned tensors for more detail.
990
+ output_hidden_states (`bool`, *optional*):
991
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
992
+ for more detail.
993
+ return_dict (`bool`, *optional*):
994
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
995
+ """
996
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
997
+ output_hidden_states = (
998
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
999
+ )
1000
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1001
+
1002
+ encoder_states = () if output_hidden_states else None
1003
+ all_attentions = () if output_attentions else None
1004
+
1005
+ hidden_states = inputs_embeds
1006
+ for idx, encoder_layer in enumerate(self.layers):
1007
+ if output_hidden_states:
1008
+ encoder_states = encoder_states + (hidden_states,)
1009
+ if self.gradient_checkpointing and self.training:
1010
+ layer_outputs = self._gradient_checkpointing_func(
1011
+ encoder_layer.__call__,
1012
+ hidden_states,
1013
+ output_attentions,
1014
+ )
1015
+ else:
1016
+ layer_outputs = encoder_layer(
1017
+ hidden_states,
1018
+ output_attentions=output_attentions,
1019
+ )
1020
+
1021
+ hidden_states = layer_outputs[0]
1022
+
1023
+ if output_attentions:
1024
+ all_attentions = all_attentions + (layer_outputs[1],)
1025
+
1026
+ if output_hidden_states:
1027
+ encoder_states = encoder_states + (hidden_states,)
1028
+
1029
+ if not return_dict:
1030
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
1031
+ return BaseModelOutput(
1032
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
1033
+ )
1034
+
1035
+
1036
+ class ChineseCLIPVisionTransformer(nn.Module):
1037
+ def __init__(self, config: ChineseCLIPVisionConfig):
1038
+ super().__init__()
1039
+ self.config = config
1040
+ embed_dim = config.hidden_size
1041
+
1042
+ self.embeddings = ChineseCLIPVisionEmbeddings(config)
1043
+ self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
1044
+ self.encoder = ChineseCLIPVisionEncoder(config)
1045
+ self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
1046
+
1047
+ @add_start_docstrings_to_model_forward(CHINESE_CLIP_VISION_INPUTS_DOCSTRING)
1048
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=ChineseCLIPVisionConfig)
1049
+ def forward(
1050
+ self,
1051
+ pixel_values: Optional[torch.FloatTensor] = None,
1052
+ output_attentions: Optional[bool] = None,
1053
+ output_hidden_states: Optional[bool] = None,
1054
+ return_dict: Optional[bool] = None,
1055
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
1056
+ r"""
1057
+ Returns:
1058
+ """
1059
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1060
+ output_hidden_states = (
1061
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1062
+ )
1063
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1064
+
1065
+ if pixel_values is None:
1066
+ raise ValueError("You have to specify pixel_values")
1067
+
1068
+ hidden_states = self.embeddings(pixel_values)
1069
+ hidden_states = self.pre_layrnorm(hidden_states)
1070
+
1071
+ encoder_outputs = self.encoder(
1072
+ inputs_embeds=hidden_states,
1073
+ output_attentions=output_attentions,
1074
+ output_hidden_states=output_hidden_states,
1075
+ return_dict=return_dict,
1076
+ )
1077
+
1078
+ last_hidden_state = encoder_outputs[0]
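+ # pool by taking the hidden state of the first (class-embedding) token,
+ # then apply the final LayerNorm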
1079
+ pooled_output = last_hidden_state[:, 0, :]
1080
+ pooled_output = self.post_layernorm(pooled_output)
1081
+
1082
+ if not return_dict:
1083
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
1084
+
1085
+ return BaseModelOutputWithPooling(
1086
+ last_hidden_state=last_hidden_state,
1087
+ pooler_output=pooled_output,
1088
+ hidden_states=encoder_outputs.hidden_states,
1089
+ attentions=encoder_outputs.attentions,
1090
+ )
1091
+
1092
+
1093
+ @add_start_docstrings(
1094
+ "The text model from CHINESE_CLIP without any head or projection on top.",
1095
+ CHINESE_CLIP_START_DOCSTRING,
1096
+ )
1097
+ class ChineseCLIPTextModel(ChineseCLIPPreTrainedModel):
1098
+ """
1099
+
1100
+ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
1101
+ cross-attention is added between the self-attention layers, following the architecture described in [Attention is
1102
+ all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
1103
+ Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
1104
+
1105
+ To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
1106
+ to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder` argument and
1107
+ `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
1108
+ """
1109
+
1110
+ config_class = ChineseCLIPTextConfig
1111
+
1112
+ def __init__(self, config, add_pooling_layer=True):
1113
+ super().__init__(config)
1114
+ self.config = config
1115
+
1116
+ self.embeddings = ChineseCLIPTextEmbeddings(config)
1117
+ self.encoder = ChineseCLIPTextEncoder(config)
1118
+
1119
+ self.pooler = ChineseCLIPTextPooler(config) if add_pooling_layer else None
1120
+
1121
+ # Initialize weights and apply final processing
1122
+ self.post_init()
1123
+
1124
+ def get_input_embeddings(self):
1125
+ return self.embeddings.word_embeddings
1126
+
1127
+ def set_input_embeddings(self, value):
1128
+ self.embeddings.word_embeddings = value
1129
+
1130
+ def _prune_heads(self, heads_to_prune):
1131
+ """
1132
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
1133
+ class PreTrainedModel
1134
+ """
1135
+ for layer, heads in heads_to_prune.items():
1136
+ self.encoder.layer[layer].attention.prune_heads(heads)
1137
+
1138
+ @add_start_docstrings_to_model_forward(CHINESE_CLIP_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1139
+ @add_code_sample_docstrings(
1140
+ checkpoint=_CHECKPOINT_FOR_DOC,
1141
+ output_type=BaseModelOutputWithPoolingAndCrossAttentions,
1142
+ config_class=_CONFIG_FOR_DOC,
1143
+ )
1144
+ def forward(
1145
+ self,
1146
+ input_ids: Optional[torch.Tensor] = None,
1147
+ attention_mask: Optional[torch.Tensor] = None,
1148
+ token_type_ids: Optional[torch.Tensor] = None,
1149
+ position_ids: Optional[torch.Tensor] = None,
1150
+ head_mask: Optional[torch.Tensor] = None,
1151
+ inputs_embeds: Optional[torch.Tensor] = None,
1152
+ encoder_hidden_states: Optional[torch.Tensor] = None,
1153
+ encoder_attention_mask: Optional[torch.Tensor] = None,
1154
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1155
+ use_cache: Optional[bool] = None,
1156
+ output_attentions: Optional[bool] = None,
1157
+ output_hidden_states: Optional[bool] = None,
1158
+ return_dict: Optional[bool] = None,
1159
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
1160
+ r"""
1161
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1162
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
1163
+ the model is configured as a decoder.
1164
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
1165
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
1166
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
1167
+
1168
+ - 1 for tokens that are **not masked**,
1169
+ - 0 for tokens that are **masked**.
1170
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
1171
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
1172
+
1173
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
1174
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
1175
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1176
+ use_cache (`bool`, *optional*):
1177
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1178
+ `past_key_values`).
1179
+ """
1180
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1181
+ output_hidden_states = (
1182
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1183
+ )
1184
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1185
+
1186
+ if self.config.is_decoder:
1187
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1188
+ else:
1189
+ use_cache = False
1190
+
1191
+ if input_ids is not None and inputs_embeds is not None:
1192
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
1193
+ elif input_ids is not None:
1194
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
1195
+ input_shape = input_ids.size()
1196
+ elif inputs_embeds is not None:
1197
+ input_shape = inputs_embeds.size()[:-1]
1198
+ else:
1199
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
1200
+
1201
+ batch_size, seq_length = input_shape
1202
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
1203
+
1204
+ # past_key_values_length
1205
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
1206
+
1207
+ if attention_mask is None:
1208
+ attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
1209
+
1210
+ if token_type_ids is None:
1211
+ if hasattr(self.embeddings, "token_type_ids"):
1212
+ buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
1213
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
1214
+ token_type_ids = buffered_token_type_ids_expanded
1215
+ else:
1216
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
1217
+
1218
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
1219
+ # ourselves in which case we just need to make it broadcastable to all heads.
1220
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
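+ # the extended mask is broadcastable over heads and query positions and holds large
+ # negative values at masked positions, so it can be added directly to the attention scores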
1221
+
1222
+ # If a 2D or 3D attention mask is provided for the cross-attention
1223
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
1224
+ if self.config.is_decoder and encoder_hidden_states is not None:
1225
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
1226
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
1227
+ if encoder_attention_mask is None:
1228
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
1229
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
1230
+ else:
1231
+ encoder_extended_attention_mask = None
1232
+
1233
+ # Prepare head mask if needed
1234
+ # 1.0 in head_mask indicate we keep the head
1235
+ # attention_probs has shape bsz x n_heads x N x N
1236
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
1237
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
1238
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
1239
+
1240
+ embedding_output = self.embeddings(
1241
+ input_ids=input_ids,
1242
+ position_ids=position_ids,
1243
+ token_type_ids=token_type_ids,
1244
+ inputs_embeds=inputs_embeds,
1245
+ past_key_values_length=past_key_values_length,
1246
+ )
1247
+ encoder_outputs = self.encoder(
1248
+ embedding_output,
1249
+ attention_mask=extended_attention_mask,
1250
+ head_mask=head_mask,
1251
+ encoder_hidden_states=encoder_hidden_states,
1252
+ encoder_attention_mask=encoder_extended_attention_mask,
1253
+ past_key_values=past_key_values,
1254
+ use_cache=use_cache,
1255
+ output_attentions=output_attentions,
1256
+ output_hidden_states=output_hidden_states,
1257
+ return_dict=return_dict,
1258
+ )
1259
+ sequence_output = encoder_outputs[0]
1260
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
1261
+
1262
+ if not return_dict:
1263
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
1264
+
1265
+ return BaseModelOutputWithPoolingAndCrossAttentions(
1266
+ last_hidden_state=sequence_output,
1267
+ pooler_output=pooled_output,
1268
+ past_key_values=encoder_outputs.past_key_values,
1269
+ hidden_states=encoder_outputs.hidden_states,
1270
+ attentions=encoder_outputs.attentions,
1271
+ cross_attentions=encoder_outputs.cross_attentions,
1272
+ )
1273
+
1274
+
1275
+ @add_start_docstrings(
1276
+ """The vision model from CHINESE_CLIP without any head or projection on top.""",
1277
+ CHINESE_CLIP_START_DOCSTRING,
1278
+ )
1279
+ class ChineseCLIPVisionModel(ChineseCLIPPreTrainedModel):
1280
+ config_class = ChineseCLIPVisionConfig
1281
+ main_input_name = "pixel_values"
1282
+
1283
+ def __init__(self, config: ChineseCLIPVisionConfig):
1284
+ super().__init__(config)
1285
+ self.vision_model = ChineseCLIPVisionTransformer(config)
1286
+ # Initialize weights and apply final processing
1287
+ self.post_init()
1288
+
1289
+ def get_input_embeddings(self) -> nn.Module:
1290
+ return self.vision_model.embeddings.patch_embedding
1291
+
1292
+ @add_start_docstrings_to_model_forward(CHINESE_CLIP_VISION_INPUTS_DOCSTRING)
1293
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=ChineseCLIPVisionConfig)
1294
+ def forward(
1295
+ self,
1296
+ pixel_values: Optional[torch.FloatTensor] = None,
1297
+ output_attentions: Optional[bool] = None,
1298
+ output_hidden_states: Optional[bool] = None,
1299
+ return_dict: Optional[bool] = None,
1300
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
1301
+ r"""
1302
+ Returns:
1303
+
1304
+ Examples:
1305
+
1306
+ ```python
1307
+ >>> from PIL import Image
1308
+ >>> import requests
1309
+ >>> from transformers import CLIPProcessor, ChineseCLIPVisionModel
1310
+
1311
+ >>> model = ChineseCLIPVisionModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
1312
+ >>> processor = CLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
1313
+
1314
+ >>> url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg"
1315
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1316
+
1317
+ >>> inputs = processor(images=image, return_tensors="pt")
1318
+
1319
+ >>> outputs = model(**inputs)
1320
+ >>> last_hidden_state = outputs.last_hidden_state
1321
+ >>> pooled_output = outputs.pooler_output # pooled CLS states
1322
+ ```"""
1323
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1324
+
1325
+ return self.vision_model(
1326
+ pixel_values=pixel_values,
1327
+ output_attentions=output_attentions,
1328
+ output_hidden_states=output_hidden_states,
1329
+ return_dict=return_dict,
1330
+ )
1331
+
1332
+
1333
+ @add_start_docstrings(CHINESE_CLIP_START_DOCSTRING)
1334
+ class ChineseCLIPModel(ChineseCLIPPreTrainedModel):
1335
+ config_class = ChineseCLIPConfig
1336
+
1337
+ def __init__(self, config: ChineseCLIPConfig):
1338
+ super().__init__(config)
1339
+
1340
+ if not isinstance(config.text_config, ChineseCLIPTextConfig):
1341
+ raise ValueError(
1342
+ "config.text_config is expected to be of type ChineseCLIPTextConfig but is of type"
1343
+ f" {type(config.text_config)}."
1344
+ )
1345
+
1346
+ if not isinstance(config.vision_config, ChineseCLIPVisionConfig):
1347
+ raise ValueError(
1348
+ "config.vision_config is expected to be of type ChineseCLIPVisionConfig but is of type"
1349
+ f" {type(config.vision_config)}."
1350
+ )
1351
+
1352
+ text_config = config.text_config
1353
+ vision_config = config.vision_config
1354
+
1355
+ self.projection_dim = config.projection_dim
1356
+ self.text_embed_dim = text_config.hidden_size
1357
+ self.vision_embed_dim = vision_config.hidden_size
1358
+
1359
+ self.text_model = ChineseCLIPTextModel(text_config, add_pooling_layer=False)
1360
+ self.vision_model = ChineseCLIPVisionTransformer(vision_config)
1361
+
1362
+ self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False)
1363
+ self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False)
1364
+ self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value))
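+ # learnable temperature: `logit_scale.exp()` multiplies the cosine similarities in
+ # `forward`, as in the original CLIP contrastive objective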
1365
+
1366
+ # Initialize weights and apply final processing
1367
+ self.post_init()
1368
+
1369
+ @add_start_docstrings_to_model_forward(CHINESE_CLIP_TEXT_INPUTS_DOCSTRING)
1370
+ def get_text_features(
1371
+ self,
1372
+ input_ids: Optional[torch.Tensor] = None,
1373
+ attention_mask: Optional[torch.Tensor] = None,
1374
+ token_type_ids: Optional[torch.Tensor] = None,
1375
+ position_ids: Optional[torch.Tensor] = None,
1376
+ output_attentions: Optional[bool] = None,
1377
+ output_hidden_states: Optional[bool] = None,
1378
+ return_dict: Optional[bool] = None,
1379
+ ) -> torch.FloatTensor:
1380
+ r"""
1381
+ Returns:
1382
+ text_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The text embeddings obtained by
1383
+ applying the projection layer to the final [CLS] hidden state of Text-Transformer.
1384
+
1385
+ Examples:
1386
+
1387
+ ```python
1388
+ >>> from transformers import AutoTokenizer, ChineseCLIPModel
1389
+
1390
+ >>> model = ChineseCLIPModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
1391
+ >>> tokenizer = AutoTokenizer.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
1392
+
1393
+ >>> inputs = tokenizer(["杰尼龟", "妙蛙种子", "小火龙", "皮卡丘"], padding=True, return_tensors="pt")
1394
+ >>> text_features = model.get_text_features(**inputs)
1395
+ >>> text_features = text_features / text_features.norm(p=2, dim=-1, keepdim=True)
1396
+ ```"""
1397
+ # Use CHINESE_CLIP model's config for some fields (if specified) instead of those of vision & text components.
1398
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1399
+ output_hidden_states = (
1400
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1401
+ )
1402
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1403
+
1404
+ text_outputs = self.text_model(
1405
+ input_ids=input_ids,
1406
+ attention_mask=attention_mask,
1407
+ token_type_ids=token_type_ids,
1408
+ position_ids=position_ids,
1409
+ output_attentions=output_attentions,
1410
+ output_hidden_states=output_hidden_states,
1411
+ return_dict=return_dict,
1412
+ )
1413
+
1414
+ pooled_output = text_outputs[0][:, 0, :]
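+ # Chinese-CLIP projects the last hidden state of the [CLS] token (the text model is
+ # built with `add_pooling_layer=False`), not a BERT-style tanh-pooled output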
1415
+ text_features = self.text_projection(pooled_output)
1416
+
1417
+ return text_features
1418
+
1419
+ @add_start_docstrings_to_model_forward(CHINESE_CLIP_VISION_INPUTS_DOCSTRING)
1420
+ def get_image_features(
1421
+ self,
1422
+ pixel_values: Optional[torch.FloatTensor] = None,
1423
+ output_attentions: Optional[bool] = None,
1424
+ output_hidden_states: Optional[bool] = None,
1425
+ return_dict: Optional[bool] = None,
1426
+ ) -> torch.FloatTensor:
1427
+ r"""
1428
+ Returns:
1429
+ image_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The image embeddings obtained by
1430
+ applying the projection layer to the final [CLS] hidden state of Vision-Transformer.
1431
+
1432
+ Examples:
1433
+
1434
+ ```python
1435
+ >>> from PIL import Image
1436
+ >>> import requests
1437
+ >>> from transformers import AutoProcessor, ChineseCLIPModel
1438
+
1439
+ >>> model = ChineseCLIPModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
1440
+ >>> processor = AutoProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
1441
+
1442
+ >>> url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg"
1443
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1444
+
1445
+ >>> inputs = processor(images=image, return_tensors="pt")
1446
+
1447
+ >>> image_features = model.get_image_features(**inputs)
1448
+ >>> image_features = image_features / image_features.norm(p=2, dim=-1, keepdim=True)
1449
+ ```"""
1450
+ # Use CHINESE_CLIP model's config for some fields (if specified) instead of those of vision & text components.
1451
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1452
+ output_hidden_states = (
1453
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1454
+ )
1455
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1456
+
1457
+ vision_outputs = self.vision_model(
1458
+ pixel_values=pixel_values,
1459
+ output_attentions=output_attentions,
1460
+ output_hidden_states=output_hidden_states,
1461
+ return_dict=return_dict,
1462
+ )
1463
+
1464
+ pooled_output = vision_outputs[1] # pooled_output
1465
+ image_features = self.visual_projection(pooled_output)
1466
+
1467
+ return image_features
1468
+
1469
+ @add_start_docstrings_to_model_forward(CHINESE_CLIP_INPUTS_DOCSTRING)
1470
+ @replace_return_docstrings(output_type=ChineseCLIPOutput, config_class=ChineseCLIPConfig)
1471
+ def forward(
1472
+ self,
1473
+ input_ids: Optional[torch.LongTensor] = None,
1474
+ pixel_values: Optional[torch.FloatTensor] = None,
1475
+ attention_mask: Optional[torch.Tensor] = None,
1476
+ token_type_ids: Optional[torch.Tensor] = None,
1477
+ position_ids: Optional[torch.LongTensor] = None,
1478
+ return_loss: Optional[bool] = None,
1479
+ output_attentions: Optional[bool] = None,
1480
+ output_hidden_states: Optional[bool] = None,
1481
+ return_dict: Optional[bool] = None,
1482
+ ) -> Union[Tuple, ChineseCLIPOutput]:
1483
+ r"""
1484
+ Returns:
1485
+
1486
+ Examples:
1487
+
1488
+ ```python
1489
+ >>> from PIL import Image
1490
+ >>> import requests
1491
+ >>> from transformers import AutoProcessor, ChineseCLIPModel
1492
+
1493
+ >>> model = ChineseCLIPModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
1494
+ >>> processor = AutoProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
1495
+
1496
+ >>> url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg"
1497
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1498
+
1499
+ >>> inputs = processor(text=["杰尼龟", "妙蛙种子", "小火龙", "皮卡丘"], images=image, return_tensors="pt", padding=True)
1500
+
1501
+ >>> outputs = model(**inputs)
1502
+ >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score
1503
+ >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities
1504
+ ```"""
1505
+ # Use CHINESE_CLIP model's config for some fields (if specified) instead of those of vision & text components.
1506
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1507
+ output_hidden_states = (
1508
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1509
+ )
1510
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1511
+
1512
+ vision_outputs = self.vision_model(
1513
+ pixel_values=pixel_values,
1514
+ output_attentions=output_attentions,
1515
+ output_hidden_states=output_hidden_states,
1516
+ return_dict=return_dict,
1517
+ )
1518
+
1519
+ text_outputs = self.text_model(
1520
+ input_ids=input_ids,
1521
+ attention_mask=attention_mask,
1522
+ token_type_ids=token_type_ids,
1523
+ position_ids=position_ids,
1524
+ output_attentions=output_attentions,
1525
+ output_hidden_states=output_hidden_states,
1526
+ return_dict=return_dict,
1527
+ )
1528
+
1529
+ image_embeds = vision_outputs[1]
1530
+ image_embeds = self.visual_projection(image_embeds)
1531
+
1532
+ text_embeds = text_outputs[0][:, 0, :]
1533
+ text_embeds = self.text_projection(text_embeds)
1534
+
1535
+ # normalized features
1536
+ image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True)
1537
+ text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)
1538
+
1539
+ # cosine similarity as logits
1540
+ logit_scale = self.logit_scale.exp()
1541
+ logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale
1542
+ logits_per_image = logits_per_text.t()
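+ # logits_per_text[i, j] is the scaled cosine similarity between text i and image j;
+ # logits_per_image is its transpose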
1543
+
1544
+ loss = None
1545
+ if return_loss:
1546
+ loss = chinese_clip_loss(logits_per_text)
1547
+
1548
+ if not return_dict:
1549
+ # fix the None pooled_output of text_outputs to conform with dict_output
1550
+ pooled_output = text_outputs[1]
1551
+ if pooled_output is None:
1552
+ text_outputs = (text_outputs[0],) + text_outputs[2:]
1553
+ output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
1554
+ return ((loss,) + output) if loss is not None else output
1555
+
1556
+ return ChineseCLIPOutput(
1557
+ loss=loss,
1558
+ logits_per_image=logits_per_image,
1559
+ logits_per_text=logits_per_text,
1560
+ text_embeds=text_embeds,
1561
+ image_embeds=image_embeds,
1562
+ text_model_output=text_outputs,
1563
+ vision_model_output=vision_outputs,
1564
+ )
env-llmeval/lib/python3.10/site-packages/transformers/models/chinese_clip/processing_chinese_clip.py ADDED
@@ -0,0 +1,142 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The OFA-Sys Team Authors and The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Image/Text processor class for Chinese-CLIP
17
+ """
18
+
19
+ import warnings
20
+
21
+ from ...processing_utils import ProcessorMixin
22
+ from ...tokenization_utils_base import BatchEncoding
23
+
24
+
25
+ class ChineseCLIPProcessor(ProcessorMixin):
26
+ r"""
27
+ Constructs a Chinese-CLIP processor which wraps a Chinese-CLIP image processor and a Chinese-CLIP tokenizer into a
28
+ single processor.
29
+
30
+ [`ChineseCLIPProcessor`] offers all the functionalities of [`ChineseCLIPImageProcessor`] and [`BertTokenizerFast`].
31
+ See the [`~ChineseCLIPProcessor.__call__`] and [`~ChineseCLIPProcessor.decode`] for more information.
32
+
33
+ Args:
34
+ image_processor ([`ChineseCLIPImageProcessor`], *optional*):
35
+ The image processor is a required input.
36
+ tokenizer ([`BertTokenizerFast`], *optional*):
37
+ The tokenizer is a required input.
38
+ """
39
+
40
+ attributes = ["image_processor", "tokenizer"]
41
+ image_processor_class = "ChineseCLIPImageProcessor"
42
+ tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
43
+
44
+ def __init__(self, image_processor=None, tokenizer=None, **kwargs):
45
+ feature_extractor = None
46
+ if "feature_extractor" in kwargs:
47
+ warnings.warn(
48
+ "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
49
+ " instead.",
50
+ FutureWarning,
51
+ )
52
+ feature_extractor = kwargs.pop("feature_extractor")
53
+
54
+ image_processor = image_processor if image_processor is not None else feature_extractor
55
+ if image_processor is None:
56
+ raise ValueError("You need to specify an `image_processor`.")
57
+ if tokenizer is None:
58
+ raise ValueError("You need to specify a `tokenizer`.")
59
+
60
+ super().__init__(image_processor, tokenizer)
61
+ self.current_processor = self.image_processor
62
+
63
+ def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
64
+ """
65
+ Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text`
66
+ and `kwargs` arguments to BertTokenizerFast's [`~BertTokenizerFast.__call__`] if `text` is not `None` to encode
67
+ the text. To prepare the image(s), this method forwards the `images` and `kwrags` arguments to
68
+ CLIPImageProcessor's [`~CLIPImageProcessor.__call__`] if `images` is not `None`. Please refer to the doctsring
69
+ of the above two methods for more information.
70
+
71
+ Args:
72
+ text (`str`, `List[str]`, `List[List[str]]`):
73
+ The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
74
+ (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
75
+ `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
76
+ images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
77
+ The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
78
+ tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a
79
+ number of channels, H and W are image height and width.
80
+
81
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
82
+ If set, will return tensors of a particular framework. Acceptable values are:
83
+
84
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
85
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
86
+ - `'np'`: Return NumPy `np.ndarray` objects.
87
+ - `'jax'`: Return JAX `jnp.ndarray` objects.
88
+
89
+ Returns:
90
+ [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
91
+
92
+ - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
93
+ - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
94
+ `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
95
+ `None`).
96
+ - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
97
+ """
98
+
99
+ if text is None and images is None:
100
+ raise ValueError("You have to specify either text or images. Both cannot be none.")
101
+
102
+ if text is not None:
103
+ encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
104
+
105
+ if images is not None:
106
+ image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
107
+
108
+ if text is not None and images is not None:
109
+ encoding["pixel_values"] = image_features.pixel_values
110
+ return encoding
111
+ elif text is not None:
112
+ return encoding
113
+ else:
114
+ return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
115
+
116
+ def batch_decode(self, *args, **kwargs):
117
+ """
118
+ This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
119
+ refer to the docstring of this method for more information.
120
+ """
121
+ return self.tokenizer.batch_decode(*args, **kwargs)
122
+
123
+ def decode(self, *args, **kwargs):
124
+ """
125
+ This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
126
+ the docstring of this method for more information.
127
+ """
128
+ return self.tokenizer.decode(*args, **kwargs)
129
+
130
+ @property
131
+ def model_input_names(self):
132
+ tokenizer_input_names = self.tokenizer.model_input_names
133
+ image_processor_input_names = self.image_processor.model_input_names
134
+ return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
135
+
136
+ @property
137
+ def feature_extractor_class(self):
138
+ warnings.warn(
139
+ "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
140
+ FutureWarning,
141
+ )
142
+ return self.image_processor_class
env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__init__.py ADDED
@@ -0,0 +1,56 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
17
+
18
+
19
+ _import_structure = {
20
+ "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
21
+ "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
22
+ "processing_mctct": ["MCTCTProcessor"],
23
+ }
24
+
25
+
26
+ try:
27
+ if not is_torch_available():
28
+ raise OptionalDependencyNotAvailable()
29
+ except OptionalDependencyNotAvailable:
30
+ pass
31
+ else:
32
+ _import_structure["modeling_mctct"] = [
33
+ "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
34
+ "MCTCTForCTC",
35
+ "MCTCTModel",
36
+ "MCTCTPreTrainedModel",
37
+ ]
38
+
39
+
40
+ if TYPE_CHECKING:
41
+ from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
42
+ from .feature_extraction_mctct import MCTCTFeatureExtractor
43
+ from .processing_mctct import MCTCTProcessor
44
+
45
+ try:
46
+ if not is_torch_available():
47
+ raise OptionalDependencyNotAvailable()
48
+ except OptionalDependencyNotAvailable:
49
+ pass
50
+ else:
51
+ from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
52
+
53
+ else:
54
+ import sys
55
+
56
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.06 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__pycache__/configuration_mctct.cpython-310.pyc ADDED
Binary file (7.94 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__pycache__/feature_extraction_mctct.cpython-310.pyc ADDED
Binary file (11.2 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__pycache__/modeling_mctct.cpython-310.pyc ADDED
Binary file (22.5 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__pycache__/processing_mctct.cpython-310.pyc ADDED
Binary file (4.81 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mctct/configuration_mctct.py ADDED
@@ -0,0 +1,186 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """M-CTC-T model configuration"""
16
+
17
+ from ....configuration_utils import PretrainedConfig
18
+ from ....utils import logging
19
+
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+ MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
24
+ "speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
25
+ # See all M-CTC-T models at https://huggingface.co/models?filter=mctct
26
+ }
27
+
28
+
29
+ class MCTCTConfig(PretrainedConfig):
30
+ r"""
31
+ This is the configuration class to store the configuration of a [`MCTCTModel`]. It is used to instantiate an
32
+ M-CTC-T model according to the specified arguments, defining the model architecture. Instantiating a configuration
33
+ with the defaults will yield a similar configuration to that of the M-CTC-T
34
+ [speechbrain/m-ctc-t-large](https://huggingface.co/speechbrain/m-ctc-t-large) architecture.
35
+
36
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
37
+ documentation from [`PretrainedConfig`] for more information.
38
+
39
+
40
+ Args:
41
+ vocab_size (`int`, *optional*, defaults to 8065):
42
+ Vocabulary size of the M-CTC-T model. Defines the number of different tokens that can be represented by the
43
+ `input_ids` passed when calling [`MCTCTModel`].
44
+ hidden_size (`int`, *optional*, defaults to 1536):
45
+ Dimension of the encoder layers and the pooler layer.
46
+ num_hidden_layers (`int`, *optional*, defaults to 36):
47
+ Number of hidden layers in the Transformer encoder.
48
+ intermediate_size (`int`, *optional*, defaults to 6144):
49
+ Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
50
+ num_attention_heads (`int`, *optional*, defaults to 4):
51
+ Number of attention heads for each attention layer in the Transformer encoder.
52
+ attention_head_dim (`int`, *optional*, defaults to 384):
53
+ Dimensions of each attention head for each attention layer in the Transformer encoder.
54
+ max_position_embeddings (`int`, *optional*, defaults to 920):
55
+ The maximum sequence length that this model might ever be used with (after log-mel spectrogram extraction).
56
+ layer_norm_eps (`float`, *optional*, defaults to 1e-05):
57
+ The epsilon used by the layer normalization layers.
58
+ layerdrop (`float`, *optional*, defaults to 0.3):
59
+ The probability of dropping an encoder layer during training. The default 0.3 value is used in the original
60
+ implementation.
61
+ hidden_act (`str` or `function`, *optional*, defaults to `"relu"`):
62
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
63
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
64
+ initializer_range (`float`, *optional*, defaults to 0.02):
65
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
66
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.3):
67
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
68
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.3):
69
+ The dropout ratio for the attention probabilities.
70
+ pad_token_id (`int`, *optional*, defaults to 1):
71
+ The tokenizer index of the pad token.
72
+ bos_token_id (`int`, *optional*, defaults to 0):
73
+ The tokenizer index of the bos token.
74
+ eos_token_id (`int`, *optional*, defaults to 2):
75
+ The tokenizer index of the eos token.
76
+ conv_glu_dim (`int`, *optional*, defaults to 1):
77
+ The dimension of the output of the `Conv1dSubsampler` layer on which GLU is applied. Though the original
78
+ Flashlight code uses the value of 2, here it's adapted to 1 due to transposition differences.
79
+ conv_dropout (`float`, *optional*, defaults to 0.3):
80
+ The probability of randomly dropping the `Conv1dSubsampler` layer during training.
81
+ num_conv_layers (`int`, *optional*, defaults to 1):
82
+ Number of convolution layers before applying transformer encoder layers.
83
+ conv_kernel (`Sequence[int]`, *optional*, defaults to `(7,)`):
84
+ The kernel size of the 1D convolution applied before transformer layers. `len(conv_kernel)` must be equal
85
+ to `num_conv_layers`.
86
+ conv_stride (`Sequence[int]`, *optional*, defaults to `(3,)`):
87
+ The stride length of the 1D convolution applied before transformer layers. `len(conv_stride)` must be equal
88
+ to `num_conv_layers`.
89
+ input_feat_per_channel (`int`, *optional*, defaults to 80):
90
+ Feature dimensions of the channels of the input to the Conv1D layer.
91
+ input_channels (`int`, *optional*, defaults to 1):
92
+ Number of input channels of the input to the Conv1D layer.
93
+ conv_channels (`List[int]`, *optional*):
94
+ Channel sizes of intermediate Conv1D layers.
95
+ ctc_loss_reduction (`str`, *optional*, defaults to `"sum"`):
96
+ Specifies the reduction to apply to the output of `torch.nn.CTCLoss`. Only relevant when training an
97
+ instance of [`MCTCTForCTC`].
98
+ ctc_zero_infinity (`bool`, *optional*, defaults to `False`):
99
+ Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`. Infinite losses mainly
100
+ occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance
101
+ of [`MCTCTForCTC`].
102
+
103
+ Example:
104
+
105
+ ```python
106
+ >>> from transformers import MCTCTConfig, MCTCTModel
107
+
108
+ >>> # Initializing a M-CTC-T mctct-large style configuration
109
+ >>> configuration = MCTCTConfig()
110
+
111
+ >>> # Initializing a model (with random weights) from the mctct-large style configuration
112
+ >>> model = MCTCTModel(configuration)
113
+
114
+ >>> # Accessing the model configuration
115
+ >>> configuration = model.config
116
+ ```"""
117
+
118
+ model_type = "mctct"
119
+
120
+ def __init__(
121
+ self,
122
+ vocab_size=8065,
123
+ hidden_size=1536,
124
+ num_hidden_layers=36,
125
+ intermediate_size=6144,
126
+ num_attention_heads=4,
127
+ attention_head_dim=384,
128
+ max_position_embeddings=920,
129
+ layer_norm_eps=1e-5,
130
+ layerdrop=0.3,
131
+ hidden_act="relu",
132
+ initializer_range=0.02,
133
+ hidden_dropout_prob=0.3,
134
+ attention_probs_dropout_prob=0.3,
135
+ pad_token_id=1,
136
+ bos_token_id=0,
137
+ eos_token_id=2,
138
+ conv_glu_dim=1,
139
+ conv_dropout=0.3,
140
+ num_conv_layers=1,
141
+ conv_kernel=(7,),
142
+ conv_stride=(3,),
143
+ input_feat_per_channel=80,
144
+ input_channels=1,
145
+ conv_channels=None,
146
+ ctc_loss_reduction="sum",
147
+ ctc_zero_infinity=False,
148
+ **kwargs,
149
+ ):
150
+ super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
151
+ self.vocab_size = vocab_size
152
+ self.hidden_size = hidden_size
153
+ self.num_hidden_layers = num_hidden_layers
154
+ self.intermediate_size = intermediate_size
155
+ self.num_attention_heads = num_attention_heads
156
+ self.attention_head_dim = attention_head_dim
157
+ self.max_position_embeddings = max_position_embeddings
158
+ self.layer_norm_eps = layer_norm_eps
159
+ self.layerdrop = layerdrop
160
+ self.hidden_act = hidden_act
161
+ self.initializer_range = initializer_range
162
+ self.hidden_dropout_prob = hidden_dropout_prob
163
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
164
+ self.pad_token_id = pad_token_id
165
+ self.bos_token_id = bos_token_id
166
+ self.eos_token_id = eos_token_id
167
+ self.conv_glu_dim = conv_glu_dim
168
+ self.conv_dropout = conv_dropout
169
+ self.num_conv_layers = num_conv_layers
170
+ self.input_feat_per_channel = input_feat_per_channel
171
+ self.input_channels = input_channels
172
+ self.conv_channels = conv_channels
173
+ self.ctc_loss_reduction = ctc_loss_reduction
174
+ self.ctc_zero_infinity = ctc_zero_infinity
175
+
176
+ # prevents config testing fail with exporting to json
177
+ self.conv_kernel = list(conv_kernel)
178
+ self.conv_stride = list(conv_stride)
179
+
180
+ if len(self.conv_kernel) != self.num_conv_layers:
181
+ raise ValueError(
182
+ "Configuration for convolutional module is incorrect. "
183
+ "It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
184
+ f"but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, "
185
+ f"`config.num_conv_layers = {self.num_conv_layers}`."
186
+ )
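One detail of the constructor above worth calling out: `conv_kernel` and `conv_stride` are coerced to lists so the config serializes cleanly to JSON, and `len(conv_kernel)` must equal `num_conv_layers` or a `ValueError` is raised. A short sketch (same import path as assumed above):

```python
from transformers.models.deprecated.mctct import MCTCTConfig

# Tuples are accepted but stored as lists for JSON export.
config = MCTCTConfig(num_conv_layers=1, conv_kernel=(7,), conv_stride=(3,))
print(config.conv_kernel, config.conv_stride)  # [7] [3]

try:
    MCTCTConfig(num_conv_layers=2, conv_kernel=(7,))  # length mismatch
except ValueError as err:
    print(err)  # "Configuration for convolutional module is incorrect. ..."
```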
env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mctct/feature_extraction_mctct.py ADDED
@@ -0,0 +1,288 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Feature extractor class for M-CTC-T
17
+ """
18
+
19
+ from typing import List, Optional, Union
20
+
21
+ import numpy as np
22
+
23
+ from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
24
+ from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
25
+ from ....feature_extraction_utils import BatchFeature
26
+ from ....file_utils import PaddingStrategy, TensorType
27
+ from ....utils import logging
28
+
29
+
30
+ logger = logging.get_logger(__name__)
31
+
32
+
33
+ class MCTCTFeatureExtractor(SequenceFeatureExtractor):
34
+ r"""
35
+ Constructs an M-CTC-T feature extractor.
36
+
37
+ This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains
38
+ most of the main methods. Users should refer to this superclass for more information regarding those methods. This
39
+ code has been adapted from Flashlight's C++ code. For more information about the implementation, one can refer to
40
+ this [notebook](https://colab.research.google.com/drive/1GLtINkkhzms-IsdcGy_-tVCkv0qNF-Gt#scrollTo=pMCRGMmUC_an)
41
+ that walks the user step-by-step through the implementation.
42
+
43
+ Args:
44
+ feature_size (`int`, defaults to 80):
45
+ The feature dimension of the extracted features. This is the number of mel-frequency bins.
46
+ sampling_rate (`int`, defaults to 16000):
47
+ The sampling rate at which the audio files should be digitized, expressed in hertz (Hz).
48
+ padding_value (`float`, defaults to 0.0):
49
+ The value that is used to fill the padding values.
50
+ hop_length (`int`, defaults to 10):
51
+ Number of audio samples between windows. Otherwise referred to as "shift" in many papers.
52
+ win_length (`int`, defaults to 25):
53
+ Number of milliseconds per window.
54
+ win_function (`str`, defaults to `"hamming_window"`):
55
+ Name for the window function used for windowing, must be accessible via `torch.{win_function}`
56
+ frame_signal_scale (`float`, defaults to 32768.0):
57
+ Constant multiplied in creating the frames before applying DFT.
58
+ preemphasis_coeff (`float`, defaults to 0.97):
59
+ Constant multiplied in applying Pre-emphasis before DFT.
60
+ mel_floor (`float`, defaults to 1.0):
61
+ Minimum value of mel frequency banks.
62
+ normalize_means (`bool`, *optional*, defaults to `True`):
63
+ Whether or not to zero-mean normalize the extracted features.
64
+ normalize_vars (`bool`, *optional*, defaults to `True`):
65
+ Whether or not to unit-variance normalize the extracted features.
66
+ """
67
+
68
+ model_input_names = ["input_features", "attention_mask"]
69
+
70
+ def __init__(
71
+ self,
72
+ feature_size=80,
73
+ sampling_rate=16000,
74
+ padding_value=0.0,
75
+ hop_length=10,
76
+ win_length=25,
77
+ win_function="hamming_window",
78
+ frame_signal_scale=32768.0,
79
+ preemphasis_coeff=0.97,
80
+ mel_floor=1.0,
81
+ normalize_means=True,
82
+ normalize_vars=True,
83
+ return_attention_mask=False,
84
+ **kwargs,
85
+ ):
86
+ super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
87
+
88
+ self.feature_size = feature_size
89
+ self.sampling_rate = sampling_rate
90
+ self.padding_value = padding_value
91
+ self.hop_length = hop_length
92
+ self.win_length = win_length
93
+ self.frame_signal_scale = frame_signal_scale
94
+ self.preemphasis_coeff = preemphasis_coeff
95
+ self.mel_floor = mel_floor
96
+ self.normalize_means = normalize_means
97
+ self.normalize_vars = normalize_vars
98
+ self.win_function = win_function
99
+ self.return_attention_mask = return_attention_mask
100
+
101
+ self.sample_size = win_length * sampling_rate // 1000
102
+ self.sample_stride = hop_length * sampling_rate // 1000
103
+
104
+ self.n_fft = optimal_fft_length(self.sample_size)
105
+ self.n_freqs = (self.n_fft // 2) + 1
106
+
107
+ def _extract_mfsc_features(self, one_waveform: np.array) -> np.ndarray:
108
+ """
109
+ Extracts MFSC Features for one waveform vector (unbatched). Adapted from Flashlight's C++ MFSC code.
110
+ """
111
+ if self.win_function == "hamming_window":
112
+ window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
113
+ else:
114
+ window = window_function(window_length=self.sample_size, name=self.win_function)
115
+
116
+ fbanks = mel_filter_bank(
117
+ num_frequency_bins=self.n_freqs,
118
+ num_mel_filters=self.feature_size,
119
+ min_frequency=0.0,
120
+ max_frequency=self.sampling_rate / 2.0,
121
+ sampling_rate=self.sampling_rate,
122
+ )
123
+
124
+ msfc_features = spectrogram(
125
+ one_waveform * self.frame_signal_scale,
126
+ window=window,
127
+ frame_length=self.sample_size,
128
+ hop_length=self.sample_stride,
129
+ fft_length=self.n_fft,
130
+ center=False,
131
+ preemphasis=self.preemphasis_coeff,
132
+ mel_filters=fbanks,
133
+ mel_floor=self.mel_floor,
134
+ log_mel="log",
135
+ )
136
+ return msfc_features.T
137
+
138
+ def _normalize_one(self, x, input_length, padding_value):
139
+ # make sure we normalize float32 arrays
140
+ if self.normalize_means:
141
+ mean = x[:input_length].mean(axis=0)
142
+ x = np.subtract(x, mean)
143
+ if self.normalize_vars:
144
+ std = x[:input_length].std(axis=0)
145
+ x = np.divide(x, std)
146
+
147
+ if input_length < x.shape[0]:
148
+ x[input_length:] = padding_value
149
+
150
+ # make sure array is in float32
151
+ x = x.astype(np.float32)
152
+
153
+ return x
154
+
155
+ def normalize(
156
+ self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
157
+ ) -> List[np.ndarray]:
158
+ lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
159
+ return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]
160
+
161
+ def __call__(
162
+ self,
163
+ raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
164
+ padding: Union[bool, str, PaddingStrategy] = False,
165
+ max_length: Optional[int] = None,
166
+ truncation: bool = False,
167
+ pad_to_multiple_of: Optional[int] = None,
168
+ return_attention_mask: Optional[bool] = None,
169
+ return_tensors: Optional[Union[str, TensorType]] = None,
170
+ sampling_rate: Optional[int] = None,
171
+ **kwargs,
172
+ ) -> BatchFeature:
173
+ """
174
+ Main method to featurize and prepare one or several sequence(s) for the model. It returns the
175
+ log-mel spectrogram of the input audio, as implemented in the original Flashlight MFSC feature extraction code.
176
+
177
+ Args:
178
+ raw_speech (`torch.Tensor`, `np.ndarray`, `List[float]`, `List[torch.Tensor]`, `List[np.ndarray]`, `List[List[float]]`):
179
+ The sequence or batch of sequences to be padded. Each sequence can be a tensor, a numpy array, a list
180
+ of float values, a list of tensors, a list of numpy arrays or a list of list of float values. Must be
181
+ mono channel audio, not stereo, i.e. single float per timestep.
182
+ padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `False`):
183
+ Select a strategy to pad the returned sequences (according to the model's padding side and padding
184
+ index) among:
185
+
186
+ - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
187
+ sequence is provided).
188
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
189
+ acceptable input length for the model if that argument is not provided.
190
+ - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
191
+ lengths).
192
+ max_length (`int`, *optional*):
193
+ Maximum length of the returned list and optionally padding length (see above).
194
+ truncation (`bool`):
195
+ Activates truncation to cut input sequences longer than *max_length* to *max_length*.
196
+ pad_to_multiple_of (`int`, *optional*):
197
+ If set will pad the sequence to a multiple of the provided value.
198
+
199
+ This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
200
+ `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128.
201
+ return_attention_mask (`bool`, *optional*):
202
+ Whether to return the attention mask. If left to the default, will return the attention mask according
203
+ to the specific feature_extractor's default.
204
+
205
+ [What are attention masks?](../glossary#attention-mask)
206
+
207
+ return_tensors (`str` or [`~file_utils.TensorType`], *optional*):
208
+ If set, will return tensors instead of list of python integers. Acceptable values are:
209
+
210
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
211
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
212
+ - `'np'`: Return Numpy `np.ndarray` objects.
213
+ sampling_rate (`int`, *optional*):
214
+ The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass
215
+ `sampling_rate` at the forward call to prevent silent errors.
216
+ padding_value (`float`, defaults to 0.0):
+ The value that is used to fill the padding values.
217
+ """
218
+
219
+ if sampling_rate is not None:
220
+ if sampling_rate != self.sampling_rate:
221
+ raise ValueError(
222
+ f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
223
+ f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
224
+ f" {self.sampling_rate} and not {sampling_rate}."
225
+ )
226
+ else:
227
+ logger.warning(
228
+ "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
229
+ "Failing to do so can result in silent errors that might be hard to debug."
230
+ )
231
+
232
+ is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
233
+ if is_batched_numpy and len(raw_speech.shape) > 2:
234
+ raise ValueError(f"Only mono-channel audio is supported for input to {self}")
235
+ is_batched = is_batched_numpy or (
236
+ isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
237
+ )
238
+
239
+ if is_batched:
240
+ raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
241
+ elif not is_batched and not isinstance(raw_speech, np.ndarray):
242
+ raw_speech = np.asarray(raw_speech, dtype=np.float32)
243
+ elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
244
+ raw_speech = raw_speech.astype(np.float32)
245
+
246
+ # always return batch
247
+ if not is_batched:
248
+ raw_speech = [raw_speech]
249
+
250
+ # extract fbank features
251
+ features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]
252
+
253
+ # convert into correct format for padding
254
+ encoded_inputs = BatchFeature({"input_features": features})
255
+
256
+ padded_inputs = self.pad(
257
+ encoded_inputs,
258
+ padding=padding,
259
+ max_length=max_length,
260
+ truncation=truncation,
261
+ pad_to_multiple_of=pad_to_multiple_of,
262
+ return_attention_mask=True,
263
+ **kwargs,
264
+ )
265
+ # make sure list is in array format
266
+ input_features = padded_inputs.get("input_features")
267
+ if isinstance(input_features[0], list):
268
+ padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
269
+
270
+ attention_mask = padded_inputs.get("attention_mask")
271
+ if attention_mask is not None:
272
+ padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
273
+
274
+ if self.normalize_means or self.normalize_vars:
275
+ attention_mask = (
276
+ np.array(attention_mask, dtype=np.int32)
277
+ if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
278
+ and padding
279
+ else None
280
+ )
281
+ padded_inputs["input_features"] = self.normalize(
282
+ padded_inputs["input_features"], attention_mask=attention_mask
283
+ )
284
+
285
+ if return_tensors is not None:
286
+ padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
287
+
288
+ return padded_inputs
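To make the `__call__` contract above concrete, here is a hedged usage sketch; the random waveforms are purely illustrative stand-ins for real 16 kHz mono audio:

```python
import numpy as np

from transformers.models.deprecated.mctct import MCTCTFeatureExtractor

extractor = MCTCTFeatureExtractor()  # 80 mel bins, 16 kHz, 25 ms window, 10 ms hop

# Two fake mono waveforms of different lengths (illustrative only).
waveforms = [
    np.random.randn(16000).astype(np.float32),
    np.random.randn(24000).astype(np.float32),
]

inputs = extractor(waveforms, sampling_rate=16000, padding="longest", return_tensors="np")
print(inputs["input_features"].shape)  # (2, num_frames, 80), padded to the longest sequence
```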
env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mctct/modeling_mctct.py ADDED
@@ -0,0 +1,795 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch M-CTC-T model."""
16
+
17
+
18
+ import math
19
+ from typing import Optional, Tuple, Union
20
+
21
+ import torch
22
+ import torch.utils.checkpoint
23
+ from torch import nn
24
+
25
+ from ....activations import ACT2FN
26
+ from ....file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
27
+ from ....integrations.deepspeed import is_deepspeed_zero3_enabled
28
+ from ....modeling_attn_mask_utils import _prepare_4d_attention_mask
29
+ from ....modeling_outputs import BaseModelOutput, CausalLMOutput
30
+ from ....modeling_utils import (
31
+ PreTrainedModel,
32
+ apply_chunking_to_forward,
33
+ find_pruneable_heads_and_indices,
34
+ prune_linear_layer,
35
+ )
36
+ from ....utils import logging
37
+ from .configuration_mctct import MCTCTConfig
38
+
39
+
40
+ logger = logging.get_logger(__name__)
41
+
42
+ _HIDDEN_STATES_START_POSITION = 1
43
+
44
+ _CONFIG_FOR_DOC = "MCTCTConfig"
45
+
46
+ # Base docstring
47
+ _CHECKPOINT_FOR_DOC = "speechbrain/m-ctc-t-large"
48
+ _EXPECTED_OUTPUT_SHAPE = [1, 195, 1536]
49
+
50
+ # CTC docstring
51
+ _CTC_EXPECTED_OUTPUT = '"Mr. Quilter is the apostle of the middle classes, and we\'re glad to welcome his gospel."'
52
+ _CTC_EXPECTED_LOSS = 1885.65
53
+
54
+
55
+ MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST = [
56
+ "speechbrain/m-ctc-t-large",
57
+ # See all M-CTC-T models at https://huggingface.co/models?filter=mctct
58
+ ]
59
+
60
+
61
+ class MCTCTConv1dSubsampler(nn.Module):
62
+ """
63
+ Convolutional subsampler: a stack of 1D convolution (along temporal dimension) followed by non-linear activation
64
+ via gated linear units (https://arxiv.org/abs/1911.08460)
65
+ """
66
+
67
+ def __init__(self, config):
68
+ super().__init__()
69
+ self.config = config
70
+ self.glu_dim = config.conv_glu_dim
71
+
72
+ self.dropout = nn.Dropout(config.conv_dropout)
73
+
74
+ self.num_layers = config.num_conv_layers
75
+ self.in_channels = config.input_feat_per_channel * config.input_channels
76
+
77
+ if self.num_layers > 1:
78
+ if config.conv_channels is None:
79
+ raise ValueError(
80
+ "Need to specify `conv_channels` configuration in `MCTCTConfig` to use multiple convolution"
81
+ " layers."
82
+ )
83
+
84
+ self.mid_channels = config.conv_channels
85
+ else:
86
+ self.mid_channels = None
87
+
88
+ self.out_channels = config.hidden_size * 2 # considering GLU halving
89
+ self.kernel_size = config.conv_kernel
90
+ self.stride = config.conv_stride
91
+
92
+ # NOTE: MCTCT by construction only uses one convolution kernel. I've made this flexible to allow for
93
+ # multiple layers of convolutions, but not sure if this model definition should just restrict it
94
+ # to one layer. This becomes especially relevant when considering the padding computed at the start of forward().
95
+ self.conv_layers = nn.ModuleList(
96
+ nn.Conv1d(
97
+ self.in_channels if i == 0 else self.mid_channels[i],
98
+ self.mid_channels[i] if i < self.num_layers - 1 else self.out_channels,
99
+ kernel_size=k,
100
+ stride=self.stride[i],
101
+ padding="valid",
102
+ )
103
+ for i, k in enumerate(self.kernel_size)
104
+ )
105
+
106
+ def forward(self, input_features):
107
+ # NOTE: in reference to the NOTE in __init__, right now it just calculates padding as if
108
+ # there will be just one conv layer.
109
+ padding = sum([size // 2 for size in self.kernel_size]) # (7, 7) -> (3, 3)
110
+
111
+ input_features = torch.nn.functional.pad(input_features, (0, 0, padding, padding), "constant", 0)
112
+ hidden_states = input_features.transpose(1, 2).contiguous() # -> Batch x Frame x Time
113
+ for conv in self.conv_layers:
114
+ hidden_states = conv(hidden_states)
115
+ hidden_states = nn.functional.glu(hidden_states, dim=self.glu_dim)
116
+ hidden_states = self.dropout(hidden_states)
117
+
118
+ hidden_states = hidden_states.transpose(1, 2).contiguous() # -> Batch x Time x Frame
119
+ return hidden_states
120
+
121
+
122
+ class MCTCTEmbeddings(nn.Module):
123
+ """Construct the embeddings from word, position and token_type embeddings."""
124
+
125
+ def __init__(self, config):
126
+ super().__init__()
127
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
128
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
129
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
130
+
131
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
132
+ # any TensorFlow checkpoint file
133
+ # self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
134
+ self.LayerNorm = MCTCTLayerNorm()
135
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
136
+
137
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
138
+ self.register_buffer(
139
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
140
+ )
141
+ self.register_buffer(
142
+ "token_type_ids",
143
+ torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device),
144
+ persistent=False,
145
+ )
146
+
147
+ def forward(
148
+ self, input_features=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
149
+ ):
150
+ input_shape = input_features.size() if input_features is not None else inputs_embeds.size()[:-1]
151
+
152
+ seq_length = input_shape[1]
153
+
154
+ if position_ids is None:
155
+ position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
156
+
157
+ # When token_type_ids is not provided, fall back to the all-zeros buffer registered in the constructor. The
158
+ # registered buffer helps users trace the model without passing token_type_ids, and solves
159
+ # issue #5664
160
+ if token_type_ids is None:
161
+ if hasattr(self, "token_type_ids"):
162
+ buffered_token_type_ids = self.token_type_ids[:, :seq_length]
163
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
164
+ token_type_ids = buffered_token_type_ids_expanded
165
+ else:
166
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
167
+
168
+ if inputs_embeds is None:
169
+ inputs_embeds = self.word_embeddings(input_features)
170
+
171
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
172
+
173
+ embeddings = inputs_embeds + token_type_embeddings
174
+
175
+ embeddings = self.LayerNorm(embeddings)
176
+ embeddings = self.dropout(embeddings)
177
+ return embeddings
178
+
179
+
180
+ class MCTCTSelfAttention(nn.Module):
181
+ def __init__(self, config):
182
+ super().__init__()
183
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
184
+ raise ValueError(
185
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
186
+ f"heads ({config.num_attention_heads})"
187
+ )
188
+
189
+ self.num_attention_heads = config.num_attention_heads
190
+ self.attention_head_size = config.attention_head_dim
191
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
192
+
193
+ self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=False)
194
+ self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=False)
195
+ self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=False)
196
+
197
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
198
+
199
+ self.max_position_embeddings = config.max_position_embeddings
200
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
201
+
202
+ self.is_decoder = config.is_decoder
203
+
204
+ def transpose_for_scores(self, x):
205
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
206
+ x = x.view(*new_x_shape)
207
+ return x.permute(0, 2, 1, 3)
208
+
209
+ def reshape_fortran(self, x, shape):
210
+ if len(x.shape) > 0:
211
+ x = x.permute(*reversed(range(len(x.shape))))
212
+ return x.reshape(*reversed(shape)).permute(*reversed(range(len(shape))))
213
+
214
+ def relative_position_embedding_rotate(self, scores):
215
+ # NOTE: should re-evaluate whether this re-implementation was truly necessary
216
+ # or whether the reason my complete overhaul worked was due to some other part
217
+ # of the code. Adding this and the Fortran-style reshape code seems very undesirable.
218
+ scores = scores.permute(0, 2, 3, 1) # e.g. [10, 1839, 14, 4]
219
+
220
+ batch, hidden_state, seq_len, heads = scores.shape
221
+
222
+ # e.g. [10, 1853, 14, 4]
223
+ scores = torch.cat((scores, torch.zeros((batch, seq_len, seq_len, heads), device=scores.device)), dim=1)
224
+
225
+ # e.g. [10, 25942, 1, 4]
226
+ scores = self.reshape_fortran(scores, [batch, (hidden_state + seq_len) * seq_len, 1, heads])
227
+
228
+ # e.g. [10, 25928, 1, 4]
229
+ scores = scores[:, : (seq_len + hidden_state - 1) * seq_len]
230
+
231
+ # e.g. [10, 1852, 14, 4]
232
+ scores = self.reshape_fortran(scores, [batch, hidden_state + seq_len - 1, seq_len, heads])
233
+
234
+ halfpoint = hidden_state // 2
235
+ scores = scores[:, halfpoint : halfpoint + seq_len].transpose(1, 2) # e.g. [10, 14, 14, 4]
236
+
237
+ return scores.permute(0, 3, 1, 2)
238
+
239
+ def forward(
240
+ self,
241
+ hidden_states,
242
+ attention_mask=None,
243
+ head_mask=None,
244
+ output_attentions=False,
245
+ ):
246
+ mixed_query_layer = self.query(hidden_states)
247
+ mixed_query_layer = mixed_query_layer / math.sqrt(self.attention_head_size)
248
+
249
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
250
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
251
+
252
+ query_layer = self.transpose_for_scores(mixed_query_layer)
253
+
254
+ # Take the dot product between "query" and "key" to get the raw attention scores.
255
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
256
+
257
+ # relative key position embeddings
258
+ positional_embedding = self.distance_embedding.weight
259
+ relative_position_scores = torch.einsum("lh, bche -> bcle", positional_embedding, query_layer.transpose(2, 3))
260
+
261
+ relative_position_scores = self.relative_position_embedding_rotate(relative_position_scores)
262
+ attention_scores = attention_scores + relative_position_scores
263
+
264
+ if attention_mask is not None:
265
+ # Apply the attention mask is (precomputed for all layers in MCTCTModel forward() function)
266
+ attention_scores = attention_scores + attention_mask
267
+
268
+ # Normalize the attention scores to probabilities.
269
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
270
+
271
+ # This is actually dropping out entire tokens to attend to, which might
272
+ # seem a bit unusual, but is taken from the original Transformer paper.
273
+ attention_probs = self.dropout(attention_probs)
274
+
275
+ # Mask heads if we want to
276
+ if head_mask is not None:
277
+ attention_probs = attention_probs * head_mask
278
+
279
+ context_layer = torch.matmul(attention_probs, value_layer)
280
+
281
+ context_layer = context_layer.permute(0, 2, 1, 3).flatten(start_dim=-2)
282
+
283
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
284
+
285
+ return outputs
286
+
287
+
288
+ class MCTCTLayerNorm(nn.Module):
289
+ def __init__(self):
290
+ super().__init__()
291
+ self.singleton_weight = nn.Parameter(torch.ones(1))
292
+ self.singleton_bias = nn.Parameter(torch.zeros(1))
293
+
294
+ def forward(self, hidden_states):
295
+ return (hidden_states * self.singleton_weight) + self.singleton_bias
296
+
297
+
298
+ class MCTCTSelfOutput(nn.Module):
299
+ def __init__(self, config):
300
+ super().__init__()
301
+ self.config = config
302
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size, bias=False)
303
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
304
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
305
+
306
+ def forward(self, hidden_states, input_tensor):
307
+ hidden_states = self.dense(hidden_states)
308
+ hidden_states = self.dropout(hidden_states)
309
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
310
+ return hidden_states
311
+
312
+
313
+ class MCTCTAttention(nn.Module):
314
+ def __init__(self, config):
315
+ super().__init__()
316
+ self.self = MCTCTSelfAttention(config)
317
+ self.output = MCTCTSelfOutput(config)
318
+ self.pruned_heads = set()
319
+
320
+ def prune_heads(self, heads):
321
+ if len(heads) == 0:
322
+ return
323
+ heads, index = find_pruneable_heads_and_indices(
324
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
325
+ )
326
+
327
+ # Prune linear layers
328
+ self.self.query = prune_linear_layer(self.self.query, index)
329
+ self.self.key = prune_linear_layer(self.self.key, index)
330
+ self.self.value = prune_linear_layer(self.self.value, index)
331
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
332
+
333
+ # Update hyper params and store pruned heads
334
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
335
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
336
+ self.pruned_heads = self.pruned_heads.union(heads)
337
+
338
+ def forward(
339
+ self,
340
+ hidden_states,
341
+ attention_mask=None,
342
+ head_mask=None,
343
+ output_attentions=False,
344
+ ):
345
+ self_outputs = self.self(
346
+ hidden_states,
347
+ attention_mask,
348
+ head_mask,
349
+ output_attentions,
350
+ )
351
+ attention_output = self.output(self_outputs[0], hidden_states)
352
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
353
+
354
+ return outputs
355
+
356
+
357
+ class MCTCTIntermediate(nn.Module):
358
+ def __init__(self, config):
359
+ super().__init__()
360
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size, bias=False)
361
+ if isinstance(config.hidden_act, str):
362
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
363
+ else:
364
+ self.intermediate_act_fn = config.hidden_act
365
+
366
+ def forward(self, hidden_states):
367
+ hidden_states = self.dense(hidden_states)
368
+ hidden_states = self.intermediate_act_fn(hidden_states)
369
+ return hidden_states
370
+
371
+
372
+ class MCTCTOutput(nn.Module):
373
+ def __init__(self, config):
374
+ super().__init__()
375
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size, bias=False)
376
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
377
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
378
+
379
+ def forward(self, hidden_states, input_tensor):
380
+ hidden_states = self.dense(hidden_states)
381
+ hidden_states = self.dropout(hidden_states)
382
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
383
+ return hidden_states
384
+
385
+
386
+ class MCTCTLayer(nn.Module):
387
+ def __init__(self, config: MCTCTConfig):
388
+ super().__init__()
389
+
390
+ self.seq_len_dim = 1
391
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
392
+
393
+ self.intermediate = MCTCTIntermediate(config)
394
+ self.attention = MCTCTAttention(config)
395
+ self.is_decoder = config.is_decoder
396
+ self.output = MCTCTOutput(config)
397
+
398
+ def forward(
399
+ self,
400
+ hidden_states,
401
+ attention_mask=None,
402
+ head_mask=None,
403
+ output_attentions=False,
404
+ ):
405
+ self_attention_outputs = self.attention(
406
+ hidden_states, attention_mask, head_mask, output_attentions=output_attentions
407
+ )
408
+ attention_output = self_attention_outputs[0]
409
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
410
+
411
+ layer_output = apply_chunking_to_forward(
412
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
413
+ )
414
+
415
+ outputs = (layer_output,) + outputs
416
+
417
+ return outputs
418
+
419
+ def feed_forward_chunk(self, attention_output):
420
+ intermediate_output = self.intermediate(attention_output)
421
+ layer_output = self.output(intermediate_output, attention_output)
422
+ return layer_output
423
+
424
+
425
+ class MCTCTPreTrainedModel(PreTrainedModel):
426
+ """
427
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
428
+ models.
429
+ """
430
+
431
+ config_class = MCTCTConfig
432
+ base_model_prefix = "mctct"
433
+ main_input_name = "input_features"
434
+ supports_gradient_checkpointing = True
435
+
436
+ def _init_weights(self, module):
437
+ """Initialize the weights"""
438
+ std = self.config.initializer_range
439
+ if isinstance(module, nn.Linear):
440
+ # Slightly different from the TF version which uses truncated_normal for initialization
441
+ # cf https://github.com/pytorch/pytorch/pull/5617
442
+ module.weight.data.normal_(mean=0.0, std=std)
443
+ if module.bias is not None:
444
+ module.bias.data.zero_()
445
+ elif isinstance(module, nn.Embedding):
446
+ module.weight.data.normal_(mean=0.0, std=std)
447
+ if module.padding_idx is not None:
448
+ module.weight.data[module.padding_idx].zero_()
449
+ elif isinstance(module, nn.LayerNorm):
450
+ module.bias.data.zero_()
451
+ module.weight.data.fill_(1.0)
452
+ elif isinstance(module, MCTCTLayerNorm):
453
+ module.singleton_weight.data.fill_(1.0)
454
+ module.singleton_bias.data.zero_()
455
+ if isinstance(module, (nn.Linear, nn.Conv1d)):
456
+ module.weight.data.normal_(mean=0.0, std=std)
457
+ if module.bias is not None:
458
+ module.bias.data.zero_()
459
+
460
+ def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor):
461
+ """
462
+ Computes the output length of the convolutional layers
463
+ """
464
+ dilation = 1
465
+ for _, kernel_sz, stride in zip(
466
+ range(self.config.num_conv_layers), self.config.conv_kernel, self.config.conv_stride
467
+ ):
468
+ padding = kernel_sz // 2
469
+ input_lengths = input_lengths + 2 * padding - dilation * (kernel_sz - 1) - 1
470
+ input_lengths = torch.div(input_lengths, stride, rounding_mode="trunc") + 1
471
+
472
+ return input_lengths
473
+
474
+ def _get_feature_vector_attention_mask(self, feature_vector_length, attention_mask):
475
+ # generate creates a 3D attention mask because of the shape of input_features;
476
+ # convert it to 2D if that's the case
477
+ if len(attention_mask.shape) > 2:
478
+ attention_mask = attention_mask[:, :, -1]
479
+
480
+ # subsampled_lengths = attention_mask.sum(-1)
481
+ subsampled_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1))
482
+ bsz = attention_mask.size()[0]
483
+ attention_mask = torch.zeros(
484
+ (bsz, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device
485
+ )
486
+
487
+ # these two operations make sure that all values
488
+ # before the output lengths indices are attended to
489
+ attention_mask[(torch.arange(bsz, device=attention_mask.device), subsampled_lengths - 1)] = 1
490
+ attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).long()
491
+ return attention_mask
492
+
493
+
494
+ MCTCT_START_DOCSTRING = r"""
495
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
496
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
497
+ behavior.
498
+
499
+ Parameters:
500
+ config ([`MCTCTConfig`]): Model configuration class with all the parameters of the model.
501
+ Initializing with a config file does not load the weights associated with the model, only the
502
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
503
+ """
504
+
505
+ MCTCT_INPUTS_DOCSTRING = r"""
506
+ Args:
507
+ input_features (`torch.LongTensor` of shape `({0})`):
508
+ Indices of input sequence tokens in the vocabulary.
509
+
510
+ Indices can be obtained using [`Wav2Vec2CTCTokenizer`]. See [`PreTrainedTokenizer.encode`] and
511
+ [`PreTrainedTokenizer.__call__`] for details.
512
+
513
+ [What are input IDs?](../glossary#input-ids)
514
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
515
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
516
+
517
+ - 1 for tokens that are **not masked**,
518
+ - 0 for tokens that are **masked**.
519
+
520
+ [What are attention masks?](../glossary#attention-mask)
521
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
522
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
523
+
524
+ - 1 indicates the head is **not masked**,
525
+ - 0 indicates the head is **masked**.
526
+ output_attentions (`bool`, *optional*):
527
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
528
+ tensors for more detail.
529
+ output_hidden_states (`bool`, *optional*):
530
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
531
+ more detail.
532
+ return_dict (`bool`, *optional*):
533
+ Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
534
+ """
535
+
536
+
537
+ class MCTCTEncoder(MCTCTPreTrainedModel):
538
+ def __init__(self, config: MCTCTConfig):
539
+ super().__init__(config)
540
+ self.hidden_dropout_prob = config.hidden_dropout_prob
541
+
542
+ self.layer_norm = MCTCTLayerNorm()
543
+ self.conv = MCTCTConv1dSubsampler(config)
544
+ self.layers = nn.ModuleList([MCTCTLayer(config) for _ in range(config.num_hidden_layers)])
545
+
546
+ self.gradient_checkpointing = False
547
+
548
+ def forward(
549
+ self,
550
+ input_features: torch.Tensor,
551
+ attention_mask: torch.Tensor,
552
+ head_mask: torch.Tensor,
553
+ output_attentions: bool = False,
554
+ output_hidden_states: bool = False,
555
+ return_dict: bool = True,
556
+ ) -> Union[Tuple, BaseModelOutput]:
557
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
558
+ output_hidden_states = (
559
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
560
+ )
561
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
562
+
563
+ input_features = self.layer_norm(input_features)
564
+
565
+ inputs_embeds = self.conv(input_features)
566
+
567
+ # subsample attention mask if necessary
568
+ if attention_mask is not None:
569
+ attention_mask = self._get_feature_vector_attention_mask(inputs_embeds.shape[1], attention_mask)
570
+
571
+ hidden_states = nn.functional.dropout(inputs_embeds, p=self.hidden_dropout_prob, training=self.training)
572
+
573
+ # expand attention_mask
574
+ if attention_mask is not None:
575
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
576
+ attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
577
+
578
+ encoder_states = () if output_hidden_states else None
579
+ all_attentions = () if output_attentions else None
580
+
581
+ # check if head_mask has a correct number of layers specified if desired
582
+ if head_mask is not None:
583
+ if head_mask.size()[0] != len(self.layers):
584
+ raise ValueError(
585
+ f"The head_mask should be specified for {len(self.layers)} layers, "
586
+ f"but it is for {head_mask.size()[0]}."
587
+ )
588
+
589
+ deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()
590
+ for idx, encoder_layer in enumerate(self.layers):
591
+ if output_hidden_states:
592
+ encoder_states = encoder_states + (hidden_states,)
593
+
594
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
595
+ dropout_probability = torch.rand([])
596
+
597
+ skip_the_layer = True if self.training and (dropout_probability < self.config.layerdrop) else False
598
+ if not skip_the_layer or deepspeed_zero3_is_enabled:
599
+ # under deepspeed zero3 all gpus must run in sync
600
+ if self.gradient_checkpointing and self.training:
601
+ layer_outputs = self._gradient_checkpointing_func(
602
+ encoder_layer.__call__,
603
+ hidden_states,
604
+ attention_mask,
605
+ (head_mask[idx] if head_mask is not None else None),
606
+ output_attentions,
607
+ )
608
+ else:
609
+ layer_outputs = encoder_layer(
610
+ hidden_states=hidden_states,
611
+ attention_mask=attention_mask,
612
+ output_attentions=output_attentions,
613
+ )
614
+
615
+ hidden_states = layer_outputs[0]
616
+
617
+ if skip_the_layer:
618
+ layer_outputs = (None, None)
619
+
620
+ if output_attentions:
621
+ all_attentions = all_attentions + (layer_outputs[1],)
622
+
623
+ if output_hidden_states:
624
+ encoder_states = encoder_states + (hidden_states,)
625
+
626
+ if not return_dict:
627
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
628
+ return BaseModelOutput(
629
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
630
+ )
631
+
632
+
633
+ @add_start_docstrings(
634
+ "The bare M-CTC-T Model transformer outputting raw hidden-states without any specific head on top.",
635
+ MCTCT_START_DOCSTRING,
636
+ )
637
+ class MCTCTModel(MCTCTPreTrainedModel):
638
+ def __init__(self, config):
639
+ super().__init__(config)
640
+ self.config = config
641
+
642
+ self.encoder = MCTCTEncoder(config)
643
+
644
+ # Initialize weights and apply final processing
645
+ self.post_init()
646
+
647
+ @add_start_docstrings_to_model_forward(MCTCT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
648
+ @add_code_sample_docstrings(
649
+ checkpoint=_CHECKPOINT_FOR_DOC,
650
+ output_type=BaseModelOutput,
651
+ config_class=_CONFIG_FOR_DOC,
652
+ modality="audio",
653
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
654
+ )
655
+ def forward(
656
+ self,
657
+ input_features: torch.Tensor,
658
+ attention_mask: Optional[torch.Tensor] = None,
659
+ head_mask: Optional[torch.Tensor] = None,
660
+ output_attentions: Optional[bool] = None,
661
+ output_hidden_states: Optional[bool] = None,
662
+ return_dict: Optional[bool] = None,
663
+ ) -> Union[Tuple, BaseModelOutput]:
664
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
665
+ output_hidden_states = (
666
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
667
+ )
668
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
669
+
670
+ if input_features is None:
671
+ raise ValueError("You have to specify input_features.")
672
+
673
+ encoder_outputs = self.encoder(
674
+ input_features,
675
+ attention_mask=attention_mask,
676
+ head_mask=head_mask,
677
+ output_attentions=output_attentions,
678
+ output_hidden_states=output_hidden_states,
679
+ return_dict=return_dict,
680
+ )
681
+ sequence_output = encoder_outputs[0]
682
+
683
+ if not return_dict:
684
+ return (sequence_output,) + encoder_outputs[1:]
685
+
686
+ return BaseModelOutput(
687
+ last_hidden_state=sequence_output,
688
+ hidden_states=encoder_outputs.hidden_states,
689
+ attentions=encoder_outputs.attentions,
690
+ )
691
+
692
+
693
+ @add_start_docstrings(
694
+ """MCTCT Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).""",
695
+ MCTCT_START_DOCSTRING,
696
+ )
697
+ class MCTCTForCTC(MCTCTPreTrainedModel):
698
+ def __init__(self, config):
699
+ super().__init__(config)
700
+
701
+ self.mctct = MCTCTModel(config)
702
+
703
+ if config.vocab_size is None:
704
+ raise ValueError(
705
+ f"You are trying to instantiate {self.__class__} with a configuration that "
706
+ "does not define the vocabulary size of the language model head. Please "
707
+ "instantiate the model as follows: `MCTCTForCTC.from_pretrained(..., vocab_size=vocab_size)`. "
708
+ "or define `vocab_size` in your model's configuration."
709
+ )
710
+ output_hidden_size = config.hidden_size
711
+
712
+ self.ctc_head = nn.Linear(output_hidden_size, config.vocab_size)
713
+
714
+ # Initialize weights and apply final processing
715
+ self.post_init()
716
+
717
+ @add_start_docstrings_to_model_forward(MCTCT_INPUTS_DOCSTRING)
718
+ @add_code_sample_docstrings(
719
+ checkpoint=_CHECKPOINT_FOR_DOC,
720
+ output_type=CausalLMOutput,
721
+ config_class=_CONFIG_FOR_DOC,
722
+ expected_output=_CTC_EXPECTED_OUTPUT,
723
+ expected_loss=_CTC_EXPECTED_LOSS,
724
+ )
725
+ def forward(
726
+ self,
727
+ input_features: torch.Tensor,
728
+ attention_mask: Optional[torch.Tensor] = None,
729
+ head_mask: Optional[torch.Tensor] = None,
730
+ output_attentions: Optional[bool] = None,
731
+ output_hidden_states: Optional[bool] = None,
732
+ return_dict: Optional[bool] = None,
733
+ labels: Optional[torch.LongTensor] = None,
734
+ ) -> Union[Tuple, CausalLMOutput]:
735
+ r"""
736
+ labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*):
737
+ Labels for connectionist temporal classification. Note that `target_length` has to be smaller or equal to
738
+ the sequence length of the output logits. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`.
739
+ All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ...,
740
+ config.vocab_size - 1]`.
741
+ """
742
+
743
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
744
+ outputs = self.mctct(
745
+ input_features,
746
+ attention_mask=attention_mask,
747
+ head_mask=head_mask,
748
+ output_attentions=output_attentions,
749
+ output_hidden_states=output_hidden_states,
750
+ return_dict=return_dict,
751
+ )
752
+
753
+ hidden_states = outputs[0]
754
+
755
+ logits = self.ctc_head(hidden_states)
756
+
757
+ loss = None
758
+ if labels is not None:
759
+ if labels.max() >= self.config.vocab_size:
760
+ raise ValueError(f"Label values must be <= vocab_size: {self.config.vocab_size}")
761
+
762
+ # retrieve loss input_lengths from attention_mask
763
+ attention_mask = (
764
+ attention_mask
765
+ if attention_mask is not None
766
+ else torch.ones(input_features.shape[:-1], dtype=torch.long)
767
+ )
768
+ input_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long)
769
+ # assuming that padded tokens are filled with -100
770
+ # when not being attended to
771
+ labels_mask = labels >= 0
772
+ target_lengths = labels_mask.sum(-1)
773
+ flattened_targets = labels.masked_select(labels_mask)
774
+
775
+ # ctc_loss doesn't support fp16
776
+ log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1)
777
+
778
+ with torch.backends.cudnn.flags(enabled=False):
779
+ loss = nn.functional.ctc_loss(
780
+ log_probs,
781
+ flattened_targets,
782
+ input_lengths,
783
+ target_lengths,
784
+ blank=self.config.pad_token_id,
785
+ reduction=self.config.ctc_loss_reduction,
786
+ zero_infinity=self.config.ctc_zero_infinity,
787
+ )
788
+
789
+ if not return_dict:
790
+ output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
791
+ return ((loss,) + output) if loss is not None else output
792
+
793
+ return CausalLMOutput(
794
+ loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions
795
+ )
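As an aside, the `ctc_loss` call in the forward pass above expects log-probabilities shaped (time, batch, vocab) plus per-example input and target lengths. The following is a minimal standalone sketch of that shape handling with made-up sizes, not tied to any real MCTCT checkpoint:

import torch
import torch.nn as nn

batch, time, vocab, blank = 2, 50, 32, 0          # illustrative sizes; blank index mirrors config.pad_token_id
logits = torch.randn(batch, time, vocab)          # what a CTC head produces: (batch, time, vocab)
# ctc_loss wants (time, batch, vocab) log-probabilities, computed in fp32
log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1)

input_lengths = torch.full((batch,), time, dtype=torch.long)    # unpadded frame count per example
targets = torch.randint(1, vocab, (batch, 20))                  # label ids must avoid the blank index
target_lengths = torch.full((batch,), 20, dtype=torch.long)

loss = nn.functional.ctc_loss(
    log_probs, targets, input_lengths, target_lengths, blank=blank, reduction="sum", zero_infinity=True
)
print(loss.item())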
env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mctct/processing_mctct.py ADDED
@@ -0,0 +1,142 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Speech processor class for M-CTC-T
17
+ """
18
+ import warnings
19
+ from contextlib import contextmanager
20
+
21
+ from ....processing_utils import ProcessorMixin
22
+
23
+
24
+ class MCTCTProcessor(ProcessorMixin):
25
+ r"""
26
+ Constructs a MCTCT processor which wraps a MCTCT feature extractor and a MCTCT tokenizer into a single processor.
27
+
28
+ [`MCTCTProcessor`] offers all the functionalities of [`MCTCTFeatureExtractor`] and [`AutoTokenizer`]. See the
29
+ [`~MCTCTProcessor.__call__`] and [`~MCTCTProcessor.decode`] for more information.
30
+
31
+ Args:
32
+ feature_extractor (`MCTCTFeatureExtractor`):
33
+ An instance of [`MCTCTFeatureExtractor`]. The feature extractor is a required input.
34
+ tokenizer (`AutoTokenizer`):
35
+ An instance of [`AutoTokenizer`]. The tokenizer is a required input.
36
+ """
37
+
38
+ feature_extractor_class = "MCTCTFeatureExtractor"
39
+ tokenizer_class = "AutoTokenizer"
40
+
41
+ def __init__(self, feature_extractor, tokenizer):
42
+ super().__init__(feature_extractor, tokenizer)
43
+ self.current_processor = self.feature_extractor
44
+ self._in_target_context_manager = False
45
+
46
+ def __call__(self, *args, **kwargs):
47
+ """
48
+ When used in normal mode, this method forwards all its arguments to MCTCTFeatureExtractor's
49
+ [`~MCTCTFeatureExtractor.__call__`] and returns its output. If used in the context
50
+ [`~MCTCTProcessor.as_target_processor`] this method forwards all its arguments to AutoTokenizer's
51
+ [`~AutoTokenizer.__call__`]. Please refer to the docstring of the above two methods for more information.
52
+ """
53
+ # For backward compatibility
54
+ if self._in_target_context_manager:
55
+ return self.current_processor(*args, **kwargs)
56
+
57
+ if "raw_speech" in kwargs:
58
+ warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
59
+ audio = kwargs.pop("raw_speech")
60
+ else:
61
+ audio = kwargs.pop("audio", None)
62
+ sampling_rate = kwargs.pop("sampling_rate", None)
63
+ text = kwargs.pop("text", None)
64
+ if len(args) > 0:
65
+ audio = args[0]
66
+ args = args[1:]
67
+
68
+ if audio is None and text is None:
69
+ raise ValueError("You need to specify either an `audio` or `text` input to process.")
70
+
71
+ if audio is not None:
72
+ inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
73
+ if text is not None:
74
+ encodings = self.tokenizer(text, **kwargs)
75
+
76
+ if text is None:
77
+ return inputs
78
+ elif audio is None:
79
+ return encodings
80
+ else:
81
+ inputs["labels"] = encodings["input_ids"]
82
+ return inputs
83
+
84
+ def batch_decode(self, *args, **kwargs):
85
+ """
86
+ This method forwards all its arguments to AutoTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please refer
87
+ to the docstring of this method for more information.
88
+ """
89
+ return self.tokenizer.batch_decode(*args, **kwargs)
90
+
91
+ def pad(self, *args, **kwargs):
92
+ """
93
+ When used in normal mode, this method forwards all its arguments to MCTCTFeatureExtractor's
94
+ [`~MCTCTFeatureExtractor.pad`] and returns its output. If used in the context
95
+ [`~MCTCTProcessor.as_target_processor`] this method forwards all its arguments to PreTrainedTokenizer's
96
+ [`~PreTrainedTokenizer.pad`]. Please refer to the docstring of the above two methods for more information.
97
+ """
98
+ # For backward compatibility
99
+ if self._in_target_context_manager:
100
+ return self.current_processor.pad(*args, **kwargs)
101
+
102
+ input_features = kwargs.pop("input_features", None)
103
+ labels = kwargs.pop("labels", None)
104
+ if len(args) > 0:
105
+ input_features = args[0]
106
+ args = args[1:]
107
+
108
+ if input_features is not None:
109
+ input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
110
+ if labels is not None:
111
+ labels = self.tokenizer.pad(labels, **kwargs)
112
+
113
+ if labels is None:
114
+ return input_features
115
+ elif input_features is None:
116
+ return labels
117
+ else:
118
+ input_features["labels"] = labels["input_ids"]
119
+ return input_features
120
+
121
+ def decode(self, *args, **kwargs):
122
+ """
123
+ This method forwards all its arguments to AutoTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to the
124
+ docstring of this method for more information.
125
+ """
126
+ return self.tokenizer.decode(*args, **kwargs)
127
+
128
+ @contextmanager
129
+ def as_target_processor(self):
130
+ """
131
+ Temporarily sets the tokenizer for processing the input. Useful for encoding the labels when fine-tuning MCTCT.
132
+ """
133
+ warnings.warn(
134
+ "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
135
+ "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
136
+ "your audio inputs, or in a separate call."
137
+ )
138
+ self._in_target_context_manager = True
139
+ self.current_processor = self.tokenizer
140
+ yield
141
+ self.current_processor = self.feature_extractor
142
+ self._in_target_context_manager = False
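For orientation, here is a hedged usage sketch of the processor defined above. The checkpoint name and the 16 kHz sampling rate are assumptions for illustration, and it assumes the deprecated MCTCT classes are still exported at the top level of this transformers version:

import numpy as np
from transformers import AutoTokenizer, MCTCTFeatureExtractor, MCTCTProcessor

# "speechbrain/m-ctc-t-large" is used here purely as an illustrative checkpoint name.
feature_extractor = MCTCTFeatureExtractor.from_pretrained("speechbrain/m-ctc-t-large")
tokenizer = AutoTokenizer.from_pretrained("speechbrain/m-ctc-t-large")
processor = MCTCTProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

waveform = np.zeros(16000, dtype=np.float32)      # one second of dummy audio
batch = processor(audio=waveform, sampling_rate=16000, text="hello world", return_tensors="pt")
# batch holds "input_features" (and masks) from the feature extractor plus "labels" from the tokenizer.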
env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.23 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/gptj/__init__.py ADDED
@@ -0,0 +1,112 @@
1
+ # Copyright 2021 The EleutherAI and HuggingFace Teams. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import (
17
+ OptionalDependencyNotAvailable,
18
+ _LazyModule,
19
+ is_flax_available,
20
+ is_tf_available,
21
+ is_torch_available,
22
+ )
23
+
24
+
25
+ _import_structure = {"configuration_gptj": ["GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTJConfig", "GPTJOnnxConfig"]}
26
+
27
+ try:
28
+ if not is_torch_available():
29
+ raise OptionalDependencyNotAvailable()
30
+ except OptionalDependencyNotAvailable:
31
+ pass
32
+ else:
33
+ _import_structure["modeling_gptj"] = [
34
+ "GPTJ_PRETRAINED_MODEL_ARCHIVE_LIST",
35
+ "GPTJForCausalLM",
36
+ "GPTJForQuestionAnswering",
37
+ "GPTJForSequenceClassification",
38
+ "GPTJModel",
39
+ "GPTJPreTrainedModel",
40
+ ]
41
+
42
+ try:
43
+ if not is_tf_available():
44
+ raise OptionalDependencyNotAvailable()
45
+ except OptionalDependencyNotAvailable:
46
+ pass
47
+ else:
48
+ _import_structure["modeling_tf_gptj"] = [
49
+ "TFGPTJForCausalLM",
50
+ "TFGPTJForQuestionAnswering",
51
+ "TFGPTJForSequenceClassification",
52
+ "TFGPTJModel",
53
+ "TFGPTJPreTrainedModel",
54
+ ]
55
+
56
+ try:
57
+ if not is_flax_available():
58
+ raise OptionalDependencyNotAvailable()
59
+ except OptionalDependencyNotAvailable:
60
+ pass
61
+ else:
62
+ _import_structure["modeling_flax_gptj"] = [
63
+ "FlaxGPTJForCausalLM",
64
+ "FlaxGPTJModel",
65
+ "FlaxGPTJPreTrainedModel",
66
+ ]
67
+
68
+
69
+ if TYPE_CHECKING:
70
+ from .configuration_gptj import GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTJConfig, GPTJOnnxConfig
71
+
72
+ try:
73
+ if not is_torch_available():
74
+ raise OptionalDependencyNotAvailable()
75
+ except OptionalDependencyNotAvailable:
76
+ pass
77
+ else:
78
+ from .modeling_gptj import (
79
+ GPTJ_PRETRAINED_MODEL_ARCHIVE_LIST,
80
+ GPTJForCausalLM,
81
+ GPTJForQuestionAnswering,
82
+ GPTJForSequenceClassification,
83
+ GPTJModel,
84
+ GPTJPreTrainedModel,
85
+ )
86
+
87
+ try:
88
+ if not is_tf_available():
89
+ raise OptionalDependencyNotAvailable()
90
+ except OptionalDependencyNotAvailable:
91
+ pass
92
+ else:
93
+ from .modeling_tf_gptj import (
94
+ TFGPTJForCausalLM,
95
+ TFGPTJForQuestionAnswering,
96
+ TFGPTJForSequenceClassification,
97
+ TFGPTJModel,
98
+ TFGPTJPreTrainedModel,
99
+ )
100
+
101
+ try:
102
+ if not is_flax_available():
103
+ raise OptionalDependencyNotAvailable()
104
+ except OptionalDependencyNotAvailable:
105
+ pass
106
+ else:
107
+ from .modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel, FlaxGPTJPreTrainedModel
108
+
109
+ else:
110
+ import sys
111
+
112
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
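A brief note on the `_LazyModule` pattern above: the framework-specific symbols are only imported when first accessed, so `import transformers` itself stays cheap. A small sketch, assuming torch is installed:

import transformers

# Accessing the config class only pulls in configuration_gptj.
config_cls = transformers.GPTJConfig

# Accessing a model class is what triggers the import of modeling_gptj (and therefore torch).
# If torch were missing, this would resolve to a placeholder that raises only when the class is actually used.
model_cls = transformers.GPTJModel
print(config_cls.__name__, model_cls.__name__)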
env-llmeval/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.58 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/configuration_gptj.cpython-310.pyc ADDED
Binary file (7.71 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/modeling_flax_gptj.cpython-310.pyc ADDED
Binary file (21 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/modeling_gptj.cpython-310.pyc ADDED
Binary file (38.9 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/modeling_tf_gptj.cpython-310.pyc ADDED
Binary file (33.6 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/gptj/configuration_gptj.py ADDED
@@ -0,0 +1,220 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The EleutherAI and HuggingFace Teams. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ GPT-J model configuration"""
16
+ from collections import OrderedDict
17
+ from typing import Any, List, Mapping, Optional
18
+
19
+ from ... import PreTrainedTokenizer, TensorType, is_torch_available
20
+ from ...configuration_utils import PretrainedConfig
21
+ from ...onnx import OnnxConfigWithPast, PatchingSpec
22
+ from ...utils import logging
23
+
24
+
25
+ logger = logging.get_logger(__name__)
26
+
27
+ GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
28
+ "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
29
+ # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
30
+ }
31
+
32
+
33
+ class GPTJConfig(PretrainedConfig):
34
+ r"""
35
+ This is the configuration class to store the configuration of a [`GPTJModel`]. It is used to instantiate a GPT-J
36
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
37
+ defaults will yield a similar configuration to that of the GPT-J
38
+ [EleutherAI/gpt-j-6B](https://huggingface.co/EleutherAI/gpt-j-6B) architecture. Configuration objects inherit from
39
+ [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`]
40
+ for more information.
41
+
42
+ Args:
43
+ vocab_size (`int`, *optional*, defaults to 50400):
44
+ Vocabulary size of the GPT-J model. Defines the number of different tokens that can be represented by the
45
+ `inputs_ids` passed when calling [`GPTJModel`].
46
+ n_positions (`int`, *optional*, defaults to 2048):
47
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
48
+ just in case (e.g., 512 or 1024 or 2048).
49
+ n_embd (`int`, *optional*, defaults to 4096):
50
+ Dimensionality of the embeddings and hidden states.
51
+ n_layer (`int`, *optional*, defaults to 28):
52
+ Number of hidden layers in the Transformer encoder.
53
+ n_head (`int`, *optional*, defaults to 16):
54
+ Number of attention heads for each attention layer in the Transformer encoder.
55
+ rotary_dim (`int`, *optional*, defaults to 64):
56
+ Number of dimensions in the embedding that Rotary Position Embedding is applied to.
57
+ n_inner (`int`, *optional*, defaults to None):
58
+ Dimensionality of the inner feed-forward layers. `None` will set it to 4 times n_embd
59
+ activation_function (`str`, *optional*, defaults to `"gelu_new"`):
60
+ Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new"]`.
61
+ resid_pdrop (`float`, *optional*, defaults to 0.1):
62
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
63
+ embd_pdrop (`int`, *optional*, defaults to 0.1):
64
+ The dropout ratio for the embeddings.
65
+ attn_pdrop (`float`, *optional*, defaults to 0.1):
66
+ The dropout ratio for the attention.
67
+ layer_norm_epsilon (`float`, *optional*, defaults to 1e-5):
68
+ The epsilon to use in the layer normalization layers.
69
+ initializer_range (`float`, *optional*, defaults to 0.02):
70
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
71
+ use_cache (`bool`, *optional*, defaults to `True`):
72
+ Whether or not the model should return the last key/values attentions (not used by all models).
73
+
74
+ Example:
75
+
76
+ ```python
77
+ >>> from transformers import GPTJModel, GPTJConfig
78
+
79
+ >>> # Initializing a GPT-J 6B configuration
80
+ >>> configuration = GPTJConfig()
81
+
82
+ >>> # Initializing a model from the configuration
83
+ >>> model = GPTJModel(configuration)
84
+
85
+ >>> # Accessing the model configuration
86
+ >>> configuration = model.config
87
+ ```"""
88
+
89
+ model_type = "gptj"
90
+ attribute_map = {
91
+ "max_position_embeddings": "n_positions",
92
+ "hidden_size": "n_embd",
93
+ "num_attention_heads": "n_head",
94
+ "num_hidden_layers": "n_layer",
95
+ }
96
+
97
+ def __init__(
98
+ self,
99
+ vocab_size=50400,
100
+ n_positions=2048,
101
+ n_embd=4096,
102
+ n_layer=28,
103
+ n_head=16,
104
+ rotary_dim=64,
105
+ n_inner=None,
106
+ activation_function="gelu_new",
107
+ resid_pdrop=0.0,
108
+ embd_pdrop=0.0,
109
+ attn_pdrop=0.0,
110
+ layer_norm_epsilon=1e-5,
111
+ initializer_range=0.02,
112
+ use_cache=True,
113
+ bos_token_id=50256,
114
+ eos_token_id=50256,
115
+ tie_word_embeddings=False,
116
+ **kwargs,
117
+ ):
118
+ self.vocab_size = vocab_size
119
+ self.n_positions = n_positions
120
+ self.n_embd = n_embd
121
+ self.n_layer = n_layer
122
+ self.n_head = n_head
123
+ self.n_inner = n_inner
124
+ self.rotary_dim = rotary_dim
125
+ self.activation_function = activation_function
126
+ self.resid_pdrop = resid_pdrop
127
+ self.embd_pdrop = embd_pdrop
128
+ self.attn_pdrop = attn_pdrop
129
+ self.layer_norm_epsilon = layer_norm_epsilon
130
+ self.initializer_range = initializer_range
131
+ self.use_cache = use_cache
132
+
133
+ self.bos_token_id = bos_token_id
134
+ self.eos_token_id = eos_token_id
135
+
136
+ super().__init__(
137
+ bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
138
+ )
139
+
140
+
141
+ # Copied from transformers.models.gpt2.configuration_gpt2.GPT2OnnxConfig
142
+ class GPTJOnnxConfig(OnnxConfigWithPast):
143
+ def __init__(
144
+ self,
145
+ config: PretrainedConfig,
146
+ task: str = "default",
147
+ patching_specs: List[PatchingSpec] = None,
148
+ use_past: bool = False,
149
+ ):
150
+ super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
151
+ if not getattr(self._config, "pad_token_id", None):
152
+ # TODO: how to do that better?
153
+ self._config.pad_token_id = 0
154
+
155
+ @property
156
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
157
+ common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
158
+ if self.use_past:
159
+ self.fill_with_past_key_values_(common_inputs, direction="inputs")
160
+ common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
161
+ else:
162
+ common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
163
+
164
+ return common_inputs
165
+
166
+ @property
167
+ def num_layers(self) -> int:
168
+ return self._config.n_layer
169
+
170
+ @property
171
+ def num_attention_heads(self) -> int:
172
+ return self._config.n_head
173
+
174
+ def generate_dummy_inputs(
175
+ self,
176
+ tokenizer: PreTrainedTokenizer,
177
+ batch_size: int = -1,
178
+ seq_length: int = -1,
179
+ is_pair: bool = False,
180
+ framework: Optional[TensorType] = None,
181
+ ) -> Mapping[str, Any]:
182
+ common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
183
+ tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
184
+ )
185
+
186
+ # We need to order the inputs in the way they appear in the forward()
187
+ ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
188
+
189
+ # Need to add the past_keys
190
+ if self.use_past:
191
+ if not is_torch_available():
192
+ raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
193
+ else:
194
+ import torch
195
+
196
+ batch, seqlen = common_inputs["input_ids"].shape
197
+ # Not using the same length for past_key_values
198
+ past_key_values_length = seqlen + 2
199
+ past_shape = (
200
+ batch,
201
+ self.num_attention_heads,
202
+ past_key_values_length,
203
+ self._config.hidden_size // self.num_attention_heads,
204
+ )
205
+ ordered_inputs["past_key_values"] = [
206
+ (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
207
+ ]
208
+
209
+ ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
210
+ if self.use_past:
211
+ mask_dtype = ordered_inputs["attention_mask"].dtype
212
+ ordered_inputs["attention_mask"] = torch.cat(
213
+ [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
214
+ )
215
+
216
+ return ordered_inputs
217
+
218
+ @property
219
+ def default_onnx_opset(self) -> int:
220
+ return 13
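One thing worth noting about the configuration above: `attribute_map` lets the generic attribute names resolve to GPT-J's native ones. A quick, self-contained check with deliberately tiny, illustrative sizes:

from transformers import GPTJConfig

config = GPTJConfig(n_embd=256, n_layer=2, n_head=4, rotary_dim=32)  # tiny, illustrative values
assert config.hidden_size == config.n_embd == 256
assert config.num_hidden_layers == config.n_layer == 2
assert config.num_attention_heads == config.n_head == 4
assert config.max_position_embeddings == config.n_positions == 2048  # default n_positions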
env-llmeval/lib/python3.10/site-packages/transformers/models/gptj/modeling_flax_gptj.py ADDED
@@ -0,0 +1,718 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The EleutherAI and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ from functools import partial
17
+ from typing import Optional, Tuple
18
+
19
+ import flax.linen as nn
20
+ import jax
21
+ import jax.numpy as jnp
22
+ import numpy as np
23
+ from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
24
+ from flax.linen import combine_masks, make_causal_mask
25
+ from flax.linen.attention import dot_product_attention_weights
26
+ from flax.traverse_util import flatten_dict, unflatten_dict
27
+ from jax import lax
28
+
29
+ from ...modeling_flax_outputs import FlaxBaseModelOutput, FlaxCausalLMOutput
30
+ from ...modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring
31
+ from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging
32
+ from .configuration_gptj import GPTJConfig
33
+
34
+
35
+ logger = logging.get_logger(__name__)
36
+
37
+ _CHECKPOINT_FOR_DOC = "gptj"
38
+ _CONFIG_FOR_DOC = "GPTJConfig"
39
+
40
+
41
+ GPTJ_START_DOCSTRING = r"""
42
+
43
+ This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
44
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
45
+ etc.)
46
+
47
+ This model is also a Flax Linen
48
+ [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a
49
+ regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.
50
+
51
+ Finally, this model supports inherent JAX features such as:
52
+
53
+ - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
54
+ - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
55
+ - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
56
+ - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
57
+
58
+ Parameters:
59
+ config ([`GPTJConfig`]): Model configuration class with all the parameters of the model.
60
+ Initializing with a config file does not load the weights associated with the model, only the
61
+ configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
62
+ dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
63
+ The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
64
+ `jax.numpy.bfloat16` (on TPUs).
65
+
66
+ This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
67
+ specified all the computation will be performed with the given `dtype`.
68
+
69
+ **Note that this only specifies the dtype of the computation and does not influence the dtype of model
70
+ parameters.**
71
+
72
+ If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
73
+ [`~FlaxPreTrainedModel.to_bf16`].
74
+ """
75
+
76
+ GPTJ_INPUTS_DOCSTRING = r"""
77
+ Args:
78
+ input_ids (`numpy.ndarray` of shape `(batch_size, input_ids_length)`):
79
+ `input_ids_length` = `sequence_length`. Indices of input sequence tokens in the vocabulary.
80
+
81
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
82
+ [`PreTrainedTokenizer.__call__`] for details.
83
+
84
+ [What are input IDs?](../glossary#input-ids)
85
+ attention_mask (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
86
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
87
+
88
+ - 1 for tokens that are **not masked**,
89
+ - 0 for tokens that are **masked**.
90
+
91
+ [What are attention masks?](../glossary#attention-mask)
92
+ position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
93
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
94
+ config.max_position_embeddings - 1]`.
95
+ past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`):
96
+ Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast
97
+ auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*.
98
+ output_attentions (`bool`, *optional*):
99
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
100
+ tensors for more detail.
101
+ output_hidden_states (`bool`, *optional*):
102
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
103
+ more detail.
104
+ return_dict (`bool`, *optional*):
105
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
106
+ """
107
+
108
+
109
+ def create_sinusoidal_positions(num_pos, dim):
110
+ inv_freq = 1.0 / (10000 ** (np.arange(0, dim, 2) / dim))
111
+ sinusoid_inp = np.einsum("i , j -> i j", np.arange(num_pos), inv_freq).astype("float32")
112
+ sin, cos = np.sin(sinusoid_inp), np.cos(sinusoid_inp)
113
+
114
+ sentinel = dim // 2 + dim % 2
115
+ out = np.zeros((num_pos, dim))
116
+ out[:, 0:sentinel] = sin
117
+ out[:, sentinel:] = cos
118
+
119
+ return jnp.array(out)
120
+
121
+
122
+ def rotate_every_two(tensor):
123
+ rotate_half_tensor = jnp.stack((-tensor[:, :, :, 1::2], tensor[:, :, :, ::2]), axis=-1)
124
+ rotate_half_tensor = rotate_half_tensor.reshape(rotate_half_tensor.shape[:-2] + (-1,))
125
+ return rotate_half_tensor
126
+
127
+
128
+ def apply_rotary_pos_emb(tensor, sincos):
129
+ sin_pos, cos_pos = sincos
130
+ sin_pos = sin_pos[:, :, None, :].repeat(2, 3)
131
+ cos_pos = cos_pos[:, :, None, :].repeat(2, 3)
132
+ return (tensor * cos_pos) + (rotate_every_two(tensor) * sin_pos)
133
+
134
+
135
+ class FlaxGPTJAttention(nn.Module):
136
+ config: GPTJConfig
137
+ dtype: jnp.dtype = jnp.float32
138
+ causal: bool = True
139
+ is_cross_attention: bool = False
140
+
141
+ def setup(self):
142
+ config = self.config
143
+ self.embed_dim = config.hidden_size
144
+ self.num_heads = config.num_attention_heads
145
+ self.head_dim = self.embed_dim // self.num_heads
146
+
147
+ self.rotary_dim = config.rotary_dim
148
+
149
+ dense = partial(
150
+ nn.Dense,
151
+ self.embed_dim,
152
+ use_bias=False,
153
+ dtype=self.dtype,
154
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
155
+ )
156
+
157
+ self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense()
158
+ self.out_proj = dense()
159
+
160
+ self.resid_dropout = nn.Dropout(rate=config.resid_pdrop)
161
+
162
+ self.causal_mask = make_causal_mask(jnp.ones((1, config.max_position_embeddings), dtype="bool"), dtype="bool")
163
+
164
+ pos_embd_dim = self.rotary_dim or self.embed_dim
165
+ self.embed_positions = create_sinusoidal_positions(config.max_position_embeddings, pos_embd_dim)
166
+
167
+ def _split_heads(self, hidden_states):
168
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim))
169
+
170
+ def _merge_heads(self, hidden_states):
171
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,))
172
+
173
+ @nn.compact
174
+ def _concatenate_to_cache(self, key, value, query, attention_mask):
175
+ """
176
+ This function takes projected key, value states from a single input token and concatenates the states to cached
177
+ states from previous steps. This function is slightly adapted from the official Flax repository:
178
+ https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
179
+ """
180
+ # detect if we're initializing by absence of existing cache data.
181
+ is_initialized = self.has_variable("cache", "cached_key")
182
+ cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
183
+ cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
184
+ cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))
185
+
186
+ if is_initialized:
187
+ *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
188
+ # update key, value caches with our new 1d spatial slices
189
+ cur_index = cache_index.value
190
+ indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
191
+ key = lax.dynamic_update_slice(cached_key.value, key, indices)
192
+ value = lax.dynamic_update_slice(cached_value.value, value, indices)
193
+ cached_key.value = key
194
+ cached_value.value = value
195
+ num_updated_cache_vectors = query.shape[1]
196
+ cache_index.value = cache_index.value + num_updated_cache_vectors
197
+ # causal mask for cached decoder self-attention: our single query position should only attend to those key
198
+ # positions that have already been generated and cached, not the remaining zero elements.
199
+ pad_mask = jnp.broadcast_to(
200
+ jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
201
+ tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
202
+ )
203
+ attention_mask = combine_masks(pad_mask, attention_mask)
204
+ return key, value, attention_mask
205
+
206
+ def __call__(
207
+ self,
208
+ hidden_states,
209
+ attention_mask,
210
+ position_ids,
211
+ deterministic: bool = True,
212
+ init_cache: bool = False,
213
+ output_attentions: bool = False,
214
+ ):
215
+ query = self.q_proj(hidden_states)
216
+ key = self.k_proj(hidden_states)
217
+ value = self.v_proj(hidden_states)
218
+
219
+ query = self._split_heads(query)
220
+ key = self._split_heads(key)
221
+ value = self._split_heads(value)
222
+
223
+ sincos = jnp.take(self.embed_positions, position_ids, axis=0)
224
+ sincos = jnp.split(sincos, 2, axis=-1)
225
+ if self.rotary_dim is not None:
226
+ k_rot = key[:, :, :, : self.rotary_dim]
227
+ k_pass = key[:, :, :, self.rotary_dim :]
228
+
229
+ q_rot = query[:, :, :, : self.rotary_dim]
230
+ q_pass = query[:, :, :, self.rotary_dim :]
231
+
232
+ k_rot = apply_rotary_pos_emb(k_rot, sincos)
233
+ q_rot = apply_rotary_pos_emb(q_rot, sincos)
234
+
235
+ key = jnp.concatenate([k_rot, k_pass], axis=-1)
236
+ query = jnp.concatenate([q_rot, q_pass], axis=-1)
237
+ else:
238
+ key = apply_rotary_pos_emb(key, sincos)
239
+ query = apply_rotary_pos_emb(query, sincos)
240
+
241
+ query_length, key_length = query.shape[1], key.shape[1]
242
+
243
+ if self.has_variable("cache", "cached_key"):
244
+ mask_shift = self.variables["cache"]["cache_index"]
245
+ max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
246
+ causal_mask = lax.dynamic_slice(
247
+ self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length)
248
+ )
249
+ else:
250
+ causal_mask = self.causal_mask[:, :, :query_length, :key_length]
251
+
252
+ batch_size = hidden_states.shape[0]
253
+ causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:])
254
+
255
+ attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape)
256
+ attention_mask = combine_masks(attention_mask, causal_mask)
257
+
258
+ dropout_rng = None
259
+ if not deterministic and self.config.attn_pdrop > 0.0:
260
+ dropout_rng = self.make_rng("dropout")
261
+
262
+ # During fast autoregressive decoding, we feed one position at a time,
263
+ # and cache the keys and values step by step.
264
+ if self.has_variable("cache", "cached_key") or init_cache:
265
+ key, value, attention_mask = self._concatenate_to_cache(key, value, query, attention_mask)
266
+
267
+ # transform boolean mask into float mask
268
+ attention_bias = lax.select(
269
+ attention_mask > 0,
270
+ jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
271
+ jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype),
272
+ )
273
+
274
+ # usual dot product attention
275
+ attn_weights = dot_product_attention_weights(
276
+ query,
277
+ key,
278
+ bias=attention_bias,
279
+ dropout_rng=dropout_rng,
280
+ dropout_rate=self.config.attn_pdrop,
281
+ deterministic=deterministic,
282
+ dtype=self.dtype,
283
+ precision=None,
284
+ )
285
+
286
+ attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value)
287
+ attn_output = self._merge_heads(attn_output)
288
+ attn_output = self.out_proj(attn_output)
289
+ attn_output = self.resid_dropout(attn_output, deterministic=deterministic)
290
+
291
+ outputs = (attn_output, attn_weights) if output_attentions else (attn_output,)
292
+ return outputs
293
+
294
+
295
+ class FlaxGPTJMLP(nn.Module):
296
+ config: GPTJConfig
297
+ intermediate_size: int
298
+ dtype: jnp.dtype = jnp.float32
299
+
300
+ def setup(self):
301
+ embed_dim = self.config.hidden_size
302
+ kernel_init = jax.nn.initializers.normal(self.config.initializer_range)
303
+
304
+ self.fc_in = nn.Dense(self.intermediate_size, dtype=self.dtype, kernel_init=kernel_init)
305
+ self.fc_out = nn.Dense(embed_dim, dtype=self.dtype, kernel_init=kernel_init)
306
+
307
+ self.act = ACT2FN[self.config.activation_function]
308
+ self.dropout = nn.Dropout(rate=self.config.resid_pdrop)
309
+
310
+ def __call__(self, hidden_states, deterministic: bool = True):
311
+ hidden_states = self.fc_in(hidden_states)
312
+ hidden_states = self.act(hidden_states)
313
+ hidden_states = self.fc_out(hidden_states)
314
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
315
+ return hidden_states
316
+
317
+
318
+ class FlaxGPTJBlock(nn.Module):
319
+ config: GPTJConfig
320
+ dtype: jnp.dtype = jnp.float32
321
+
322
+ def setup(self):
323
+ hidden_size = self.config.hidden_size
324
+ inner_dim = self.config.n_inner if self.config.n_inner is not None else 4 * hidden_size
325
+
326
+ self.ln_1 = nn.LayerNorm(epsilon=self.config.layer_norm_epsilon, dtype=self.dtype)
327
+ self.attn = FlaxGPTJAttention(self.config, dtype=self.dtype)
328
+
329
+ self.mlp = FlaxGPTJMLP(self.config, inner_dim, dtype=self.dtype)
330
+
331
+ def __call__(
332
+ self,
333
+ hidden_states,
334
+ attention_mask=None,
335
+ position_ids=None,
336
+ deterministic: bool = True,
337
+ init_cache: bool = False,
338
+ output_attentions: bool = False,
339
+ ):
340
+ residual = hidden_states
341
+ hidden_states = self.ln_1(hidden_states)
342
+ attn_outputs = self.attn(
343
+ hidden_states,
344
+ attention_mask=attention_mask,
345
+ position_ids=position_ids,
346
+ deterministic=deterministic,
347
+ init_cache=init_cache,
348
+ output_attentions=output_attentions,
349
+ )
350
+ attn_output = attn_outputs[0]
351
+
352
+ feed_forward_hidden_states = self.mlp(hidden_states, deterministic=deterministic)
353
+ # residual connection
354
+ hidden_states = attn_output + feed_forward_hidden_states + residual
355
+
356
+ return (hidden_states,) + attn_outputs[1:]
357
+
358
+
359
+ class FlaxGPTJPreTrainedModel(FlaxPreTrainedModel):
360
+ """
361
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
362
+ models.
363
+ """
364
+
365
+ config_class = GPTJConfig
366
+ base_model_prefix = "transformer"
367
+ module_class: nn.Module = None
368
+
369
+ def __init__(
370
+ self,
371
+ config: GPTJConfig,
372
+ input_shape: Tuple = (1, 1),
373
+ seed: int = 0,
374
+ dtype: jnp.dtype = jnp.float32,
375
+ _do_init: bool = True,
376
+ **kwargs,
377
+ ):
378
+ module = self.module_class(config=config, dtype=dtype, **kwargs)
379
+ super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
380
+
381
+ def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
382
+ # init input tensors
383
+ input_ids = jnp.zeros(input_shape, dtype="i4")
384
+ attention_mask = jnp.ones_like(input_ids)
385
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape)
386
+ params_rng, dropout_rng = jax.random.split(rng)
387
+ rngs = {"params": params_rng, "dropout": dropout_rng}
388
+
389
+ if self.config.add_cross_attention:
390
+ encoder_hidden_states = jnp.zeros(input_shape + (self.config.n_embd,))
391
+ encoder_attention_mask = attention_mask
392
+ module_init_outputs = self.module.init(
393
+ rngs,
394
+ input_ids,
395
+ attention_mask,
396
+ position_ids,
397
+ encoder_hidden_states,
398
+ encoder_attention_mask,
399
+ return_dict=False,
400
+ )
401
+ else:
402
+ module_init_outputs = self.module.init(rngs, input_ids, attention_mask, position_ids, return_dict=False)
403
+
404
+ random_params = module_init_outputs["params"]
405
+
406
+ if params is not None:
407
+ random_params = flatten_dict(unfreeze(random_params))
408
+ params = flatten_dict(unfreeze(params))
409
+ for missing_key in self._missing_keys:
410
+ params[missing_key] = random_params[missing_key]
411
+ self._missing_keys = set()
412
+ return freeze(unflatten_dict(params))
413
+ else:
414
+ return random_params
415
+
416
+ def init_cache(self, batch_size, max_length):
417
+ r"""
418
+ Args:
419
+ batch_size (`int`):
420
+ batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
421
+ max_length (`int`):
422
+ maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
423
+ cache.
424
+ """
425
+ # init input variables to retrieve cache
426
+ input_ids = jnp.ones((batch_size, max_length))
427
+ attention_mask = jnp.ones_like(input_ids)
428
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)
429
+
430
+ init_variables = self.module.init(
431
+ jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True
432
+ )
433
+ return init_variables["cache"]
434
+
435
+ @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING)
436
+ def __call__(
437
+ self,
438
+ input_ids,
439
+ attention_mask=None,
440
+ position_ids=None,
441
+ params: dict = None,
442
+ past_key_values: dict = None,
443
+ dropout_rng: jax.random.PRNGKey = None,
444
+ train: bool = False,
445
+ output_attentions: Optional[bool] = None,
446
+ output_hidden_states: Optional[bool] = None,
447
+ return_dict: Optional[bool] = None,
448
+ ):
449
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
450
+ output_hidden_states = (
451
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
452
+ )
453
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
454
+
455
+ batch_size, sequence_length = input_ids.shape
456
+
457
+ if position_ids is None:
458
+ if past_key_values is not None:
459
+ raise ValueError("Make sure to provide `position_ids` when passing `past_key_values`.")
460
+
461
+ position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
462
+
463
+ if attention_mask is None:
464
+ attention_mask = jnp.ones((batch_size, sequence_length))
465
+
466
+ # Handle any PRNG if needed
467
+ rngs = {}
468
+ if dropout_rng is not None:
469
+ rngs["dropout"] = dropout_rng
470
+
471
+ inputs = {"params": params or self.params}
472
+
473
+ # If past_key_values are passed, the cache is already initialized, and a private flag, init_cache, has to be passed down to ensure the cache is used. The cache must also be marked as mutable so that it can be updated by the FlaxGPTJAttention module.
474
+ if past_key_values:
475
+ inputs["cache"] = past_key_values
476
+ mutable = ["cache"]
477
+ else:
478
+ mutable = False
479
+
480
+ outputs = self.module.apply(
481
+ inputs,
482
+ jnp.array(input_ids, dtype="i4"),
483
+ jnp.array(attention_mask, dtype="i4"),
484
+ jnp.array(position_ids, dtype="i4"),
485
+ not train,
486
+ False,
487
+ output_attentions,
488
+ output_hidden_states,
489
+ return_dict,
490
+ rngs=rngs,
491
+ mutable=mutable,
492
+ )
493
+
494
+ # add updated cache to model output
495
+ if past_key_values is not None and return_dict:
496
+ outputs, past_key_values = outputs
497
+ outputs["past_key_values"] = unfreeze(past_key_values["cache"])
498
+ return outputs
499
+ elif past_key_values is not None and not return_dict:
500
+ outputs, past_key_values = outputs
501
+ outputs = outputs[:1] + (unfreeze(past_key_values["cache"]),) + outputs[1:]
502
+
503
+ return outputs
504
+
505
+
506
+ class FlaxGPTJBlockCollection(nn.Module):
507
+ config: GPTJConfig
508
+ dtype: jnp.dtype = jnp.float32
509
+
510
+ def setup(self):
511
+ self.blocks = [
512
+ FlaxGPTJBlock(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.num_hidden_layers)
513
+ ]
514
+
515
+ def __call__(
516
+ self,
517
+ hidden_states,
518
+ attention_mask=None,
519
+ position_ids=None,
520
+ deterministic: bool = True,
521
+ init_cache: bool = False,
522
+ output_attentions: bool = False,
523
+ output_hidden_states: bool = False,
524
+ return_dict: bool = True,
525
+ ):
526
+ all_attentions = () if output_attentions else None
527
+ all_hidden_states = () if output_hidden_states else None
528
+
529
+ for block in self.blocks:
530
+ if output_hidden_states:
531
+ all_hidden_states += (hidden_states,)
532
+
533
+ layer_outputs = block(
534
+ hidden_states,
535
+ attention_mask,
536
+ position_ids=position_ids,
537
+ deterministic=deterministic,
538
+ init_cache=init_cache,
539
+ output_attentions=output_attentions,
540
+ )
541
+ hidden_states = layer_outputs[0]
542
+
543
+ if output_attentions:
544
+ all_attentions += (layer_outputs[1],)
545
+
546
+ # this contains possible `None` values - `FlaxGPTJModule` will filter them out
547
+ outputs = (hidden_states, all_hidden_states, all_attentions)
548
+
549
+ return outputs
550
+
551
+
552
+ class FlaxGPTJModule(nn.Module):
553
+ config: GPTJConfig
554
+ dtype: jnp.dtype = jnp.float32
555
+
556
+ def setup(self):
557
+ self.embed_dim = self.config.hidden_size
558
+
559
+ self.wte = nn.Embed(
560
+ self.config.vocab_size,
561
+ self.config.hidden_size,
562
+ embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
563
+ )
564
+ self.dropout = nn.Dropout(rate=self.config.embd_pdrop)
565
+ self.h = FlaxGPTJBlockCollection(self.config, dtype=self.dtype)
566
+ self.ln_f = nn.LayerNorm(epsilon=self.config.layer_norm_epsilon, dtype=self.dtype)
567
+
568
+ def __call__(
569
+ self,
570
+ input_ids,
571
+ attention_mask,
572
+ position_ids,
573
+ deterministic=True,
574
+ init_cache: bool = False,
575
+ output_attentions: bool = False,
576
+ output_hidden_states: bool = False,
577
+ return_dict: bool = True,
578
+ ):
579
+ input_embeds = self.wte(input_ids.astype("i4"))
580
+
581
+ hidden_states = self.dropout(input_embeds, deterministic=deterministic)
582
+
583
+ outputs = self.h(
584
+ hidden_states,
585
+ attention_mask,
586
+ position_ids=position_ids,
587
+ deterministic=deterministic,
588
+ init_cache=init_cache,
589
+ output_attentions=output_attentions,
590
+ output_hidden_states=output_hidden_states,
591
+ return_dict=return_dict,
592
+ )
593
+
594
+ hidden_states = outputs[0]
595
+ hidden_states = self.ln_f(hidden_states)
596
+
597
+ if output_hidden_states:
598
+ all_hidden_states = outputs[1] + (hidden_states,)
599
+ outputs = (hidden_states, all_hidden_states) + outputs[2:]
600
+ else:
601
+ outputs = (hidden_states,) + outputs[1:]
602
+
603
+ if not return_dict:
604
+ return tuple(v for v in outputs if v is not None)
605
+
606
+ return FlaxBaseModelOutput(
607
+ last_hidden_state=hidden_states,
608
+ hidden_states=outputs[1],
609
+ attentions=outputs[-1],
610
+ )
611
+
612
+
613
+ @add_start_docstrings(
614
+ "The bare GPTJ Model transformer outputting raw hidden-states without any specific head on top.",
615
+ GPTJ_START_DOCSTRING,
616
+ )
617
+ class FlaxGPTJModel(FlaxGPTJPreTrainedModel):
618
+ module_class = FlaxGPTJModule
619
+
620
+
621
+ append_call_sample_docstring(
622
+ FlaxGPTJModel,
623
+ _CHECKPOINT_FOR_DOC,
624
+ FlaxCausalLMOutput,
625
+ _CONFIG_FOR_DOC,
626
+ )
627
+
628
+
629
+ class FlaxGPTJForCausalLMModule(nn.Module):
630
+ config: GPTJConfig
631
+ dtype: jnp.dtype = jnp.float32
632
+
633
+ def setup(self):
634
+ self.transformer = FlaxGPTJModule(self.config, dtype=self.dtype)
635
+ self.lm_head = nn.Dense(
636
+ self.config.vocab_size,
637
+ dtype=self.dtype,
638
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
639
+ )
640
+
641
+ def __call__(
642
+ self,
643
+ input_ids,
644
+ attention_mask,
645
+ position_ids,
646
+ deterministic: bool = True,
647
+ init_cache: bool = False,
648
+ output_attentions: bool = False,
649
+ output_hidden_states: bool = False,
650
+ return_dict: bool = True,
651
+ ):
652
+ outputs = self.transformer(
653
+ input_ids,
654
+ attention_mask,
655
+ position_ids,
656
+ deterministic=deterministic,
657
+ init_cache=init_cache,
658
+ output_attentions=output_attentions,
659
+ output_hidden_states=output_hidden_states,
660
+ return_dict=return_dict,
661
+ )
662
+
663
+ hidden_states = outputs[0]
664
+
665
+ if self.config.tie_word_embeddings:
666
+ shared_kernel = self.transformer.variables["params"]["wte"]["embedding"].T
667
+ lm_logits = self.lm_head.apply({"params": {"kernel": shared_kernel}}, hidden_states)
668
+ else:
669
+ lm_logits = self.lm_head(hidden_states)
670
+
671
+ if not return_dict:
672
+ return (lm_logits,) + outputs[1:]
673
+
674
+ return FlaxCausalLMOutput(logits=lm_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
675
+
676
+
677
+ @add_start_docstrings(
678
+ """
679
+ The GPTJ Model transformer with a language modeling head on top.
680
+ """,
681
+ GPTJ_START_DOCSTRING,
682
+ )
683
+ class FlaxGPTJForCausalLM(FlaxGPTJPreTrainedModel):
684
+ module_class = FlaxGPTJForCausalLMModule
685
+
686
+ def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jax.Array] = None):
687
+ # initializing the cache
688
+ batch_size, seq_length = input_ids.shape
689
+
690
+ past_key_values = self.init_cache(batch_size, max_length)
691
+ # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length.
692
+ # But since GPTJ uses a causal mask, those positions are masked anyway.
693
+ # Thus we can create a single static attention_mask here, which is more efficient for compilation
694
+ extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
695
+ if attention_mask is not None:
696
+ position_ids = attention_mask.cumsum(axis=-1) - 1
697
+ extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0))
698
+ else:
699
+ position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length))
700
+
701
+ return {
702
+ "past_key_values": past_key_values,
703
+ "attention_mask": extended_attention_mask,
704
+ "position_ids": position_ids,
705
+ }
706
+
707
+ def update_inputs_for_generation(self, model_outputs, model_kwargs):
708
+ model_kwargs["past_key_values"] = model_outputs.past_key_values
709
+ model_kwargs["position_ids"] = model_kwargs["position_ids"][:, -1:] + 1
710
+ return model_kwargs
711
+
712
+
713
+ append_call_sample_docstring(
714
+ FlaxGPTJForCausalLM,
715
+ _CHECKPOINT_FOR_DOC,
716
+ FlaxCausalLMOutput,
717
+ _CONFIG_FOR_DOC,
718
+ )
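As a side note, the rotary-embedding helpers defined near the top of this file (`create_sinusoidal_positions`, `rotate_every_two`, `apply_rotary_pos_emb`) can be exercised in isolation. A small NumPy/JAX shape check with arbitrary sizes, assuming jax and numpy are installed:

import jax.numpy as jnp
import numpy as np

num_pos, rotary_dim, n_heads = 16, 8, 4
inv_freq = 1.0 / (10000 ** (np.arange(0, rotary_dim, 2) / rotary_dim))
sinusoid = np.einsum("i , j -> i j", np.arange(num_pos), inv_freq)
sin, cos = jnp.sin(jnp.asarray(sinusoid)), jnp.cos(jnp.asarray(sinusoid))  # each (num_pos, rotary_dim // 2)

x = jnp.ones((1, num_pos, n_heads, rotary_dim))              # (batch, seq, heads, rotary_dim) slice of q or k
sin_pos = sin[None, :, None, :].repeat(2, 3)                 # duplicate each frequency for the interleaved pairs
cos_pos = cos[None, :, None, :].repeat(2, 3)
rotated = jnp.stack((-x[..., 1::2], x[..., ::2]), axis=-1).reshape(x.shape)  # same idea as rotate_every_two
out = x * cos_pos + rotated * sin_pos                        # same idea as apply_rotary_pos_emb
assert out.shape == x.shape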
env-llmeval/lib/python3.10/site-packages/transformers/models/gptj/modeling_gptj.py ADDED
@@ -0,0 +1,1430 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The EleutherAI and HuggingFace Teams. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch GPT-J model."""
16
+
17
+ import warnings
18
+ from typing import Optional, Tuple, Union
19
+
20
+ import torch
21
+ import torch.fx
22
+ import torch.nn.functional as F
23
+ import torch.utils.checkpoint
24
+ from torch import nn
25
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
26
+
27
+ from ...activations import ACT2FN
28
+ from ...modeling_outputs import (
29
+ BaseModelOutputWithPast,
30
+ CausalLMOutputWithPast,
31
+ QuestionAnsweringModelOutput,
32
+ SequenceClassifierOutputWithPast,
33
+ )
34
+ from ...modeling_utils import PreTrainedModel
35
+ from ...utils import (
36
+ add_code_sample_docstrings,
37
+ add_start_docstrings,
38
+ add_start_docstrings_to_model_forward,
39
+ is_flash_attn_2_available,
40
+ is_flash_attn_greater_or_equal_2_10,
41
+ is_torch_fx_proxy,
42
+ logging,
43
+ )
44
+ from ...utils.model_parallel_utils import assert_device_map, get_device_map
45
+ from .configuration_gptj import GPTJConfig
46
+
47
+
48
+ if is_flash_attn_2_available():
49
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
50
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
51
+
52
+
53
+ logger = logging.get_logger(__name__)
54
+
55
+ _CHECKPOINT_FOR_DOC = "hf-internal-testing/tiny-random-gptj"
56
+ _REAL_CHECKPOINT_FOR_DOC = "EleutherAI/gpt-j-6B"
57
+ _CONFIG_FOR_DOC = "GPTJConfig"
58
+
59
+
60
+ GPTJ_PRETRAINED_MODEL_ARCHIVE_LIST = [
61
+ "EleutherAI/gpt-j-6B",
62
+ # See all GPT-J models at https://huggingface.co/models?filter=gptj
63
+ ]
64
+
65
+
66
+ # Copied from transformers.models.llama.modeling_llama._get_unpad_data
67
+ def _get_unpad_data(attention_mask):
68
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
69
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
70
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
71
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
72
+ return (
73
+ indices,
74
+ cu_seqlens,
75
+ max_seqlen_in_batch,
76
+ )
77
+
78
+
79
+ def create_sinusoidal_positions(num_pos: int, dim: int) -> torch.Tensor:
80
+ inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2, dtype=torch.int64) / dim))
81
+ sinusoid_inp = torch.einsum("i , j -> i j", torch.arange(num_pos, dtype=torch.int64).float(), inv_freq).float()
82
+ return torch.cat((torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)), dim=1)
83
+
84
+
85
+ @torch.fx.wrap
86
+ def get_embed_positions(embed_positions, position_ids):
87
+ return embed_positions.to(position_ids.device).repeat(position_ids.shape[0], 1, 1)
88
+
89
+
90
+ def rotate_every_two(x: torch.Tensor) -> torch.Tensor:
91
+ x1 = x[:, :, :, ::2]
92
+ x2 = x[:, :, :, 1::2]
93
+ x = torch.stack((-x2, x1), dim=-1)
94
+ return x.flatten(-2) # in einsum notation: rearrange(x, '... d j -> ... (d j)')
95
+
96
+
97
+ def apply_rotary_pos_emb(tensor: torch.Tensor, sin: torch.Tensor, cos: torch.Tensor) -> torch.Tensor:
98
+ sin = torch.repeat_interleave(sin[:, :, None, :], 2, 3)
99
+ cos = torch.repeat_interleave(cos[:, :, None, :], 2, 3)
100
+ return (tensor * cos) + (rotate_every_two(tensor) * sin)
101
+
102
+
103
+ class GPTJAttention(nn.Module):
104
+ def __init__(self, config):
105
+ super().__init__()
106
+ self.config = config
107
+ max_positions = config.max_position_embeddings
108
+ self.register_buffer(
109
+ "bias",
110
+ torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)).view(
111
+ 1, 1, max_positions, max_positions
112
+ ),
113
+ persistent=False,
114
+ )
115
+ self.register_buffer("masked_bias", torch.tensor(-1e9), persistent=False)
116
+
117
+ self.attn_dropout = nn.Dropout(config.attn_pdrop)
118
+ self.resid_dropout = nn.Dropout(config.resid_pdrop)
119
+
120
+ self.is_causal = True
121
+
122
+ self.embed_dim = config.hidden_size
123
+ self.num_attention_heads = config.num_attention_heads
124
+ self.head_dim = self.embed_dim // self.num_attention_heads
125
+ if self.head_dim * self.num_attention_heads != self.embed_dim:
126
+ raise ValueError(
127
+ f"embed_dim must be divisible by num_attention_heads (got `embed_dim`: {self.embed_dim} and"
128
+ f" `num_attention_heads`: {self.num_attention_heads})."
129
+ )
130
+ self.scale_attn = torch.sqrt(torch.tensor(self.head_dim, dtype=torch.float32)).to(torch.get_default_dtype())
131
+
132
+ self.k_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
133
+ self.v_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
134
+ self.q_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
135
+ self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
136
+ self.rotary_dim = config.rotary_dim
137
+ pos_embd_dim = self.rotary_dim or self.embed_dim
138
+ self.embed_positions = create_sinusoidal_positions(max_positions, pos_embd_dim)
139
+
140
+ def _split_heads(self, tensor, num_attention_heads, attn_head_size, rotary):
141
+ """
142
+ Splits hidden dim into attn_head_size and num_attention_heads
143
+ """
144
+ new_shape = tensor.size()[:-1] + (num_attention_heads, attn_head_size)
145
+ tensor = tensor.view(new_shape)
146
+ if rotary:
147
+ return tensor
148
+ if len(tensor.shape) == 5:
149
+ return tensor.permute(0, 1, 3, 2, 4) # (batch, blocks, head, block_length, head_features)
150
+ elif len(tensor.shape) == 4:
151
+ return tensor.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features)
152
+ else:
153
+ raise ValueError(f"Input tensor rank should be one of [4, 5], but is: {len(tensor.shape)}")
154
+
155
+ def _merge_heads(self, tensor, num_attention_heads, attn_head_size):
156
+ """
157
+ Merges attn_head_size dim and num_attn_heads dim into hidden dim
158
+ """
159
+ if len(tensor.shape) == 5:
160
+ tensor = tensor.permute(0, 1, 3, 2, 4).contiguous()
161
+ elif len(tensor.shape) == 4:
162
+ tensor = tensor.permute(0, 2, 1, 3).contiguous()
163
+ else:
164
+ raise ValueError(f"Input tensor rank should be one of [4, 5], but is: {len(tensor.shape)}")
165
+ new_shape = tensor.size()[:-2] + (num_attention_heads * attn_head_size,)
166
+ return tensor.view(new_shape)
167
+
168
+ def _attn(
169
+ self,
170
+ query,
171
+ key,
172
+ value,
173
+ attention_mask=None,
174
+ head_mask=None,
175
+ ):
176
+ # compute causal mask from causal mask buffer
177
+ query_length, key_length = query.size(-2), key.size(-2)
178
+ causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length]
179
+
180
+ # Keep the attention weights computation in fp32 to avoid overflow issues
181
+ query = query.to(torch.float32)
182
+ key = key.to(torch.float32)
183
+
184
+ attn_weights = torch.matmul(query, key.transpose(-1, -2))
185
+
186
+ mask_value = torch.finfo(attn_weights.dtype).min
187
+ # Needs to be a tensor, otherwise we get the error: `RuntimeError: expected scalar type float but found double`.
188
+ # Needs to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`
189
+ mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(attn_weights.device)
190
+ attn_weights = torch.where(causal_mask, attn_weights, mask_value)
191
+
192
+ attn_weights = attn_weights / self.scale_attn
193
+
194
+ if attention_mask is not None:
195
+ # Apply the attention mask
196
+ attn_weights = attn_weights + attention_mask
197
+
198
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
199
+ attn_weights = attn_weights.to(value.dtype)
200
+ attn_weights = self.attn_dropout(attn_weights)
201
+
202
+ # Mask heads if we want to
203
+ if head_mask is not None:
204
+ attn_weights = attn_weights * head_mask
205
+
206
+ attn_output = torch.matmul(attn_weights, value)
207
+
208
+ return attn_output, attn_weights
209
+
210
+ def _get_embed_positions(self, position_ids):
211
+ embed_positions = self.embed_positions
212
+ if embed_positions.device != position_ids.device:
213
+ embed_positions = embed_positions.to(position_ids.device)
214
+ self.embed_positions = embed_positions
215
+ return embed_positions.repeat(position_ids.shape[0], 1, 1)
216
+
217
+ def forward(
218
+ self,
219
+ hidden_states: torch.FloatTensor,
220
+ layer_past: Optional[Tuple[torch.Tensor]] = None,
221
+ attention_mask: Optional[torch.FloatTensor] = None,
222
+ position_ids: Optional[torch.LongTensor] = None,
223
+ head_mask: Optional[torch.FloatTensor] = None,
224
+ use_cache: Optional[bool] = False,
225
+ output_attentions: Optional[bool] = False,
226
+ ) -> Union[
227
+ Tuple[torch.Tensor, Tuple[torch.Tensor]],
228
+ Optional[Tuple[torch.Tensor, Tuple[torch.Tensor], Tuple[torch.Tensor, ...]]],
229
+ ]:
230
+ query = self.q_proj(hidden_states)
231
+ key = self.k_proj(hidden_states)
232
+ value = self.v_proj(hidden_states)
233
+
234
+ query = self._split_heads(query, self.num_attention_heads, self.head_dim, True)
235
+ key = self._split_heads(key, self.num_attention_heads, self.head_dim, True)
236
+ value = self._split_heads(value, self.num_attention_heads, self.head_dim, False)
237
+
238
+ if is_torch_fx_proxy(position_ids) or torch.jit.is_tracing():
239
+ # The logic to conditionally copy to GPU could not be traced, so we do this
240
+ # every time in the torch.fx case
241
+ embed_positions = get_embed_positions(self.embed_positions, position_ids)
242
+ else:
243
+ embed_positions = self._get_embed_positions(position_ids)
244
+
245
+ repeated_position_ids = position_ids.unsqueeze(-1).repeat(1, 1, embed_positions.shape[-1])
246
+ sincos = torch.gather(embed_positions, 1, repeated_position_ids)
247
+ sin, cos = torch.split(sincos, sincos.shape[-1] // 2, dim=-1)
248
+
249
+ if self.rotary_dim is not None:
250
+ k_rot = key[:, :, :, : self.rotary_dim]
251
+ k_pass = key[:, :, :, self.rotary_dim :]
252
+
253
+ q_rot = query[:, :, :, : self.rotary_dim]
254
+ q_pass = query[:, :, :, self.rotary_dim :]
255
+
256
+ k_rot = apply_rotary_pos_emb(k_rot, sin, cos)
257
+ q_rot = apply_rotary_pos_emb(q_rot, sin, cos)
258
+
259
+ key = torch.cat([k_rot, k_pass], dim=-1)
260
+ query = torch.cat([q_rot, q_pass], dim=-1)
261
+ else:
262
+ key = apply_rotary_pos_emb(key, sin, cos)
263
+ query = apply_rotary_pos_emb(query, sin, cos)
264
+
265
+ key = key.permute(0, 2, 1, 3)
266
+ query = query.permute(0, 2, 1, 3)
267
+
268
+ if layer_past is not None:
269
+ past_key = layer_past[0]
270
+ past_value = layer_past[1]
271
+ key = torch.cat((past_key, key), dim=-2)
272
+ value = torch.cat((past_value, value), dim=-2)
273
+
274
+ if use_cache is True:
275
+ # Note that this cast is quite ugly, but it is not done before RoPE as the original codebase keeps the key in float32 throughout the computation.
276
+ # Reference: https://github.com/kingoflolz/mesh-transformer-jax/blob/f8315e3003033b23f21d78361b288953064e0e76/mesh_transformer/layers.py#L128
277
+ present = (key.to(hidden_states.dtype), value)
278
+ else:
279
+ present = None
280
+
281
+ # compute self-attention: V x Softmax(QK^T)
282
+ attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)
283
+
284
+ attn_output = self._merge_heads(attn_output, self.num_attention_heads, self.head_dim)
285
+ attn_output = self.out_proj(attn_output)
286
+ attn_output = self.resid_dropout(attn_output)
287
+
288
+ outputs = (attn_output, present)
289
+ if output_attentions:
290
+ outputs += (attn_weights,)
291
+
292
+ return outputs # a, present, (attentions)
293
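As an illustrative sketch (not part of the original file), the eager attention module above can be exercised standalone with a deliberately tiny, hypothetical configuration:

# Illustrative sketch: running GPTJAttention on random hidden states.
cfg = GPTJConfig(n_embd=64, n_head=4, rotary_dim=16, n_positions=128)
attn = GPTJAttention(cfg)
hidden = torch.randn(2, 10, 64)
position_ids = torch.arange(10).unsqueeze(0).expand(2, -1)
attn_out, present = attn(hidden, position_ids=position_ids, use_cache=True)
print(attn_out.shape)    # torch.Size([2, 10, 64])
print(present[0].shape)  # cached keys: torch.Size([2, 4, 10, 16])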
+
294
+
295
+ class GPTJFlashAttention2(GPTJAttention):
296
+ """
297
+ GPTJ flash attention module. This module inherits from `GPTJAttention` as the weights of the module stay
298
+ untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
299
+ flash attention and deal with padding tokens in case the input contains any of them.
300
+ """
301
+
302
+ def __init__(self, *args, **kwargs):
303
+ super().__init__(*args, **kwargs)
304
+
305
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
306
+ # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignment, which was made the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
307
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
308
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
309
+
310
+ def forward(
311
+ self,
312
+ hidden_states: torch.FloatTensor,
313
+ layer_past: Optional[Tuple[torch.Tensor]] = None,
314
+ attention_mask: Optional[torch.FloatTensor] = None,
315
+ position_ids: Optional[torch.LongTensor] = None,
316
+ head_mask: Optional[torch.FloatTensor] = None,
317
+ use_cache: Optional[bool] = False,
318
+ output_attentions: Optional[bool] = False,
319
+ ) -> Union[
320
+ Tuple[torch.Tensor, Tuple[torch.Tensor]],
321
+ Optional[Tuple[torch.Tensor, Tuple[torch.Tensor], Tuple[torch.Tensor, ...]]],
322
+ ]:
323
+ query = self.q_proj(hidden_states)
324
+ key = self.k_proj(hidden_states)
325
+ value = self.v_proj(hidden_states)
326
+
327
+ query = self._split_heads(query, self.num_attention_heads, self.head_dim, True)
328
+ key = self._split_heads(key, self.num_attention_heads, self.head_dim, True)
329
+ value = self._split_heads(value, self.num_attention_heads, self.head_dim, False)
330
+
331
+ if is_torch_fx_proxy(position_ids) or torch.jit.is_tracing():
332
+ # The logic to conditionally copy to GPU could not be traced, so we do this
333
+ # every time in the torch.fx case
334
+ embed_positions = get_embed_positions(self.embed_positions, position_ids)
335
+ else:
336
+ embed_positions = self._get_embed_positions(position_ids)
337
+
338
+ repeated_position_ids = position_ids.unsqueeze(-1).repeat(1, 1, embed_positions.shape[-1])
339
+ sincos = torch.gather(embed_positions, 1, repeated_position_ids)
340
+ sin, cos = torch.split(sincos, sincos.shape[-1] // 2, dim=-1)
341
+
342
+ if self.rotary_dim is not None:
343
+ k_rot = key[:, :, :, : self.rotary_dim]
344
+ k_pass = key[:, :, :, self.rotary_dim :]
345
+
346
+ q_rot = query[:, :, :, : self.rotary_dim]
347
+ q_pass = query[:, :, :, self.rotary_dim :]
348
+
349
+ k_rot = apply_rotary_pos_emb(k_rot, sin, cos)
350
+ q_rot = apply_rotary_pos_emb(q_rot, sin, cos)
351
+
352
+ key = torch.cat([k_rot, k_pass], dim=-1)
353
+ query = torch.cat([q_rot, q_pass], dim=-1)
354
+ else:
355
+ key = apply_rotary_pos_emb(key, sin, cos)
356
+ query = apply_rotary_pos_emb(query, sin, cos)
357
+
358
+ # transpose to have the desired shape
359
+ # before transpose: batch_size x seq_length x num_attention_heads x head_dim
360
+ # after transpose: batch_size x num_attention_heads x seq_length x head_dim
361
+ key = key.permute(0, 2, 1, 3)
362
+ query = query.permute(0, 2, 1, 3)
363
+ # value: batch_size x num_attention_heads x seq_length x head_dim
364
+
365
+ if layer_past is not None:
366
+ past_key = layer_past[0]
367
+ past_value = layer_past[1]
368
+ key = torch.cat((past_key, key), dim=-2)
369
+ value = torch.cat((past_value, value), dim=-2)
370
+
371
+ if use_cache is True:
372
+ # Note that this cast is quite ugly, but it is not done before RoPE as the original codebase keeps the key in float32 throughout the computation.
373
+ # Reference: https://github.com/kingoflolz/mesh-transformer-jax/blob/f8315e3003033b23f21d78361b288953064e0e76/mesh_transformer/layers.py#L128
374
+ present = (key.to(hidden_states.dtype), value)
375
+ else:
376
+ present = None
377
+
378
+ # The Flash attention requires the input to have the shape
379
+ # batch_size x seq_length x head_dim x hidden_dim
380
+ # therefore we need to keep the original shape for query and key, and reshape value
381
+ # to have the correct shape.
382
+ key = key.permute(0, 2, 1, 3).contiguous()
383
+ query = query.permute(0, 2, 1, 3).contiguous()
384
+ value = value.permute(0, 2, 1, 3).contiguous()
385
+
386
+ # In PEFT, usually we cast the layer norms in float32 for training stability reasons
387
+ # therefore the input hidden states get silently cast to float32. Hence, we need to
388
+ # cast them back to the correct dtype just to be sure everything works as expected.
389
+ # This might slow down training & inference, so it is recommended not to cast the LayerNorms
390
+ # in fp32. (LlamaRMSNorm handles it correctly)
391
+
392
+ input_dtype = query.dtype
393
+ if input_dtype == torch.float32:
394
+ if torch.is_autocast_enabled():
395
+ target_dtype = torch.get_autocast_gpu_dtype()
396
+ # Handle the case where the model is quantized
397
+ elif hasattr(self.config, "_pre_quantization_dtype"):
398
+ target_dtype = self.config._pre_quantization_dtype
399
+ else:
400
+ target_dtype = self.q_proj.weight.dtype
401
+
402
+ logger.warning_once(
403
+ f"The input hidden states seems to be silently casted in float32, this might be related to"
404
+ f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
405
+ f" {target_dtype}."
406
+ )
407
+
408
+ query = query.to(target_dtype)
409
+ key = key.to(target_dtype)
410
+ value = value.to(target_dtype)
411
+
412
+ attention_dropout = self.config.attn_pdrop if self.training else 0.0 # attn_pdrop in gptj
413
+
414
+ query_length = query.shape[1]
415
+
416
+ # Compute attention
417
+ attn_weights = self._flash_attention_forward(
418
+ query,
419
+ key,
420
+ value,
421
+ attention_mask,
422
+ query_length,
423
+ dropout=attention_dropout,
424
+ )
425
+
426
+ # Reshape outputs
427
+ attn_output = attn_weights.reshape(
428
+ attn_weights.shape[0], attn_weights.shape[1], attn_weights.shape[2] * attn_weights.shape[3]
429
+ )
430
+ attn_output = self.out_proj(attn_output)
431
+ attn_output = self.resid_dropout(attn_output)
432
+
433
+ outputs = (attn_output, present)
434
+ if output_attentions:
435
+ outputs += (attn_weights,)
436
+
437
+ return outputs
438
+
439
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward
440
+ def _flash_attention_forward(
441
+ self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
442
+ ):
443
+ """
444
+ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token,
445
+ it first unpads the input, then computes the attention scores and pads the final attention scores.
446
+
447
+ Args:
448
+ query_states (`torch.Tensor`):
449
+ Input query states to be passed to Flash Attention API
450
+ key_states (`torch.Tensor`):
451
+ Input key states to be passed to Flash Attention API
452
+ value_states (`torch.Tensor`):
453
+ Input value states to be passed to Flash Attention API
454
+ attention_mask (`torch.Tensor`):
455
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
456
+ position of padding tokens and 1 for the position of non-padding tokens.
457
+ dropout (`float`):
458
+ Attention dropout
459
+ softmax_scale (`float`, *optional*):
460
+ The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
461
+ """
462
+ if not self._flash_attn_uses_top_left_mask:
463
+ causal = self.is_causal
464
+ else:
465
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
466
+ causal = self.is_causal and query_length != 1
467
+
468
+ # Contains at least one padding token in the sequence
469
+ if attention_mask is not None:
470
+ batch_size = query_states.shape[0]
471
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
472
+ query_states, key_states, value_states, attention_mask, query_length
473
+ )
474
+
475
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
476
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
477
+
478
+ attn_output_unpad = flash_attn_varlen_func(
479
+ query_states,
480
+ key_states,
481
+ value_states,
482
+ cu_seqlens_q=cu_seqlens_q,
483
+ cu_seqlens_k=cu_seqlens_k,
484
+ max_seqlen_q=max_seqlen_in_batch_q,
485
+ max_seqlen_k=max_seqlen_in_batch_k,
486
+ dropout_p=dropout,
487
+ softmax_scale=softmax_scale,
488
+ causal=causal,
489
+ )
490
+
491
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
492
+ else:
493
+ attn_output = flash_attn_func(
494
+ query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
495
+ )
496
+
497
+ return attn_output
498
+
499
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._upad_input with num_heads->num_attention_heads
500
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
501
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
502
+ batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
503
+
504
+ key_layer = index_first_axis(
505
+ key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
506
+ )
507
+ value_layer = index_first_axis(
508
+ value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
509
+ )
510
+ if query_length == kv_seq_len:
511
+ query_layer = index_first_axis(
512
+ query_layer.reshape(batch_size * kv_seq_len, self.num_attention_heads, head_dim), indices_k
513
+ )
514
+ cu_seqlens_q = cu_seqlens_k
515
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
516
+ indices_q = indices_k
517
+ elif query_length == 1:
518
+ max_seqlen_in_batch_q = 1
519
+ cu_seqlens_q = torch.arange(
520
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
521
+ ) # There is a memcpy here, that is very bad.
522
+ indices_q = cu_seqlens_q[:-1]
523
+ query_layer = query_layer.squeeze(1)
524
+ else:
525
+ # The -q_len: slice assumes left padding.
526
+ attention_mask = attention_mask[:, -query_length:]
527
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
528
+
529
+ return (
530
+ query_layer,
531
+ key_layer,
532
+ value_layer,
533
+ indices_q,
534
+ (cu_seqlens_q, cu_seqlens_k),
535
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
536
+ )
537
+
538
+
539
+ GPTJ_ATTENTION_CLASSES = {
540
+ "eager": GPTJAttention,
541
+ "flash_attention_2": GPTJFlashAttention2,
542
+ }
543
+
544
+
545
+ class GPTJMLP(nn.Module):
546
+ def __init__(self, intermediate_size, config): # in MLP: intermediate_size= 4 * embed_dim
547
+ super().__init__()
548
+ embed_dim = config.n_embd
549
+
550
+ self.fc_in = nn.Linear(embed_dim, intermediate_size)
551
+ self.fc_out = nn.Linear(intermediate_size, embed_dim)
552
+
553
+ self.act = ACT2FN[config.activation_function]
554
+ self.dropout = nn.Dropout(config.resid_pdrop)
555
+
556
+ def forward(self, hidden_states: Optional[torch.FloatTensor]) -> torch.FloatTensor:
557
+ hidden_states = self.fc_in(hidden_states)
558
+ hidden_states = self.act(hidden_states)
559
+ hidden_states = self.fc_out(hidden_states)
560
+ hidden_states = self.dropout(hidden_states)
561
+ return hidden_states
562
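A minimal illustrative sketch (not part of the original file) of the MLP above, using a hypothetical hidden size and the conventional 4x intermediate size:

# Illustrative sketch: the GPT-J MLP maps hidden -> 4*hidden -> hidden.
cfg = GPTJConfig(n_embd=64)
mlp = GPTJMLP(4 * cfg.n_embd, cfg)
print(mlp(torch.randn(2, 10, 64)).shape)   # torch.Size([2, 10, 64])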
+
563
+
564
+ class GPTJBlock(nn.Module):
565
+ def __init__(self, config):
566
+ super().__init__()
567
+ inner_dim = config.n_inner if config.n_inner is not None else 4 * config.n_embd
568
+ self.ln_1 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
569
+ self.attn = GPTJ_ATTENTION_CLASSES[config._attn_implementation](config)
570
+ self.mlp = GPTJMLP(inner_dim, config)
571
+
572
+ def forward(
573
+ self,
574
+ hidden_states: Optional[torch.FloatTensor],
575
+ layer_past: Optional[Tuple[torch.Tensor]] = None,
576
+ attention_mask: Optional[torch.FloatTensor] = None,
577
+ position_ids: Optional[torch.LongTensor] = None,
578
+ head_mask: Optional[torch.FloatTensor] = None,
579
+ use_cache: Optional[bool] = False,
580
+ output_attentions: Optional[bool] = False,
581
+ ) -> Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]]]:
582
+ residual = hidden_states
583
+ hidden_states = self.ln_1(hidden_states)
584
+ attn_outputs = self.attn(
585
+ hidden_states=hidden_states,
586
+ layer_past=layer_past,
587
+ attention_mask=attention_mask,
588
+ position_ids=position_ids,
589
+ head_mask=head_mask,
590
+ use_cache=use_cache,
591
+ output_attentions=output_attentions,
592
+ )
593
+ attn_output = attn_outputs[0] # output_attn: a, present, (attentions)
594
+ outputs = attn_outputs[1:]
595
+
596
+ feed_forward_hidden_states = self.mlp(hidden_states)
597
+ hidden_states = attn_output + feed_forward_hidden_states + residual
598
+
599
+ if use_cache:
600
+ outputs = (hidden_states,) + outputs
601
+ else:
602
+ outputs = (hidden_states,) + outputs[1:]
603
+
604
+ return outputs # hidden_states, present, (attentions)
605
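Note that the block above uses a parallel residual: the attention and MLP branches both read the same `ln_1` output and are summed with the residual, i.e. roughly `x + Attn(LN(x)) + MLP(LN(x))`. An illustrative sketch (not part of the original file; sizes are hypothetical):

# Illustrative sketch: one GPT-J block applied to random hidden states.
block = GPTJBlock(GPTJConfig(n_embd=64, n_head=4, rotary_dim=16, n_positions=128))
x = torch.randn(1, 5, 64)
position_ids = torch.arange(5).unsqueeze(0)
(h_out,) = block(x, position_ids=position_ids)
print(h_out.shape)   # torch.Size([1, 5, 64])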
+
606
+
607
+ class GPTJPreTrainedModel(PreTrainedModel):
608
+ """
609
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
610
+ models.
611
+ """
612
+
613
+ config_class = GPTJConfig
614
+ base_model_prefix = "transformer"
615
+ is_parallelizable = True
616
+ supports_gradient_checkpointing = True
617
+ _no_split_modules = ["GPTJBlock"]
618
+ _skip_keys_device_placement = "past_key_values"
619
+ _supports_flash_attn_2 = True
620
+
621
+ def __init__(self, *inputs, **kwargs):
622
+ super().__init__(*inputs, **kwargs)
623
+
624
+ def _init_weights(self, module):
625
+ """Initialize the weights."""
626
+ if isinstance(module, (nn.Linear,)):
627
+ # Slightly different from Mesh Transformer JAX which uses truncated_normal for initialization
628
+ # cf https://github.com/pytorch/pytorch/pull/5617
629
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
630
+ if module.bias is not None:
631
+ module.bias.data.zero_()
632
+ elif isinstance(module, nn.Embedding):
633
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
634
+ if module.padding_idx is not None:
635
+ module.weight.data[module.padding_idx].zero_()
636
+ elif isinstance(module, nn.LayerNorm):
637
+ module.bias.data.zero_()
638
+ module.weight.data.fill_(1.0)
639
+
640
+
641
+ GPTJ_START_DOCSTRING = r"""
642
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
643
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
644
+ behavior.
645
+
646
+ Parameters:
647
+ config ([`GPTJConfig`]): Model configuration class with all the parameters of the model.
648
+ Initializing with a config file does not load the weights associated with the model, only the
649
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
650
+ """
651
+
652
+ GPTJ_INPUTS_DOCSTRING = r"""
653
+ Args:
654
+ input_ids (`torch.LongTensor` of shape `({0})`):
655
+ Indices of input sequence tokens in the vocabulary.
656
+
657
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
658
+ [`PreTrainedTokenizer.__call__`] for details.
659
+
660
+ [What are input IDs?](../glossary#input-ids)
661
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
662
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
663
+
664
+ - 1 for tokens that are **not masked**,
665
+ - 0 for tokens that are **masked**.
666
+
667
+ [What are attention masks?](../glossary#attention-mask)
668
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
669
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
670
+ 1]`:
671
+
672
+ - 0 corresponds to a *sentence A* token,
673
+ - 1 corresponds to a *sentence B* token.
674
+
675
+ [What are token type IDs?](../glossary#token-type-ids)
676
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
677
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
678
+ config.n_positions - 1]`.
679
+
680
+ [What are position IDs?](../glossary#position-ids)
681
+ head_mask (`torch.FloatTensor` of shape `(num_attention_heads,)` or `(n_layer, num_attention_heads)`, *optional*):
682
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
683
+
684
+ - 1 indicates the head is **not masked**,
685
+ - 0 indicates the head is **masked**.
686
+
687
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_dim)`, *optional*):
688
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
689
+ is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
690
+ model's internal embedding lookup matrix.
691
+ output_attentions (`bool`, *optional*):
692
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
693
+ tensors for more detail.
694
+ output_hidden_states (`bool`, *optional*):
695
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
696
+ more detail.
697
+ return_dict (`bool`, *optional*):
698
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
699
+ """
700
+
701
+ PARALLELIZE_DOCSTRING = r"""
702
+ This is an experimental feature and is subject to change at a moment's notice. Uses a device map to distribute
703
+ attention modules of the model across several devices. If no device map is given, it will evenly distribute blocks
704
+ across all devices.
705
+
706
+ Args:
707
+ device_map (`Dict[int, list]`, optional, defaults to None):
708
+ A dictionary that maps attention modules to devices. Note that the embedding module and LMHead are always
709
+ automatically mapped to the first device (for esoteric reasons). That means that the first device should
710
+ have fewer attention modules mapped to it than other devices. For reference, the GPT-J models have the
711
+ following number of attention modules:
712
+
713
+ - gpt-j-6B: 28
714
+
715
+ Example:
716
+
717
+ ```python
718
+ # Here is an example of a device map on a machine with 4 GPUs using gpt-j-6B, which has a total of 28 attention modules:
719
+ model = GPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
720
+ device_map = {
721
+ 0: [0, 1, 2, 3, 4, 5, 6],
722
+ 1: [7, 8, 9, 10, 11, 12, 13],
723
+ 2: [14, 15, 16, 17, 18, 19, 20],
724
+ 3: [21, 22, 23, 24, 25, 26, 27],
725
+ }
726
+ model.parallelize(device_map)
727
+ ```
728
+ """
729
+
730
+ DEPARALLELIZE_DOCSTRING = r"""
731
+ Moves the model to CPU from a model parallel state.
732
+
733
+ Example:
734
+
735
+ ```python
736
+ # On a 4 GPU machine with gpt-j-6B:
737
+ model = GPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
738
+ device_map = {
739
+ 0: [0, 1, 2, 3, 4, 5, 6],
740
+ 1: [7, 8, 9, 10, 11, 12, 13],
741
+ 2: [14, 15, 16, 17, 18, 19, 20],
742
+ 3: [21, 22, 23, 24, 25, 26, 27],
743
+ }
744
+ model.parallelize(device_map) # Splits the model across several devices
745
+ model.deparallelize() # Put the model back on cpu and cleans memory by calling torch.cuda.empty_cache()
746
+ ```
747
+ """
748
+
749
+
750
+ @add_start_docstrings(
751
+ "The bare GPT-J Model transformer outputting raw hidden-states without any specific head on top.",
752
+ GPTJ_START_DOCSTRING,
753
+ )
754
+ class GPTJModel(GPTJPreTrainedModel):
755
+ def __init__(self, config):
756
+ super().__init__(config)
757
+
758
+ self.embed_dim = config.n_embd
759
+ self.vocab_size = config.vocab_size
760
+ self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
761
+ self.drop = nn.Dropout(config.embd_pdrop)
762
+ self.h = nn.ModuleList([GPTJBlock(config) for _ in range(config.n_layer)])
763
+ self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
764
+
765
+ # Model parallel
766
+ self.model_parallel = False
767
+ self.device_map = None
768
+ self.gradient_checkpointing = False
769
+
770
+ # Initialize weights and apply final processing
771
+ self.post_init()
772
+
773
+ self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
774
+
775
+ @add_start_docstrings(PARALLELIZE_DOCSTRING)
776
+ def parallelize(self, device_map=None):
777
+ warnings.warn(
778
+ "`GPTJModel.parallelize` is deprecated and will be removed in v5 of Transformers, you should load your"
779
+ " model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your own"
780
+ " `device_map` but it needs to be a dictionary module_name to device, so for instance {'h.0': 0, 'h.1': 1,"
781
+ " ...}",
782
+ FutureWarning,
783
+ )
784
+ # Check validity of device_map
785
+ self.device_map = (
786
+ get_device_map(len(self.h), range(torch.cuda.device_count())) if device_map is None else device_map
787
+ )
788
+ assert_device_map(self.device_map, len(self.h))
789
+ self.model_parallel = True
790
+ self.first_device = "cpu" if "cpu" in self.device_map.keys() else "cuda:" + str(min(self.device_map.keys()))
791
+ self.last_device = "cuda:" + str(max(self.device_map.keys()))
792
+ self.wte = self.wte.to(self.first_device)
793
+ # Load onto devices
794
+ for k, v in self.device_map.items():
795
+ for block in v:
796
+ cuda_device = "cuda:" + str(k)
797
+ self.h[block] = self.h[block].to(cuda_device)
798
+ # ln_f to last
799
+ self.ln_f = self.ln_f.to(self.last_device)
800
+
801
+ @add_start_docstrings(DEPARALLELIZE_DOCSTRING)
802
+ def deparallelize(self):
803
+ warnings.warn(
804
+ "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.",
805
+ FutureWarning,
806
+ )
807
+ self.model_parallel = False
808
+ self.device_map = None
809
+ self.first_device = "cpu"
810
+ self.last_device = "cpu"
811
+ self.wte = self.wte.to("cpu")
812
+ for index in range(len(self.h)):
813
+ self.h[index] = self.h[index].to("cpu")
814
+ self.ln_f = self.ln_f.to("cpu")
815
+ torch.cuda.empty_cache()
816
+
817
+ def get_input_embeddings(self):
818
+ return self.wte
819
+
820
+ def set_input_embeddings(self, new_embeddings):
821
+ self.wte = new_embeddings
822
+
823
+ @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
824
+ @add_code_sample_docstrings(
825
+ checkpoint=_CHECKPOINT_FOR_DOC,
826
+ output_type=BaseModelOutputWithPast,
827
+ config_class=_CONFIG_FOR_DOC,
828
+ real_checkpoint=_REAL_CHECKPOINT_FOR_DOC,
829
+ )
830
+ def forward(
831
+ self,
832
+ input_ids: Optional[torch.LongTensor] = None,
833
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
834
+ attention_mask: Optional[torch.FloatTensor] = None,
835
+ token_type_ids: Optional[torch.LongTensor] = None,
836
+ position_ids: Optional[torch.LongTensor] = None,
837
+ head_mask: Optional[torch.FloatTensor] = None,
838
+ inputs_embeds: Optional[torch.FloatTensor] = None,
839
+ use_cache: Optional[bool] = None,
840
+ output_attentions: Optional[bool] = None,
841
+ output_hidden_states: Optional[bool] = None,
842
+ return_dict: Optional[bool] = None,
843
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
844
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
845
+ output_hidden_states = (
846
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
847
+ )
848
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
849
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
850
+
851
+ if input_ids is not None and inputs_embeds is not None:
852
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
853
+ elif input_ids is not None:
854
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
855
+ input_shape = input_ids.size()
856
+ input_ids = input_ids.view(-1, input_shape[-1])
857
+ batch_size = input_ids.shape[0]
858
+ elif inputs_embeds is not None:
859
+ input_shape = inputs_embeds.size()[:-1]
860
+ batch_size = inputs_embeds.shape[0]
861
+ else:
862
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
863
+
864
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
865
+
866
+ if token_type_ids is not None:
867
+ token_type_ids = token_type_ids.view(-1, input_shape[-1])
868
+
869
+ if past_key_values is None:
870
+ past_length = 0
871
+ past_key_values = tuple([None] * len(self.h))
872
+ else:
873
+ past_length = past_key_values[0][0].size(-2)
874
+
875
+ if position_ids is None:
876
+ position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
877
+ position_ids = position_ids.unsqueeze(0)
878
+
879
+ if not self._use_flash_attention_2:
880
+ # Attention mask.
881
+ if attention_mask is not None:
882
+ if batch_size <= 0:
883
+ raise ValueError("batch_size has to be defined and > 0")
884
+ attention_mask = attention_mask.view(batch_size, -1)
885
+ # We create a 3D attention mask from a 2D tensor mask.
886
+ # Sizes are [batch_size, 1, 1, to_seq_length]
887
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
888
+ # this attention mask is more simple than the triangular masking of causal attention
889
+ # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
890
+ attention_mask = attention_mask[:, None, None, :]
891
+
892
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
893
+ # masked positions, this operation will create a tensor which is 0.0 for
894
+ # positions we want to attend and the dtype's smallest value for masked positions.
895
+ # Since we are adding it to the raw scores before the softmax, this is
896
+ # effectively the same as removing these entirely.
897
+ attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility
898
+ attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min
899
+
900
+ # Prepare head mask if needed
901
+ # 1.0 in head_mask indicate we keep the head
902
+ # attention_probs has shape bsz x num_attention_heads x N x N
903
+ # head_mask has shape n_layer x batch x num_attention_heads x N x N
904
+ head_mask = self.get_head_mask(head_mask, self.config.n_layer)
905
+
906
+ if inputs_embeds is None:
907
+ inputs_embeds = self.wte(input_ids)
908
+
909
+ hidden_states = inputs_embeds
910
+
911
+ if token_type_ids is not None:
912
+ token_type_embeds = self.wte(token_type_ids)
913
+ hidden_states = hidden_states + token_type_embeds
914
+
915
+ hidden_states = self.drop(hidden_states)
916
+
917
+ output_shape = (-1,) + input_shape[1:] + (hidden_states.size(-1),)
918
+
919
+ if self.gradient_checkpointing and self.training:
920
+ if use_cache:
921
+ logger.warning_once(
922
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
923
+ )
924
+ use_cache = False
925
+
926
+ presents = () if use_cache else None
927
+ all_self_attentions = () if output_attentions else None
928
+ all_hidden_states = () if output_hidden_states else None
929
+ for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
930
+ # Model parallel
931
+ if self.model_parallel:
932
+ torch.cuda.set_device(hidden_states.device)
933
+ # Ensure layer_past is on same device as hidden_states (might not be correct)
934
+ if layer_past is not None:
935
+ layer_past = tuple(past_state.to(hidden_states.device) for past_state in layer_past)
936
+ # Ensure that attention_mask is always on the same device as hidden_states
937
+ if attention_mask is not None:
938
+ attention_mask = attention_mask.to(hidden_states.device)
939
+ if isinstance(head_mask, torch.Tensor):
940
+ head_mask = head_mask.to(hidden_states.device)
941
+ if output_hidden_states:
942
+ all_hidden_states = all_hidden_states + (hidden_states,)
943
+
944
+ if self.gradient_checkpointing and self.training:
945
+ outputs = self._gradient_checkpointing_func(
946
+ block.__call__,
947
+ hidden_states,
948
+ None,
949
+ attention_mask,
950
+ position_ids,
951
+ head_mask[i],
952
+ use_cache,
953
+ output_attentions,
954
+ )
955
+ else:
956
+ outputs = block(
957
+ hidden_states=hidden_states,
958
+ layer_past=layer_past,
959
+ attention_mask=attention_mask,
960
+ position_ids=position_ids,
961
+ head_mask=head_mask[i],
962
+ use_cache=use_cache,
963
+ output_attentions=output_attentions,
964
+ )
965
+
966
+ hidden_states = outputs[0]
967
+ if use_cache is True:
968
+ presents = presents + (outputs[1],)
969
+
970
+ if output_attentions:
971
+ all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
972
+
973
+ # Model Parallel: If it's the last layer for that device, put things on the next device
974
+ if self.model_parallel:
975
+ for k, v in self.device_map.items():
976
+ if i == v[-1] and "cuda:" + str(k) != self.last_device:
977
+ hidden_states = hidden_states.to("cuda:" + str(k + 1))
978
+
979
+ hidden_states = self.ln_f(hidden_states)
980
+
981
+ hidden_states = hidden_states.view(output_shape)
982
+ # Add last hidden state
983
+ if output_hidden_states:
984
+ all_hidden_states = all_hidden_states + (hidden_states,)
985
+
986
+ if not return_dict:
987
+ return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)
988
+
989
+ return BaseModelOutputWithPast(
990
+ last_hidden_state=hidden_states,
991
+ past_key_values=presents,
992
+ hidden_states=all_hidden_states,
993
+ attentions=all_self_attentions,
994
+ )
995
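An illustrative sketch (not part of the original file) of a forward pass through a randomly initialised, deliberately tiny `GPTJModel`; all sizes are hypothetical:

# Illustrative sketch: forward pass through a tiny random GPTJModel.
tiny_cfg = GPTJConfig(vocab_size=100, n_positions=64, n_embd=32, n_layer=2, n_head=4, rotary_dim=8)
model = GPTJModel(tiny_cfg)
input_ids = torch.randint(0, 100, (1, 12))
out = model(input_ids)
print(out.last_hidden_state.shape)   # torch.Size([1, 12, 32])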
+
996
+
997
+ @add_start_docstrings(
998
+ """
999
+ The GPT-J Model transformer with a language modeling head on top.
1000
+ """,
1001
+ GPTJ_START_DOCSTRING,
1002
+ )
1003
+ class GPTJForCausalLM(GPTJPreTrainedModel):
1004
+ _tied_weights_keys = ["lm_head.weight"]
1005
+
1006
+ def __init__(self, config):
1007
+ super().__init__(config)
1008
+ self.transformer = GPTJModel(config)
1009
+ self.lm_head = nn.Linear(config.n_embd, config.vocab_size)
1010
+
1011
+ # Model parallel
1012
+ self.model_parallel = False
1013
+ self.device_map = None
1014
+
1015
+ # Initialize weights and apply final processing
1016
+ self.post_init()
1017
+
1018
+ @add_start_docstrings(PARALLELIZE_DOCSTRING)
1019
+ def parallelize(self, device_map=None):
1020
+ warnings.warn(
1021
+ "`GPTJForCausalLM.parallelize` is deprecated and will be removed in v5 of Transformers, you should load"
1022
+ " your model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your own"
1023
+ " `device_map` but it needs to be a dictionary module_name to device, so for instance {'transformer.h.0':"
1024
+ " 0, 'transformer.h.1': 1, ...}",
1025
+ FutureWarning,
1026
+ )
1027
+ self.device_map = (
1028
+ get_device_map(len(self.transformer.h), range(torch.cuda.device_count()))
1029
+ if device_map is None
1030
+ else device_map
1031
+ )
1032
+ assert_device_map(self.device_map, len(self.transformer.h))
1033
+ self.transformer.parallelize(self.device_map)
1034
+ self.lm_head = self.lm_head.to(self.transformer.first_device)
1035
+ self.model_parallel = True
1036
+
1037
+ @add_start_docstrings(DEPARALLELIZE_DOCSTRING)
1038
+ def deparallelize(self):
1039
+ warnings.warn(
1040
+ "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.",
1041
+ FutureWarning,
1042
+ )
1043
+ self.transformer.deparallelize()
1044
+ self.transformer = self.transformer.to("cpu")
1045
+ self.lm_head = self.lm_head.to("cpu")
1046
+ self.model_parallel = False
1047
+ torch.cuda.empty_cache()
1048
+
1049
+ def get_output_embeddings(self):
1050
+ return self.lm_head
1051
+
1052
+ def set_output_embeddings(self, new_embeddings):
1053
+ self.lm_head = new_embeddings
1054
+
1055
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs):
1056
+ token_type_ids = kwargs.get("token_type_ids", None)
1057
+ # Omit tokens covered by past_key_values
1058
+ if past_key_values:
1059
+ past_length = past_key_values[0][0].shape[2]
1060
+
1061
+ # Some generation methods already pass only the last input ID
1062
+ if input_ids.shape[1] > past_length:
1063
+ remove_prefix_length = past_length
1064
+ else:
1065
+ # Default to old behavior: keep only final ID
1066
+ remove_prefix_length = input_ids.shape[1] - 1
1067
+
1068
+ input_ids = input_ids[:, remove_prefix_length:]
1069
+ if token_type_ids is not None:
1070
+ token_type_ids = token_type_ids[:, -input_ids.shape[1] :]
1071
+
1072
+ attention_mask = kwargs.get("attention_mask", None)
1073
+ position_ids = kwargs.get("position_ids", None)
1074
+
1075
+ if attention_mask is not None and position_ids is None:
1076
+ # create position_ids on the fly for batch generation
1077
+ position_ids = attention_mask.long().cumsum(-1) - 1
1078
+ position_ids.masked_fill_(attention_mask == 0, 1)
1079
+ if past_key_values:
1080
+ position_ids = position_ids[:, -input_ids.shape[1] :]
1081
+
1082
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
1083
+ if inputs_embeds is not None and past_key_values is None:
1084
+ model_inputs = {"inputs_embeds": inputs_embeds}
1085
+ else:
1086
+ model_inputs = {"input_ids": input_ids}
1087
+
1088
+ model_inputs.update(
1089
+ {
1090
+ "past_key_values": past_key_values,
1091
+ "use_cache": kwargs.get("use_cache"),
1092
+ "position_ids": position_ids,
1093
+ "attention_mask": attention_mask,
1094
+ "token_type_ids": token_type_ids,
1095
+ }
1096
+ )
1097
+
1098
+ return model_inputs
1099
+
1100
+ @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1101
+ @add_code_sample_docstrings(
1102
+ checkpoint=_CHECKPOINT_FOR_DOC,
1103
+ output_type=CausalLMOutputWithPast,
1104
+ config_class=_CONFIG_FOR_DOC,
1105
+ real_checkpoint=_REAL_CHECKPOINT_FOR_DOC,
1106
+ )
1107
+ def forward(
1108
+ self,
1109
+ input_ids: Optional[torch.LongTensor] = None,
1110
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
1111
+ attention_mask: Optional[torch.FloatTensor] = None,
1112
+ token_type_ids: Optional[torch.LongTensor] = None,
1113
+ position_ids: Optional[torch.LongTensor] = None,
1114
+ head_mask: Optional[torch.FloatTensor] = None,
1115
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1116
+ labels: Optional[torch.LongTensor] = None,
1117
+ use_cache: Optional[bool] = None,
1118
+ output_attentions: Optional[bool] = None,
1119
+ output_hidden_states: Optional[bool] = None,
1120
+ return_dict: Optional[bool] = None,
1121
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
1122
+ r"""
1123
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1124
+ Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
1125
+ `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
1126
+ are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
1127
+ """
1128
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1129
+
1130
+ transformer_outputs = self.transformer(
1131
+ input_ids,
1132
+ past_key_values=past_key_values,
1133
+ attention_mask=attention_mask,
1134
+ token_type_ids=token_type_ids,
1135
+ position_ids=position_ids,
1136
+ head_mask=head_mask,
1137
+ inputs_embeds=inputs_embeds,
1138
+ use_cache=use_cache,
1139
+ output_attentions=output_attentions,
1140
+ output_hidden_states=output_hidden_states,
1141
+ return_dict=return_dict,
1142
+ )
1143
+ hidden_states = transformer_outputs[0]
1144
+
1145
+ # Set device for model parallelism
1146
+ if self.model_parallel:
1147
+ torch.cuda.set_device(self.transformer.first_device)
1148
+ hidden_states = hidden_states.to(self.lm_head.weight.device)
1149
+
1150
+ # make sure sampling in fp16 works correctly and
1151
+ # compute loss in fp32 to match with mesh-tf version
1152
+ # https://github.com/EleutherAI/gpt-neo/blob/89ce74164da2fb16179106f54e2269b5da8db333/models/gpt2/gpt2.py#L179
1153
+ lm_logits = self.lm_head(hidden_states).to(torch.float32)
1154
+
1155
+ loss = None
1156
+ if labels is not None:
1157
+ # move labels to correct device to enable model parallelism
1158
+ labels = labels.to(lm_logits.device)
1159
+ # Shift so that tokens < n predict n
1160
+ shift_logits = lm_logits[..., :-1, :].contiguous()
1161
+ shift_labels = labels[..., 1:].contiguous()
1162
+ # Flatten the tokens
1163
+ loss_fct = CrossEntropyLoss()
1164
+ loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
1165
+
1166
+ loss = loss.to(hidden_states.dtype)
1167
+
1168
+ if not return_dict:
1169
+ output = (lm_logits,) + transformer_outputs[1:]
1170
+ return ((loss,) + output) if loss is not None else output
1171
+
1172
+ return CausalLMOutputWithPast(
1173
+ loss=loss,
1174
+ logits=lm_logits,
1175
+ past_key_values=transformer_outputs.past_key_values,
1176
+ hidden_states=transformer_outputs.hidden_states,
1177
+ attentions=transformer_outputs.attentions,
1178
+ )
1179
+
1180
+ @staticmethod
1181
+ def _reorder_cache(
1182
+ past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor
1183
+ ) -> Tuple[Tuple[torch.Tensor]]:
1184
+ """
1185
+ This function is used to re-order the `past_key_values` cache if [`~PretrainedModel.beam_search`] or
1186
+ [`~PretrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
1187
+ beam_idx at every generation step.
1188
+ """
1189
+ return tuple(
1190
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
1191
+ for layer_past in past_key_values
1192
+ )
1193
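An illustrative sketch (not part of the original file) of greedy generation with the causal-LM head; it uses a tiny randomly initialised model rather than a real checkpoint, and the sizes are hypothetical:

# Illustrative sketch: greedy generation with a tiny random GPTJForCausalLM.
lm = GPTJForCausalLM(GPTJConfig(vocab_size=100, n_positions=64, n_embd=32, n_layer=2, n_head=4, rotary_dim=8))
prompt = torch.randint(0, 100, (1, 4))
generated = lm.generate(prompt, max_new_tokens=8, do_sample=False)
print(generated.shape)   # torch.Size([1, 12]): the 4 prompt tokens plus 8 newly generated ids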
+
1194
+
1195
+ @add_start_docstrings(
1196
+ """
1197
+ The GPT-J Model transformer with a sequence classification head on top (linear layer).
1198
+
1199
+ [`GPTJForSequenceClassification`] uses the last token in order to do the classification, as other causal models
1200
+ (e.g. GPT, GPT-2, GPT-Neo) do.
1201
+
1202
+ Since it does classification on the last token, it needs to know the position of the last token. If a
1203
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
1204
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
1205
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
1206
+ each row of the batch).
1207
+ """,
1208
+ GPTJ_START_DOCSTRING,
1209
+ )
1210
+ class GPTJForSequenceClassification(GPTJPreTrainedModel):
1211
+ def __init__(self, config):
1212
+ super().__init__(config)
1213
+ self.num_labels = config.num_labels
1214
+ self.transformer = GPTJModel(config)
1215
+ self.score = nn.Linear(config.n_embd, self.num_labels, bias=False)
1216
+
1217
+ # Model parallel
1218
+ self.model_parallel = False
1219
+ self.device_map = None
1220
+
1221
+ # Initialize weights and apply final processing
1222
+ self.post_init()
1223
+
1224
+ @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1225
+ @add_code_sample_docstrings(
1226
+ checkpoint="ydshieh/tiny-random-gptj-for-sequence-classification",
1227
+ output_type=SequenceClassifierOutputWithPast,
1228
+ config_class=_CONFIG_FOR_DOC,
1229
+ real_checkpoint=_REAL_CHECKPOINT_FOR_DOC,
1230
+ )
1231
+ def forward(
1232
+ self,
1233
+ input_ids: Optional[torch.LongTensor] = None,
1234
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
1235
+ attention_mask: Optional[torch.FloatTensor] = None,
1236
+ token_type_ids: Optional[torch.LongTensor] = None,
1237
+ position_ids: Optional[torch.LongTensor] = None,
1238
+ head_mask: Optional[torch.FloatTensor] = None,
1239
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1240
+ labels: Optional[torch.LongTensor] = None,
1241
+ use_cache: Optional[bool] = None,
1242
+ output_attentions: Optional[bool] = None,
1243
+ output_hidden_states: Optional[bool] = None,
1244
+ return_dict: Optional[bool] = None,
1245
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
1246
+ r"""
1247
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1248
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1249
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1250
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1251
+ """
1252
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1253
+
1254
+ transformer_outputs = self.transformer(
1255
+ input_ids,
1256
+ past_key_values=past_key_values,
1257
+ attention_mask=attention_mask,
1258
+ token_type_ids=token_type_ids,
1259
+ position_ids=position_ids,
1260
+ head_mask=head_mask,
1261
+ inputs_embeds=inputs_embeds,
1262
+ use_cache=use_cache,
1263
+ output_attentions=output_attentions,
1264
+ output_hidden_states=output_hidden_states,
1265
+ return_dict=return_dict,
1266
+ )
1267
+ hidden_states = transformer_outputs[0]
1268
+ logits = self.score(hidden_states)
1269
+
1270
+ if input_ids is not None:
1271
+ batch_size = input_ids.shape[0]
1272
+ else:
1273
+ batch_size = inputs_embeds.shape[0]
1274
+
1275
+ if self.config.pad_token_id is None and batch_size != 1:
1276
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
1277
+ if self.config.pad_token_id is None:
1278
+ sequence_lengths = -1
1279
+ else:
1280
+ if input_ids is not None:
1281
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
1282
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
1283
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
1284
+ sequence_lengths = sequence_lengths.to(logits.device)
1285
+ else:
1286
+ sequence_lengths = -1
1287
+ logger.warning(
1288
+ f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
1289
+ "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
1290
+ )
1291
+
1292
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
1293
+
1294
+ loss = None
1295
+ if labels is not None:
1296
+ labels = labels.to(pooled_logits.device)
1297
+ if self.config.problem_type is None:
1298
+ if self.num_labels == 1:
1299
+ self.config.problem_type = "regression"
1300
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1301
+ self.config.problem_type = "single_label_classification"
1302
+ else:
1303
+ self.config.problem_type = "multi_label_classification"
1304
+
1305
+ if self.config.problem_type == "regression":
1306
+ loss_fct = MSELoss()
1307
+ if self.num_labels == 1:
1308
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
1309
+ else:
1310
+ loss = loss_fct(pooled_logits, labels)
1311
+ elif self.config.problem_type == "single_label_classification":
1312
+ loss_fct = CrossEntropyLoss()
1313
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
1314
+ elif self.config.problem_type == "multi_label_classification":
1315
+ loss_fct = BCEWithLogitsLoss()
1316
+ loss = loss_fct(pooled_logits, labels)
1317
+ if not return_dict:
1318
+ output = (pooled_logits,) + transformer_outputs[1:]
1319
+ return ((loss,) + output) if loss is not None else output
1320
+
1321
+ return SequenceClassifierOutputWithPast(
1322
+ loss=loss,
1323
+ logits=pooled_logits,
1324
+ past_key_values=transformer_outputs.past_key_values,
1325
+ hidden_states=transformer_outputs.hidden_states,
1326
+ attentions=transformer_outputs.attentions,
1327
+ )
1328
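The ONNX-friendly indexing trick used above to find the last non-padding token can be illustrated in isolation (not part of the original file; the pad id 0 below is hypothetical):

# Illustrative sketch: locating the last non-padding token of each row.
input_ids = torch.tensor([[5, 7, 9, 0, 0],
                          [3, 4, 6, 8, 2]])
sequence_lengths = torch.eq(input_ids, 0).int().argmax(-1) - 1   # index just before the first pad
sequence_lengths = sequence_lengths % input_ids.shape[-1]        # rows with no pad wrap to the last index
print(sequence_lengths)   # tensor([2, 4])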
+
1329
+
1330
+ @add_start_docstrings(
1331
+ """
1332
+ The GPT-J Model transformer with a span classification head on top for extractive question-answering tasks like
1333
+ SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
1334
+ """,
1335
+ GPTJ_START_DOCSTRING,
1336
+ )
1337
+ class GPTJForQuestionAnswering(GPTJPreTrainedModel):
1338
+ def __init__(self, config):
1339
+ super().__init__(config)
1340
+ self.num_labels = config.num_labels
1341
+ self.transformer = GPTJModel(config)
1342
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
1343
+
1344
+ # Model parallel
1345
+ self.model_parallel = False
1346
+ self.device_map = None
1347
+
1348
+ # Initialize weights and apply final processing
1349
+ self.post_init()
1350
+
1351
+ @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1352
+ @add_code_sample_docstrings(
1353
+ checkpoint=_CHECKPOINT_FOR_DOC,
1354
+ output_type=QuestionAnsweringModelOutput,
1355
+ config_class=_CONFIG_FOR_DOC,
1356
+ real_checkpoint=_REAL_CHECKPOINT_FOR_DOC,
1357
+ )
1358
+ def forward(
1359
+ self,
1360
+ input_ids: Optional[torch.LongTensor] = None,
1361
+ attention_mask: Optional[torch.FloatTensor] = None,
1362
+ token_type_ids: Optional[torch.LongTensor] = None,
1363
+ position_ids: Optional[torch.LongTensor] = None,
1364
+ head_mask: Optional[torch.FloatTensor] = None,
1365
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1366
+ start_positions: Optional[torch.LongTensor] = None,
1367
+ end_positions: Optional[torch.LongTensor] = None,
1368
+ output_attentions: Optional[bool] = None,
1369
+ output_hidden_states: Optional[bool] = None,
1370
+ return_dict: Optional[bool] = None,
1371
+ ) -> Union[Tuple, QuestionAnsweringModelOutput]:
1372
+ r"""
1373
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1374
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1375
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1376
+ are not taken into account for computing the loss.
1377
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1378
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1379
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1380
+ are not taken into account for computing the loss.
1381
+ """
1382
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1383
+
1384
+ outputs = self.transformer(
1385
+ input_ids,
1386
+ attention_mask=attention_mask,
1387
+ token_type_ids=token_type_ids,
1388
+ position_ids=position_ids,
1389
+ head_mask=head_mask,
1390
+ inputs_embeds=inputs_embeds,
1391
+ output_attentions=output_attentions,
1392
+ output_hidden_states=output_hidden_states,
1393
+ return_dict=return_dict,
1394
+ )
1395
+
1396
+ sequence_output = outputs[0]
1397
+
1398
+ logits = self.qa_outputs(sequence_output)
1399
+ start_logits, end_logits = logits.split(1, dim=-1)
1400
+ start_logits = start_logits.squeeze(-1).contiguous()
1401
+ end_logits = end_logits.squeeze(-1).contiguous()
1402
+
1403
+ total_loss = None
1404
+ if start_positions is not None and end_positions is not None:
1405
+ # If we are on multi-GPU, split adds a dimension
1406
+ if len(start_positions.size()) > 1:
1407
+ start_positions = start_positions.squeeze(-1).to(start_logits.device)
1408
+ if len(end_positions.size()) > 1:
1409
+ end_positions = end_positions.squeeze(-1).to(end_logits.device)
1410
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
1411
+ ignored_index = start_logits.size(1)
1412
+ start_positions = start_positions.clamp(0, ignored_index)
1413
+ end_positions = end_positions.clamp(0, ignored_index)
1414
+
1415
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
1416
+ start_loss = loss_fct(start_logits, start_positions)
1417
+ end_loss = loss_fct(end_logits, end_positions)
1418
+ total_loss = (start_loss + end_loss) / 2
1419
+
1420
+ if not return_dict:
1421
+ output = (start_logits, end_logits) + outputs[2:]
1422
+ return ((total_loss,) + output) if total_loss is not None else output
1423
+
1424
+ return QuestionAnsweringModelOutput(
1425
+ loss=total_loss,
1426
+ start_logits=start_logits,
1427
+ end_logits=end_logits,
1428
+ hidden_states=outputs.hidden_states,
1429
+ attentions=outputs.attentions,
1430
+ )
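An illustrative sketch (not part of the original file) of how out-of-range span labels are neutralised above: positions are clamped to the sequence length and that index is then ignored by the loss (the numbers are hypothetical):

# Illustrative sketch: out-of-range start positions are clamped, then ignored by the loss.
seq_len = 10
start_positions = torch.tensor([3, 25])               # the second label falls outside the sequence
start_positions = start_positions.clamp(0, seq_len)   # -> tensor([ 3, 10])
loss_fct = CrossEntropyLoss(ignore_index=seq_len)     # index 10 then contributes no loss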
env-llmeval/lib/python3.10/site-packages/transformers/models/gptj/modeling_tf_gptj.py ADDED
@@ -0,0 +1,1104 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The EleutherAI and HuggingFace Teams. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ TF 2.0 GPT-J model."""
16
+
17
+ from __future__ import annotations
18
+
19
+ from typing import Optional, Tuple, Union
20
+
21
+ import numpy as np
22
+ import tensorflow as tf
23
+
24
+ from ...activations_tf import get_tf_activation
25
+ from ...file_utils import (
26
+ add_code_sample_docstrings,
27
+ add_start_docstrings,
28
+ add_start_docstrings_to_model_forward,
29
+ )
30
+ from ...modeling_tf_outputs import (
31
+ TFBaseModelOutputWithPast,
32
+ TFCausalLMOutputWithPast,
33
+ TFQuestionAnsweringModelOutput,
34
+ TFSequenceClassifierOutputWithPast,
35
+ )
36
+ from ...modeling_tf_utils import (
37
+ TFCausalLanguageModelingLoss,
38
+ TFModelInputType,
39
+ TFPreTrainedModel,
40
+ TFQuestionAnsweringLoss,
41
+ TFSequenceClassificationLoss,
42
+ TFSharedEmbeddings,
43
+ get_initializer,
44
+ keras,
45
+ keras_serializable,
46
+ unpack_inputs,
47
+ )
48
+ from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
49
+ from ...utils import logging
50
+ from .configuration_gptj import GPTJConfig
51
+
52
+
53
+ logger = logging.get_logger(__name__)
54
+
55
+ _CHECKPOINT_FOR_DOC = "EleutherAI/gpt-j-6B"
56
+ _CONFIG_FOR_DOC = "GPTJConfig"
57
+
58
+ GPTJ_PRETRAINED_MODEL_ARCHIVE_LIST = [
59
+ "EleutherAI/gpt-j-6B",
60
+ # See all GPT-J models at https://huggingface.co/models?filter=gptj
61
+ ]
62
+
63
+
64
+ def create_sinusoidal_positions(num_pos: int, dim: int) -> tf.Tensor:
65
+ inv_freq = tf.cast(1.0 / (10000 ** (tf.range(0, dim, 2) / dim)), tf.float32)
66
+ sinusoid_inp = tf.cast(tf.einsum("i , j -> i j", tf.range(num_pos, dtype=tf.float32), inv_freq), tf.float32)
67
+ sin, cos = tf.sin(sinusoid_inp), tf.cos(sinusoid_inp)
68
+ out = tf.concat((sin, cos), axis=1)
69
+ return out
70
+
71
+
72
+ def rotate_every_two(x: tf.Tensor) -> tf.Tensor:
73
+ rotate_half_tensor = tf.stack((-x[:, :, :, 1::2], x[:, :, :, ::2]), axis=-1)
74
+ new_shape = shape_list(rotate_half_tensor)[:-2] + [tf.math.reduce_prod(shape_list(rotate_half_tensor)[-2:])]
75
+ rotate_half_tensor = tf.reshape(rotate_half_tensor, new_shape)
76
+ return rotate_half_tensor
77
+
78
+
79
+ def apply_rotary_pos_emb(tensor: tf.Tensor, sincos: tf.Tensor) -> tf.Tensor:
80
+ sin_pos, cos_pos = sincos
81
+ sin_pos = tf.repeat(sin_pos[:, :, None, :], 2, 3)
82
+ cos_pos = tf.repeat(cos_pos[:, :, None, :], 2, 3)
83
+ return (tensor * cos_pos) + (rotate_every_two(tensor) * sin_pos)
84
+
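+ # The three helpers above implement rotary position embeddings (RoPE):
+ # `create_sinusoidal_positions` precomputes sin/cos tables with angles pos / 10000**(2i/dim),
+ # `rotate_every_two` maps each adjacent feature pair (x1, x2) to (-x2, x1), and
+ # `apply_rotary_pos_emb` combines them so every pair is rotated by a position-dependent angle:
+ #   (x1, x2) -> (x1*cos - x2*sin, x2*cos + x1*sin)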
85
+
86
+ class TFGPTJAttention(keras.layers.Layer):
87
+ def __init__(self, config: GPTJConfig, **kwargs):
88
+ super().__init__(**kwargs)
89
+
90
+ self.embed_dim = config.hidden_size
91
+ self.num_attention_heads = config.num_attention_heads
92
+ self.head_dim = self.embed_dim // self.num_attention_heads
93
+ if self.head_dim * self.num_attention_heads != self.embed_dim:
94
+ raise ValueError(
95
+ f"embed_dim must be divisible by num_attention_heads (got `embed_dim`: {self.embed_dim} and"
96
+ f" `num_attention_heads`: {self.num_attention_heads})."
97
+ )
98
+ self.scale_attn = self.head_dim**0.5
99
+ self.rotary_dim = config.rotary_dim
100
+
101
+ self.attn_dropout = keras.layers.Dropout(config.attn_pdrop)
102
+ self.resid_dropout = keras.layers.Dropout(config.resid_pdrop)
103
+
104
+ self.q_proj = keras.layers.Dense(
105
+ self.embed_dim,
106
+ use_bias=False,
107
+ kernel_initializer=get_initializer(config.initializer_range),
108
+ name="q_proj",
109
+ )
110
+ self.k_proj = keras.layers.Dense(
111
+ self.embed_dim,
112
+ use_bias=False,
113
+ kernel_initializer=get_initializer(config.initializer_range),
114
+ name="k_proj",
115
+ )
116
+ self.v_proj = keras.layers.Dense(
117
+ self.embed_dim,
118
+ use_bias=False,
119
+ kernel_initializer=get_initializer(config.initializer_range),
120
+ name="v_proj",
121
+ )
122
+ self.out_proj = keras.layers.Dense(
123
+ self.embed_dim,
124
+ use_bias=False,
125
+ kernel_initializer=get_initializer(config.initializer_range),
126
+ name="out_proj",
127
+ )
128
+
129
+ self.max_positions = config.max_position_embeddings
130
+ self.lower_triangle_mask = tf.reshape(
131
+ tf.cast(tf.experimental.numpy.tril(tf.ones((self.max_positions, self.max_positions))), tf.int8),
132
+ (1, 1, self.max_positions, self.max_positions),
133
+ )
134
+ pos_embd_dim = self.rotary_dim or self.embed_dim
135
+ self.embed_positions = create_sinusoidal_positions(self.max_positions, pos_embd_dim)
136
+
137
+ def get_causal_mask(self, key_length, query_length) -> tf.Tensor:
138
+ return tf.cast(self.lower_triangle_mask[:, :, key_length - query_length : key_length, :key_length], tf.bool)
139
+
140
+ @staticmethod
141
+ def get_masked_bias(dtype: tf.DType) -> tf.Tensor:
142
+ return tf.cast(tf.constant(-1e9), dtype)
143
+
144
+ def _split_heads(self, hidden_states: tf.Tensor, rotary: bool) -> tf.Tensor:
145
+ """
146
+ Splits hidden dim into attn_head_size and num_attention_heads
147
+ """
148
+ new_shape = shape_list(hidden_states)[:-1] + [self.num_attention_heads, self.head_dim]
149
+ hidden_states = tf.reshape(hidden_states, new_shape)
150
+ if rotary:
151
+ return hidden_states
152
+ if len(shape_list(hidden_states)) == 4:
153
+ return tf.transpose(hidden_states, (0, 2, 1, 3)) # (batch, head, seq_length, head_features)
154
+ if len(shape_list(hidden_states)) == 5:
155
+ return tf.transpose(hidden_states, (0, 1, 3, 2, 4)) # (batch, blocks, head, block_length, head_features)
156
+ raise ValueError(f"Input tensor rank should be one of [4, 5], but is: {len(shape_list(hidden_states))}")
157
+
158
+ def _merge_heads(self, hidden_states: tf.Tensor) -> tf.Tensor:
159
+ """
160
+ Merges attn_head_size dim and num_attn_heads dim into hidden dim
161
+ """
162
+ if len(shape_list(hidden_states)) == 4:
163
+ hidden_states = tf.transpose(hidden_states, (0, 2, 1, 3))
164
+ elif len(shape_list(hidden_states)) == 5:
165
+ hidden_states = tf.transpose(hidden_states, (0, 1, 3, 2, 4))
166
+ else:
167
+ raise ValueError(f"Input tensor rank should be one of [4, 5], but is: {len(shape_list(hidden_states))}")
168
+ new_shape = shape_list(hidden_states)[:-2] + [self.num_attention_heads * self.head_dim]
169
+ return tf.reshape(hidden_states, new_shape)
170
+
171
+ def _attn(
172
+ self,
173
+ query: tf.Tensor,
174
+ key: tf.Tensor,
175
+ value: tf.Tensor,
176
+ attention_mask: tf.Tensor | None = None,
177
+ head_mask: tf.Tensor | None = None,
178
+ ) -> Tuple[tf.Tensor, tf.Tensor]:
179
+ # compute causal mask from causal mask buffer
180
+ query_length, key_length = shape_list(query)[-2], shape_list(key)[-2]
181
+ causal_mask = self.get_causal_mask(key_length, query_length)
182
+
183
+ # Keep the attention weights computation in fp32 to avoid overflow issues
184
+ query = tf.cast(query, tf.float32)
185
+ key = tf.cast(key, tf.float32)
186
+
187
+ attn_weights = tf.matmul(query, key, transpose_b=True)
188
+ attn_weights = tf.where(causal_mask, attn_weights, self.get_masked_bias(attn_weights.dtype))
189
+
190
+ attn_weights = attn_weights / self.scale_attn
191
+
192
+ if attention_mask is not None:
193
+ # Apply the attention mask
194
+ attn_weights = attn_weights + attention_mask
195
+
196
+ attn_weights = stable_softmax(attn_weights, axis=-1)
197
+ attn_weights = tf.cast(attn_weights, value.dtype)
198
+ attn_weights = self.attn_dropout(attn_weights)
199
+
200
+ # Mask heads if we want to
201
+ if head_mask is not None:
202
+ attn_weights = attn_weights * head_mask
203
+
204
+ attn_output = tf.matmul(attn_weights, value)
205
+
206
+ return attn_output, attn_weights
207
+
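+ # Note on `_attn`: causality comes from the precomputed lower-triangular buffer
+ # (`get_causal_mask`); non-causal positions are filled with a large negative bias, the
+ # optional additive `attention_mask` handles padding, and the softmax runs in float32 via
+ # `stable_softmax` to avoid overflow when the model itself runs in lower precision.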
208
+ def call(
209
+ self,
210
+ hidden_states: tf.Tensor,
211
+ layer_past: Optional[Tuple[tf.Tensor, tf.Tensor]] = None,
212
+ attention_mask: tf.Tensor | None = None,
213
+ position_ids: tf.Tensor | None = None,
214
+ head_mask: tf.Tensor | None = None,
215
+ use_cache: bool = False,
216
+ output_attentions: bool = False,
217
+ ):
218
+ query = self.q_proj(hidden_states)
219
+ key = self.k_proj(hidden_states)
220
+ value = self.v_proj(hidden_states)
221
+
222
+ query = self._split_heads(query, True)
223
+ key = self._split_heads(key, True)
224
+ value = self._split_heads(value, False)
225
+
226
+ sincos = tf.cast(tf.gather(self.embed_positions, position_ids, axis=0), hidden_states.dtype)
227
+ sincos = tf.split(sincos, 2, axis=-1)
228
+ if self.rotary_dim is not None:
229
+ k_rot = key[:, :, :, : self.rotary_dim]
230
+ k_pass = key[:, :, :, self.rotary_dim :]
231
+
232
+ q_rot = query[:, :, :, : self.rotary_dim]
233
+ q_pass = query[:, :, :, self.rotary_dim :]
234
+
235
+ k_rot = apply_rotary_pos_emb(k_rot, sincos)
236
+ q_rot = apply_rotary_pos_emb(q_rot, sincos)
237
+
238
+ key = tf.concat((k_rot, k_pass), axis=-1)
239
+ query = tf.concat((q_rot, q_pass), axis=-1)
240
+ else:
241
+ key = apply_rotary_pos_emb(key, sincos)
242
+ query = apply_rotary_pos_emb(query, sincos)
243
+
244
+ key = tf.transpose(key, (0, 2, 1, 3))
245
+ query = tf.transpose(query, (0, 2, 1, 3))
246
+
247
+ if layer_past is not None:
248
+ past_key = layer_past[0]
249
+ past_value = layer_past[1]
250
+ key = tf.concat((past_key, key), axis=-2)
251
+ value = tf.concat((past_value, value), axis=-2)
252
+
253
+ if use_cache is True:
254
+ present = (key, value)
255
+ else:
256
+ present = None
257
+
258
+ # compute self-attention: V x Softmax(QK^T)
259
+ attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)
260
+
261
+ attn_output = self._merge_heads(attn_output)
262
+ attn_output = self.out_proj(attn_output)
263
+ attn_output = self.resid_dropout(attn_output)
264
+
265
+ outputs = (attn_output, present)
266
+ if output_attentions:
267
+ outputs += (attn_weights,)
268
+
269
+ return outputs # a, present, (attentions)
270
+
271
+ def build(self, input_shape=None):
272
+ if self.built:
273
+ return
274
+ self.built = True
275
+ if getattr(self, "q_proj", None) is not None:
276
+ with tf.name_scope(self.q_proj.name):
277
+ self.q_proj.build([None, None, self.embed_dim])
278
+ if getattr(self, "k_proj", None) is not None:
279
+ with tf.name_scope(self.k_proj.name):
280
+ self.k_proj.build([None, None, self.embed_dim])
281
+ if getattr(self, "v_proj", None) is not None:
282
+ with tf.name_scope(self.v_proj.name):
283
+ self.v_proj.build([None, None, self.embed_dim])
284
+ if getattr(self, "out_proj", None) is not None:
285
+ with tf.name_scope(self.out_proj.name):
286
+ self.out_proj.build([None, None, self.embed_dim])
287
+
288
+
289
+ class TFGPTJMLP(keras.layers.Layer):
290
+ def __init__(self, intermediate_size: int, config: GPTJConfig, **kwargs):
291
+ super().__init__(**kwargs)
292
+ embed_dim = config.n_embd
293
+
294
+ self.fc_in = keras.layers.Dense(
295
+ intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="fc_in"
296
+ )
297
+ self.fc_out = keras.layers.Dense(
298
+ embed_dim, kernel_initializer=get_initializer(config.initializer_range), name="fc_out"
299
+ )
300
+
301
+ self.act = get_tf_activation(config.activation_function)
302
+ self.dropout = keras.layers.Dropout(config.embd_pdrop)
303
+ self.embed_dim = config.n_embd
304
+ self.intermediate_size = intermediate_size
305
+
306
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
307
+ hidden_states = self.fc_in(hidden_states)
308
+ hidden_states = self.act(hidden_states)
309
+ hidden_states = self.fc_out(hidden_states)
310
+ hidden_states = self.dropout(hidden_states)
311
+ return hidden_states
312
+
313
+ def build(self, input_shape=None):
314
+ if self.built:
315
+ return
316
+ self.built = True
317
+ if getattr(self, "fc_in", None) is not None:
318
+ with tf.name_scope(self.fc_in.name):
319
+ self.fc_in.build([None, None, self.embed_dim])
320
+ if getattr(self, "fc_out", None) is not None:
321
+ with tf.name_scope(self.fc_out.name):
322
+ self.fc_out.build([None, None, self.intermediate_size])
323
+
324
+
325
+ class TFGPTJBlock(keras.layers.Layer):
326
+ def __init__(self, config: GPTJConfig, **kwargs):
327
+ super().__init__(**kwargs)
328
+ inner_dim = config.n_inner if config.n_inner is not None else 4 * config.n_embd
329
+ self.ln_1 = keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="ln_1")
330
+ self.attn = TFGPTJAttention(config, name="attn")
331
+ self.mlp = TFGPTJMLP(inner_dim, config, name="mlp")
332
+ self.config = config
333
+
334
+ def call(
335
+ self,
336
+ hidden_states: tf.Tensor,
337
+ layer_past: tf.Tensor | None = None,
338
+ attention_mask: tf.Tensor | None = None,
339
+ position_ids: tf.Tensor | None = None,
340
+ head_mask: tf.Tensor | None = None,
341
+ use_cache: bool = False,
342
+ output_attentions: bool = False,
343
+ ):
344
+ residual = hidden_states
345
+ hidden_states = self.ln_1(hidden_states)
346
+ attn_outputs = self.attn(
347
+ hidden_states=hidden_states,
348
+ layer_past=layer_past,
349
+ attention_mask=attention_mask,
350
+ position_ids=position_ids,
351
+ head_mask=head_mask,
352
+ use_cache=use_cache,
353
+ output_attentions=output_attentions,
354
+ ) # attn_outputs: attn_output, present, (attentions)
355
+ attn_output = attn_outputs[0]
356
+ outputs = attn_outputs[1:]
357
+
358
+ feed_forward_hidden_states = self.mlp(hidden_states)
359
+ hidden_states = attn_output + feed_forward_hidden_states + residual
360
+
361
+ if use_cache:
362
+ outputs = (hidden_states,) + outputs
363
+ else:
364
+ outputs = (hidden_states,) + outputs[1:]
365
+ return outputs # hidden_states, present, (attentions)
366
+
367
+ def build(self, input_shape=None):
368
+ if self.built:
369
+ return
370
+ self.built = True
371
+ if getattr(self, "ln_1", None) is not None:
372
+ with tf.name_scope(self.ln_1.name):
373
+ self.ln_1.build([None, None, self.config.n_embd])
374
+ if getattr(self, "attn", None) is not None:
375
+ with tf.name_scope(self.attn.name):
376
+ self.attn.build(None)
377
+ if getattr(self, "mlp", None) is not None:
378
+ with tf.name_scope(self.mlp.name):
379
+ self.mlp.build(None)
380
+
381
+
382
+ @keras_serializable
383
+ class TFGPTJMainLayer(keras.layers.Layer):
384
+ config_class = GPTJConfig
385
+
386
+ def __init__(self, config: GPTJConfig, *inputs, **kwargs):
387
+ super().__init__(*inputs, **kwargs)
388
+
389
+ self.config = config
390
+ self.output_attentions = config.output_attentions
391
+ self.output_hidden_states = config.output_hidden_states
392
+ self.use_cache = config.use_cache
393
+ self.return_dict = config.use_return_dict
394
+
395
+ self.num_hidden_layers = config.n_layer
396
+ self.n_embd = config.n_embd
397
+ self.n_positions = config.n_positions
398
+ self.initializer_range = config.initializer_range
399
+
400
+ self.wte = TFSharedEmbeddings(
401
+ config.vocab_size, config.hidden_size, initializer_range=config.initializer_range, name="wte"
402
+ )
403
+ self.drop = keras.layers.Dropout(config.embd_pdrop)
404
+ self.h = [TFGPTJBlock(config, name=f"h_._{i}") for i in range(config.n_layer)]
405
+ self.ln_f = keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="ln_f")
406
+ self.embed_dim = config.n_embd
407
+
408
+ def get_input_embeddings(self):
409
+ return self.wte
410
+
411
+ def set_input_embeddings(self, value: tf.Tensor):
412
+ self.wte.weight = value
413
+ self.wte.vocab_size = shape_list(value)[0]
414
+
415
+ def _prune_heads(self, heads_to_prune):
416
+ """
417
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
418
+ """
419
+ raise NotImplementedError
420
+
421
+ @unpack_inputs
422
+ def call(
423
+ self,
424
+ input_ids=None,
425
+ past_key_values=None,
426
+ attention_mask=None,
427
+ token_type_ids=None,
428
+ position_ids=None,
429
+ head_mask=None,
430
+ inputs_embeds=None,
431
+ use_cache=None,
432
+ output_attentions=None,
433
+ output_hidden_states=None,
434
+ return_dict=None,
435
+ training=False,
436
+ ) -> Union[TFBaseModelOutputWithPast, Tuple[tf.Tensor]]:
437
+ if input_ids is not None and inputs_embeds is not None:
438
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
439
+ elif input_ids is not None:
440
+ input_shape = shape_list(input_ids)
441
+ input_ids = tf.reshape(input_ids, [-1, input_shape[-1]])
442
+ elif inputs_embeds is not None:
443
+ input_shape = shape_list(inputs_embeds)[:-1]
444
+ else:
445
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
446
+
447
+ if past_key_values is None:
448
+ past_length = 0
449
+ past_key_values = [None] * len(self.h)
450
+ else:
451
+ past_length = shape_list(past_key_values[0][0])[-2]
452
+
453
+ if position_ids is None:
454
+ position_ids = tf.expand_dims(tf.range(past_length, input_shape[-1] + past_length), axis=0)
455
+
456
+ if attention_mask is not None:
457
+ # We create a 3D attention mask from a 2D tensor mask.
458
+ # Sizes are [batch_size, 1, 1, to_seq_length]
459
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
460
+ # this attention mask is more simple than the triangular masking of causal attention
461
+ # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
462
+ attention_mask_shape = shape_list(attention_mask)
463
+ attention_mask = tf.reshape(attention_mask, (attention_mask_shape[0], 1, 1, attention_mask_shape[1]))
464
+
465
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
466
+ # masked positions, this operation will create a tensor which is 0.0 for
467
+ # positions we want to attend and -10000.0 for masked positions.
468
+ # Since we are adding it to the raw scores before the softmax, this is
469
+ # effectively the same as removing these entirely.
470
+ one_cst = tf.constant(1.0)
471
+ attention_mask = tf.cast(attention_mask, dtype=one_cst.dtype)
472
+ attention_mask = tf.multiply(tf.subtract(one_cst, attention_mask), tf.constant(-10000.0))
473
+
474
+ # Prepare head mask if needed
475
+ # 1.0 in head_mask indicate we keep the head
476
+ # attention_probs has shape bsz x n_heads x N x N
477
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
478
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
479
+ if head_mask is not None:
480
+ raise NotImplementedError
481
+ else:
482
+ head_mask = [None] * self.num_hidden_layers
483
+ # head_mask = tf.constant([0] * self.num_hidden_layers)
484
+
485
+ position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]])
486
+
487
+ if inputs_embeds is None:
488
+ check_embeddings_within_bounds(input_ids, self.wte.vocab_size)
489
+ inputs_embeds = self.wte(input_ids, mode="embedding")
490
+
491
+ if token_type_ids is not None:
492
+ token_type_ids = tf.reshape(token_type_ids, [-1, shape_list(token_type_ids)[-1]])
493
+ token_type_embeds = self.wte(token_type_ids, mode="embedding")
494
+ else:
495
+ token_type_embeds = tf.constant(0.0)
496
+
497
+ token_type_embeds = tf.cast(token_type_embeds, dtype=inputs_embeds.dtype)
498
+ hidden_states = inputs_embeds + token_type_embeds
499
+ hidden_states = self.drop(hidden_states, training=training)
500
+
501
+ output_shape = input_shape + [shape_list(hidden_states)[-1]]
502
+
503
+ presents = () if use_cache else None
504
+ all_attentions = () if output_attentions else None
505
+ all_hidden_states = () if output_hidden_states else None
506
+ for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
507
+ if output_hidden_states:
508
+ all_hidden_states = all_hidden_states + (tf.reshape(hidden_states, output_shape),)
509
+
510
+ outputs = block(
511
+ hidden_states=hidden_states,
512
+ layer_past=layer_past,
513
+ attention_mask=attention_mask,
514
+ position_ids=position_ids,
515
+ head_mask=head_mask[i],
516
+ use_cache=use_cache,
517
+ output_attentions=output_attentions,
518
+ training=training,
519
+ )
520
+
521
+ hidden_states = outputs[0]
522
+ if use_cache:
523
+ presents = presents + (outputs[1],)
524
+
525
+ if output_attentions:
526
+ all_attentions = all_attentions + (outputs[2 if use_cache else 1],)
527
+
528
+ hidden_states = self.ln_f(hidden_states)
529
+
530
+ hidden_states = tf.reshape(hidden_states, output_shape)
531
+ # Add last hidden state
532
+ if output_hidden_states:
533
+ all_hidden_states = all_hidden_states + (hidden_states,)
534
+
535
+ if output_attentions:
536
+ # let the number of heads free (-1) so we can extract attention even after head pruning
537
+ attention_output_shape = input_shape[:-1] + [-1] + shape_list(all_attentions[0])[-2:]
538
+ all_attentions = tuple(tf.reshape(t, attention_output_shape) for t in all_attentions)
539
+
540
+ if not return_dict:
541
+ return tuple(v for v in [hidden_states, presents, all_hidden_states, all_attentions] if v is not None)
542
+
543
+ return TFBaseModelOutputWithPast(
544
+ last_hidden_state=hidden_states,
545
+ past_key_values=presents,
546
+ hidden_states=all_hidden_states,
547
+ attentions=all_attentions,
548
+ )
549
+
550
+ def build(self, input_shape=None):
551
+ if self.built:
552
+ return
553
+ self.built = True
554
+ if getattr(self, "wte", None) is not None:
555
+ with tf.name_scope(self.wte.name):
556
+ self.wte.build(None)
557
+ if getattr(self, "ln_f", None) is not None:
558
+ with tf.name_scope(self.ln_f.name):
559
+ self.ln_f.build([None, None, self.embed_dim])
560
+ if getattr(self, "h", None) is not None:
561
+ for layer in self.h:
562
+ with tf.name_scope(layer.name):
563
+ layer.build(None)
564
+
565
+
566
+ class TFGPTJPreTrainedModel(TFPreTrainedModel):
567
+ """
568
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
569
+ models.
570
+ """
571
+
572
+ config_class = GPTJConfig
573
+ base_model_prefix = "transformer"
574
+ # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model
575
+ _keys_to_ignore_on_load_unexpected = [r"h.\d+.attn.bias"]
576
+
577
+
578
+ GPTJ_START_DOCSTRING = r"""
579
+
580
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
581
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
582
+ etc.)
583
+
584
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
585
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
586
+ behavior.
587
+
588
+ <Tip>
589
+
590
+ TensorFlow models and layers in `transformers` accept two formats as input:
591
+
592
+ - having all inputs as keyword arguments (like PyTorch models), or
593
+ - having all inputs as a list, tuple or dict in the first positional argument.
594
+
595
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
596
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
597
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
598
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
599
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
600
+ positional argument:
601
+
602
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
603
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
604
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
605
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
606
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
607
+
608
+ Note that when creating models and layers with
609
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
610
+ about any of this, as you can just pass inputs like you would to any other Python function!
611
+
612
+ </Tip>
613
+
614
+ Parameters:
615
+ config ([`GPTJConfig`]): Model configuration class with all the parameters of the model.
616
+ Initializing with a config file does not load the weights associated with the model, only the
617
+ configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
618
+ """
619
+
620
+ GPTJ_INPUTS_DOCSTRING = r"""
621
+ Args:
622
+ input_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, input_ids_length)`):
623
+ `input_ids_length` = `sequence_length` if `past` is `None` else `past[0].shape[-2]` (`sequence_length` of
624
+ input past key value states). Indices of input sequence tokens in the vocabulary.
625
+
626
+ If `past` is used, only input IDs that do not have their past calculated should be passed as `input_ids`.
627
+
628
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
629
+ [`PreTrainedTokenizer.encode`] for details.
630
+
631
+ [What are input IDs?](../glossary#input-ids)
632
+ past_key_values (`List[tf.Tensor]` of length `config.n_layers`):
633
+ Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see
634
+ `past` output below). Can be used to speed up sequential decoding. The token ids which have their past
635
+ given to this model should not be passed as input ids as they have already been computed.
636
+ attention_mask (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):
637
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
638
+
639
+ - 1 for tokens that are **not masked**,
640
+ - 0 for tokens that are **masked**.
641
+
642
+ [What are attention masks?](../glossary#attention-mask)
643
+ token_type_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):
644
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
645
+ 1]`:
646
+
647
+ - 0 corresponds to a *sentence A* token,
648
+ - 1 corresponds to a *sentence B* token.
649
+
650
+ [What are token type IDs?](../glossary#token-type-ids)
651
+ position_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):
652
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
653
+ config.max_position_embeddings - 1]`.
654
+
655
+ [What are position IDs?](../glossary#position-ids)
656
+ head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
657
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
658
+
659
+ - 1 indicates the head is **not masked**,
660
+ - 0 indicates the head is **masked**.
661
+
662
+ inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
663
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
664
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
665
+ model's internal embedding lookup matrix.
666
+ output_attentions (`bool`, *optional*):
667
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
668
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
669
+ config will be used instead.
670
+ output_hidden_states (`bool`, *optional*):
671
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
672
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
673
+ used instead.
674
+ return_dict (`bool`, *optional*):
675
+ Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. This argument can be used
676
+ in eager mode, in graph mode the value will always be set to True.
677
+ training (`bool`, *optional*, defaults to `False`):
678
+ Whether or not to use the model in training mode (some modules like dropout modules have different
679
+ behaviors between training and evaluation).
680
+ """
681
+
682
+
683
+ @add_start_docstrings(
684
+ "The bare GPT-J Model transformer outputting raw hidden-states without any specific head on top.",
685
+ GPTJ_START_DOCSTRING,
686
+ )
687
+ class TFGPTJModel(TFGPTJPreTrainedModel):
688
+ def __init__(self, config, *inputs, **kwargs):
689
+ super().__init__(config, *inputs, **kwargs)
690
+ self.transformer = TFGPTJMainLayer(config, name="transformer")
691
+
692
+ @unpack_inputs
693
+ @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING)
694
+ @add_code_sample_docstrings(
695
+ checkpoint=_CHECKPOINT_FOR_DOC,
696
+ output_type=TFBaseModelOutputWithPast,
697
+ config_class=_CONFIG_FOR_DOC,
698
+ )
699
+ def call(
700
+ self,
701
+ input_ids: TFModelInputType | None = None,
702
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
703
+ attention_mask: np.ndarray | tf.Tensor | None = None,
704
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
705
+ position_ids: np.ndarray | tf.Tensor | None = None,
706
+ head_mask: np.ndarray | tf.Tensor | None = None,
707
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
708
+ use_cache: Optional[bool] = None,
709
+ output_attentions: Optional[bool] = None,
710
+ output_hidden_states: Optional[bool] = None,
711
+ return_dict: Optional[bool] = None,
712
+ training: Optional[bool] = False,
713
+ ) -> Union[TFBaseModelOutputWithPast, Tuple[tf.Tensor]]:
714
+ r"""
715
+ use_cache (`bool`, *optional*, defaults to `True`):
716
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
717
+ `past`). Set to `False` during training, `True` during generation
718
+ """
719
+
720
+ outputs = self.transformer(
721
+ input_ids=input_ids,
722
+ past_key_values=past_key_values,
723
+ attention_mask=attention_mask,
724
+ token_type_ids=token_type_ids,
725
+ position_ids=position_ids,
726
+ head_mask=head_mask,
727
+ inputs_embeds=inputs_embeds,
728
+ use_cache=use_cache,
729
+ output_attentions=output_attentions,
730
+ output_hidden_states=output_hidden_states,
731
+ return_dict=return_dict,
732
+ training=training,
733
+ )
734
+
735
+ return outputs
736
+
737
+ def build(self, input_shape=None):
738
+ if self.built:
739
+ return
740
+ self.built = True
741
+ if getattr(self, "transformer", None) is not None:
742
+ with tf.name_scope(self.transformer.name):
743
+ self.transformer.build(None)
744
+
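+ # Minimal usage sketch for `TFGPTJModel` above (assumes the TF weights for the
+ # "EleutherAI/gpt-j-6B" checkpoint are available locally or downloadable):
+ #   from transformers import AutoTokenizer, TFGPTJModel
+ #   tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
+ #   model = TFGPTJModel.from_pretrained("EleutherAI/gpt-j-6B")
+ #   inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
+ #   last_hidden_state = model(inputs).last_hidden_state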
745
+
746
+ @add_start_docstrings(
747
+ """
748
+ The GPT-J Model transformer with a language modeling head on top.
749
+ """,
750
+ GPTJ_START_DOCSTRING,
751
+ )
752
+ class TFGPTJForCausalLM(TFGPTJPreTrainedModel, TFCausalLanguageModelingLoss):
753
+ def __init__(self, config, *inputs, **kwargs):
754
+ super().__init__(config, *inputs, **kwargs)
755
+ self.transformer = TFGPTJMainLayer(config, name="transformer")
756
+ self.lm_head = keras.layers.Dense(
757
+ config.vocab_size, kernel_initializer=get_initializer(config.initializer_range), name="lm_head"
758
+ )
759
+ self.config = config
760
+
761
+ def get_output_embeddings(self):
762
+ return self.lm_head
763
+
764
+ def set_output_embeddings(self, new_embeddings):
765
+ self.lm_head = new_embeddings
766
+
767
+ def prepare_inputs_for_generation(self, inputs, past_key_values=None, use_cache=None, **kwargs):
768
+ token_type_ids = kwargs.get("token_type_ids", None)
769
+ # only last token for inputs_ids if past is defined in kwargs
770
+ if past_key_values:
771
+ inputs = tf.expand_dims(inputs[:, -1], -1)
772
+ if token_type_ids is not None:
773
+ token_type_ids = tf.expand_dims(token_type_ids[:, -1], -1)
774
+
775
+ position_ids = kwargs.get("position_ids", None)
776
+ attention_mask = kwargs.get("attention_mask", None)
777
+
778
+ if attention_mask is not None and position_ids is None:
779
+ position_ids = tf.math.cumsum(attention_mask, axis=-1, exclusive=True)
780
+ if past_key_values:
781
+ position_ids = tf.expand_dims(position_ids[:, -1], -1)
782
+
783
+ return {
784
+ "input_ids": inputs,
785
+ "attention_mask": attention_mask,
786
+ "position_ids": position_ids,
787
+ "past_key_values": past_key_values,
788
+ "use_cache": use_cache,
789
+ "token_type_ids": token_type_ids,
790
+ }
791
+
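+ # During generation, once `past_key_values` is populated only the newest token (and its
+ # token_type_id / position_id) is fed to the model; `position_ids` are rebuilt from an
+ # exclusive cumulative sum of `attention_mask`, so padding tokens never advance the position.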
792
+ @unpack_inputs
793
+ @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
794
+ @add_code_sample_docstrings(
795
+ checkpoint=_CHECKPOINT_FOR_DOC,
796
+ output_type=TFCausalLMOutputWithPast,
797
+ config_class=_CONFIG_FOR_DOC,
798
+ )
799
+ def call(
800
+ self,
801
+ input_ids: TFModelInputType | None = None,
802
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
803
+ attention_mask: np.ndarray | tf.Tensor | None = None,
804
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
805
+ position_ids: np.ndarray | tf.Tensor | None = None,
806
+ head_mask: np.ndarray | tf.Tensor | None = None,
807
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
808
+ labels: np.ndarray | tf.Tensor | None = None,
809
+ use_cache: Optional[bool] = None,
810
+ output_attentions: Optional[bool] = None,
811
+ output_hidden_states: Optional[bool] = None,
812
+ return_dict: Optional[bool] = None,
813
+ training: Optional[bool] = False,
814
+ ) -> Union[TFCausalLMOutputWithPast, Tuple[tf.Tensor]]:
815
+ r"""
816
+ labels (`np.ndarray` or `tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
817
+ Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
818
+ `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
819
+ are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
820
+ """
821
+
822
+ transformer_outputs = self.transformer(
823
+ input_ids=input_ids,
824
+ past_key_values=past_key_values,
825
+ attention_mask=attention_mask,
826
+ token_type_ids=token_type_ids,
827
+ position_ids=position_ids,
828
+ head_mask=head_mask,
829
+ inputs_embeds=inputs_embeds,
830
+ use_cache=use_cache,
831
+ output_attentions=output_attentions,
832
+ output_hidden_states=output_hidden_states,
833
+ return_dict=return_dict,
834
+ training=training,
835
+ )
836
+ hidden_states = transformer_outputs[0]
837
+ lm_logits = self.lm_head(hidden_states)
838
+
839
+ loss = None
840
+ if labels is not None:
841
+ # shift labels to the left and cut last logit token
842
+ shifted_logits = lm_logits[:, :-1]
843
+ labels = labels[:, 1:]
844
+ loss = self.hf_compute_loss(labels, shifted_logits)
845
+
846
+ if not return_dict:
847
+ output = (lm_logits,) + transformer_outputs[1:]
848
+ return ((loss,) + output) if loss is not None else output
849
+
850
+ return TFCausalLMOutputWithPast(
851
+ loss=loss,
852
+ logits=lm_logits,
853
+ past_key_values=transformer_outputs.past_key_values,
854
+ hidden_states=transformer_outputs.hidden_states,
855
+ attentions=transformer_outputs.attentions,
856
+ )
857
+
858
+ def build(self, input_shape=None):
859
+ if self.built:
860
+ return
861
+ self.built = True
862
+ if getattr(self, "transformer", None) is not None:
863
+ with tf.name_scope(self.transformer.name):
864
+ self.transformer.build(None)
865
+ if getattr(self, "lm_head", None) is not None:
866
+ with tf.name_scope(self.lm_head.name):
867
+ self.lm_head.build([None, None, self.config.n_embd])
868
+
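+ # Text-generation sketch with `TFGPTJForCausalLM` above (hypothetical prompt; TF weights for
+ # the checkpoint are assumed to be available):
+ #   from transformers import AutoTokenizer, TFGPTJForCausalLM
+ #   tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
+ #   model = TFGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
+ #   input_ids = tokenizer("The capital of France is", return_tensors="tf").input_ids
+ #   generated = model.generate(input_ids, max_new_tokens=8)
+ #   print(tokenizer.decode(generated[0], skip_special_tokens=True))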
869
+
870
+ @add_start_docstrings(
871
+ """
872
+ The GPT-J Model transformer with a sequence classification head on top (linear layer).
873
+
874
+ [`GPTJForSequenceClassification`] uses the last token in order to do the classification, as other causal models
875
+ (e.g. GPT, GPT-2, GPT-Neo) do.
876
+
877
+ Since it does classification on the last token, it requires to know the position of the last token. If a
878
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
879
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
880
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
881
+ each row of the batch).
882
+ """,
883
+ GPTJ_START_DOCSTRING,
884
+ )
885
+ class TFGPTJForSequenceClassification(TFGPTJPreTrainedModel, TFSequenceClassificationLoss):
886
+ _keys_to_ignore_on_load_missing = [r"h.\d+.attn.masked_bias", r"h.\d+.attn.bias", r"lm_head.weight"]
887
+
888
+ def __init__(self, config, *inputs, **kwargs):
889
+ super().__init__(config, *inputs, **kwargs)
890
+ self.num_labels = config.num_labels
891
+ self.transformer = TFGPTJMainLayer(config, name="transformer")
892
+ self.score = keras.layers.Dense(
893
+ self.num_labels,
894
+ use_bias=False,
895
+ kernel_initializer=get_initializer(config.initializer_range),
896
+ name="score",
897
+ )
898
+ self.config = config
899
+
900
+ @unpack_inputs
901
+ @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
902
+ @add_code_sample_docstrings(
903
+ checkpoint=_CHECKPOINT_FOR_DOC,
904
+ output_type=TFSequenceClassifierOutputWithPast,
905
+ config_class=_CONFIG_FOR_DOC,
906
+ )
907
+ def call(
908
+ self,
909
+ input_ids: TFModelInputType | None = None,
910
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
911
+ attention_mask: np.ndarray | tf.Tensor | None = None,
912
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
913
+ position_ids: np.ndarray | tf.Tensor | None = None,
914
+ head_mask: np.ndarray | tf.Tensor | None = None,
915
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
916
+ labels: np.ndarray | tf.Tensor | None = None,
917
+ use_cache: Optional[bool] = None,
918
+ output_attentions: Optional[bool] = None,
919
+ output_hidden_states: Optional[bool] = None,
920
+ return_dict: Optional[bool] = None,
921
+ training: Optional[bool] = False,
922
+ ) -> Union[TFSequenceClassifierOutputWithPast, Tuple[tf.Tensor]]:
923
+ r"""
924
+ labels (`np.ndarray` or `tf.Tensor` of shape `(batch_size,)`, *optional*):
925
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
926
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
927
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
928
+ """
929
+
930
+ transformer_outputs = self.transformer(
931
+ input_ids=input_ids,
932
+ past_key_values=past_key_values,
933
+ attention_mask=attention_mask,
934
+ token_type_ids=token_type_ids,
935
+ position_ids=position_ids,
936
+ head_mask=head_mask,
937
+ inputs_embeds=inputs_embeds,
938
+ use_cache=use_cache,
939
+ output_attentions=output_attentions,
940
+ output_hidden_states=output_hidden_states,
941
+ return_dict=return_dict,
942
+ training=training,
943
+ )
944
+ hidden_states = transformer_outputs[0]
945
+ logits = self.score(hidden_states)
946
+ logits_shape = shape_list(logits)
947
+ in_logits = None
948
+ if self.config.pad_token_id is None:
949
+ sequence_lengths = -1
950
+ else:
951
+ if input_ids is not None:
952
+ sequence_lengths = (
953
+ tf.argmax(tf.cast(tf.math.equal(input_ids, self.config.pad_token_id), input_ids.dtype), axis=-1)
954
+ - 1
955
+ )
956
+ sequence_lengths = tf.where(
957
+ sequence_lengths >= 0,
958
+ sequence_lengths,
959
+ tf.cast(shape_list(input_ids[-1]), sequence_lengths.dtype) - 1,
960
+ )
961
+ in_logits = tf.gather(logits, sequence_lengths, batch_dims=1, axis=1)
962
+ else:
963
+ sequence_lengths = -1
964
+ logger.warning(
965
+ f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
966
+ "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
967
+ )
968
+ loss = None
969
+
970
+ if labels is not None:
971
+ if self.config.pad_token_id is None and logits_shape[0] != 1:
972
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
973
+
974
+ if not tf.is_tensor(sequence_lengths):
975
+ in_logits = logits[0 : logits_shape[0], sequence_lengths]
976
+
977
+ loss = self.hf_compute_loss(tf.reshape(labels, [-1]), tf.reshape(in_logits, [-1, self.num_labels]))
978
+ pooled_logits = in_logits if in_logits is not None else logits
979
+
980
+ if not return_dict:
981
+ output = (pooled_logits,) + transformer_outputs[1:]
982
+ return ((loss,) + output) if loss is not None else output
983
+
984
+ return TFSequenceClassifierOutputWithPast(
985
+ loss=loss,
986
+ logits=pooled_logits,
987
+ past_key_values=transformer_outputs.past_key_values,
988
+ hidden_states=transformer_outputs.hidden_states,
989
+ attentions=transformer_outputs.attentions,
990
+ )
991
+
992
+ def build(self, input_shape=None):
993
+ if self.built:
994
+ return
995
+ self.built = True
996
+ if getattr(self, "transformer", None) is not None:
997
+ with tf.name_scope(self.transformer.name):
998
+ self.transformer.build(None)
999
+ if getattr(self, "score", None) is not None:
1000
+ with tf.name_scope(self.score.name):
1001
+ self.score.build([None, None, self.config.n_embd])
1002
+
1003
+
1004
+ @add_start_docstrings(
1005
+ """
1006
+ The GPT-J Model transformer with a span classification head on top for extractive question-answering tasks like
1007
+ SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
1008
+ """,
1009
+ GPTJ_START_DOCSTRING,
1010
+ )
1011
+ class TFGPTJForQuestionAnswering(TFGPTJPreTrainedModel, TFQuestionAnsweringLoss):
1012
+ _keys_to_ignore_on_load_missing = [r"h.\d+.attn.masked_bias", r"h.\d+.attn.bias", r"lm_head.weight"]
1013
+
1014
+ def __init__(self, config, *inputs, **kwargs):
1015
+ super().__init__(config, *inputs, **kwargs)
1016
+ self.num_labels = config.num_labels
1017
+ self.transformer = TFGPTJMainLayer(config, name="transformer")
1018
+ self.qa_outputs = keras.layers.Dense(
1019
+ self.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
1020
+ )
1021
+ self.config = config
1022
+
1023
+ @unpack_inputs
1024
+ @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1025
+ @add_code_sample_docstrings(
1026
+ checkpoint=_CHECKPOINT_FOR_DOC,
1027
+ output_type=TFQuestionAnsweringModelOutput,
1028
+ config_class=_CONFIG_FOR_DOC,
1029
+ )
1030
+ def call(
1031
+ self,
1032
+ input_ids: TFModelInputType | None = None,
1033
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
1034
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1035
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1036
+ position_ids: np.ndarray | tf.Tensor | None = None,
1037
+ head_mask: np.ndarray | tf.Tensor | None = None,
1038
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1039
+ start_positions: np.ndarray | tf.Tensor | None = None,
1040
+ end_positions: np.ndarray | tf.Tensor | None = None,
1041
+ output_attentions: Optional[bool] = None,
1042
+ output_hidden_states: Optional[bool] = None,
1043
+ return_dict: Optional[bool] = None,
1044
+ training: Optional[bool] = False,
1045
+ ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
1046
+ r"""
1047
+ start_positions (`np.ndarray` or `tf.Tensor` of shape `(batch_size,)`, *optional*):
1048
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1049
+ Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
1050
+ are not taken into account for computing the loss.
1051
+ end_positions (`np.ndarray` or `tf.Tensor` of shape `(batch_size,)`, *optional*):
1052
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1053
+ Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
1054
+ are not taken into account for computing the loss.
1055
+ """
1056
+
1057
+ transformer_outputs = self.transformer(
1058
+ input_ids=input_ids,
1059
+ past_key_values=past_key_values,
1060
+ attention_mask=attention_mask,
1061
+ token_type_ids=token_type_ids,
1062
+ position_ids=position_ids,
1063
+ head_mask=head_mask,
1064
+ inputs_embeds=inputs_embeds,
1065
+ output_attentions=output_attentions,
1066
+ output_hidden_states=output_hidden_states,
1067
+ return_dict=return_dict,
1068
+ training=training,
1069
+ )
1070
+ sequence_output = transformer_outputs[0]
1071
+
1072
+ logits = self.qa_outputs(sequence_output)
1073
+ start_logits, end_logits = tf.split(logits, 2, axis=-1)
1074
+ start_logits = tf.squeeze(start_logits, axis=-1)
1075
+ end_logits = tf.squeeze(end_logits, axis=-1)
1076
+
1077
+ loss = None
1078
+ if start_positions is not None and end_positions is not None:
1079
+ labels = {"start_position": start_positions}
1080
+ labels["end_position"] = end_positions
1081
+ loss = self.hf_compute_loss(labels, (start_logits, end_logits))
1082
+
1083
+ if not return_dict:
1084
+ output = (start_logits, end_logits) + transformer_outputs[2:]
1085
+ return ((loss,) + output) if loss is not None else output
1086
+
1087
+ return TFQuestionAnsweringModelOutput(
1088
+ loss=loss,
1089
+ start_logits=start_logits,
1090
+ end_logits=end_logits,
1091
+ hidden_states=transformer_outputs.hidden_states,
1092
+ attentions=transformer_outputs.attentions,
1093
+ )
1094
+
1095
+ def build(self, input_shape=None):
1096
+ if self.built:
1097
+ return
1098
+ self.built = True
1099
+ if getattr(self, "transformer", None) is not None:
1100
+ with tf.name_scope(self.transformer.name):
1101
+ self.transformer.build(None)
1102
+ if getattr(self, "qa_outputs", None) is not None:
1103
+ with tf.name_scope(self.qa_outputs.name):
1104
+ self.qa_outputs.build([None, None, self.config.hidden_size])
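+ # Sketch of decoding a span from the QA head above (assumes `model`, `tokenizer` and a
+ # tokenized question/context pair `inputs` already exist, and that a QA-finetuned GPT-J
+ # checkpoint is being used):
+ #   outputs = model(**inputs)
+ #   start = int(tf.math.argmax(outputs.start_logits, axis=-1)[0])
+ #   end = int(tf.math.argmax(outputs.end_logits, axis=-1)[0])
+ #   answer = tokenizer.decode(inputs["input_ids"][0, start : end + 1])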
env-llmeval/lib/python3.10/site-packages/transformers/models/groupvit/__init__.py ADDED
@@ -0,0 +1,97 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
17
+
18
+
19
+ _import_structure = {
20
+ "configuration_groupvit": [
21
+ "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
22
+ "GroupViTConfig",
23
+ "GroupViTOnnxConfig",
24
+ "GroupViTTextConfig",
25
+ "GroupViTVisionConfig",
26
+ ],
27
+ }
28
+
29
+ try:
30
+ if not is_torch_available():
31
+ raise OptionalDependencyNotAvailable()
32
+ except OptionalDependencyNotAvailable:
33
+ pass
34
+ else:
35
+ _import_structure["modeling_groupvit"] = [
36
+ "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
37
+ "GroupViTModel",
38
+ "GroupViTPreTrainedModel",
39
+ "GroupViTTextModel",
40
+ "GroupViTVisionModel",
41
+ ]
42
+
43
+ try:
44
+ if not is_tf_available():
45
+ raise OptionalDependencyNotAvailable()
46
+ except OptionalDependencyNotAvailable:
47
+ pass
48
+ else:
49
+ _import_structure["modeling_tf_groupvit"] = [
50
+ "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
51
+ "TFGroupViTModel",
52
+ "TFGroupViTPreTrainedModel",
53
+ "TFGroupViTTextModel",
54
+ "TFGroupViTVisionModel",
55
+ ]
56
+
57
+ if TYPE_CHECKING:
58
+ from .configuration_groupvit import (
59
+ GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
60
+ GroupViTConfig,
61
+ GroupViTOnnxConfig,
62
+ GroupViTTextConfig,
63
+ GroupViTVisionConfig,
64
+ )
65
+
66
+ try:
67
+ if not is_torch_available():
68
+ raise OptionalDependencyNotAvailable()
69
+ except OptionalDependencyNotAvailable:
70
+ pass
71
+ else:
72
+ from .modeling_groupvit import (
73
+ GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
74
+ GroupViTModel,
75
+ GroupViTPreTrainedModel,
76
+ GroupViTTextModel,
77
+ GroupViTVisionModel,
78
+ )
79
+
80
+ try:
81
+ if not is_tf_available():
82
+ raise OptionalDependencyNotAvailable()
83
+ except OptionalDependencyNotAvailable:
84
+ pass
85
+ else:
86
+ from .modeling_tf_groupvit import (
87
+ TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
88
+ TFGroupViTModel,
89
+ TFGroupViTPreTrainedModel,
90
+ TFGroupViTTextModel,
91
+ TFGroupViTVisionModel,
92
+ )
93
+
94
+ else:
95
+ import sys
96
+
97
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
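+ # Note: like the other model packages, this __init__ registers a `_LazyModule`, so
+ # `configuration_groupvit`, `modeling_groupvit` and `modeling_tf_groupvit` are only imported
+ # when one of their names is first accessed, keeping `import transformers` fast and making
+ # the torch / TensorFlow backends optional.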
env-llmeval/lib/python3.10/site-packages/transformers/models/groupvit/configuration_groupvit.py ADDED
@@ -0,0 +1,453 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ GroupViT model configuration"""
16
+
17
+ import os
18
+ from collections import OrderedDict
19
+ from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
20
+
21
+ from ...configuration_utils import PretrainedConfig
22
+ from ...onnx import OnnxConfig
23
+ from ...utils import logging
24
+
25
+
26
+ if TYPE_CHECKING:
27
+ from ...processing_utils import ProcessorMixin
28
+ from ...utils import TensorType
29
+
30
+
31
+ logger = logging.get_logger(__name__)
32
+
33
+ GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
34
+ "nvidia/groupvit-gcc-yfcc": "https://huggingface.co/nvidia/groupvit-gcc-yfcc/resolve/main/config.json",
35
+ }
36
+
37
+
38
+ class GroupViTTextConfig(PretrainedConfig):
39
+ r"""
40
+ This is the configuration class to store the configuration of a [`GroupViTTextModel`]. It is used to instantiate an
41
+ GroupViT model according to the specified arguments, defining the model architecture. Instantiating a configuration
42
+ with the defaults will yield a similar configuration to that of the GroupViT
43
+ [nvidia/groupvit-gcc-yfcc](https://huggingface.co/nvidia/groupvit-gcc-yfcc) architecture.
44
+
45
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
46
+ documentation from [`PretrainedConfig`] for more information.
47
+
48
+ Args:
49
+ vocab_size (`int`, *optional*, defaults to 49408):
50
+ Vocabulary size of the GroupViT text model. Defines the number of different tokens that can be represented
51
+ by the `inputs_ids` passed when calling [`GroupViTModel`].
52
+ hidden_size (`int`, *optional*, defaults to 256):
53
+ Dimensionality of the encoder layers and the pooler layer.
54
+ intermediate_size (`int`, *optional*, defaults to 1024):
55
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
56
+ num_hidden_layers (`int`, *optional*, defaults to 12):
57
+ Number of hidden layers in the Transformer encoder.
58
+ num_attention_heads (`int`, *optional*, defaults to 4):
59
+ Number of attention heads for each attention layer in the Transformer encoder.
60
+ max_position_embeddings (`int`, *optional*, defaults to 77):
61
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
62
+ just in case (e.g., 512 or 1024 or 2048).
63
+ hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
64
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
65
+ `"relu"`, `"selu"` and `"gelu_new"` ``"quick_gelu"` are supported.
66
+ layer_norm_eps (`float`, *optional*, defaults to 1e-5):
67
+ The epsilon used by the layer normalization layers.
68
+ attention_dropout (`float`, *optional*, defaults to 0.0):
69
+ The dropout ratio for the attention probabilities.
70
+ dropout (`float`, *optional*, defaults to 0.0):
71
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
72
+ initializer_range (`float`, *optional*, defaults to 0.02):
73
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
74
+ initializer_factor (`float`, *optional*, defaults to 1.0):
75
+ A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
76
+ testing).
77
+
78
+ Example:
79
+
80
+ ```python
81
+ >>> from transformers import GroupViTTextConfig, GroupViTTextModel
82
+
83
+ >>> # Initializing a GroupViTTextModel with nvidia/groupvit-gcc-yfcc style configuration
84
+ >>> configuration = GroupViTTextConfig()
85
+
86
+ >>> model = GroupViTTextModel(configuration)
87
+
88
+ >>> # Accessing the model configuration
89
+ >>> configuration = model.config
90
+ ```"""
91
+
92
+ model_type = "groupvit_text_model"
93
+
94
+ def __init__(
95
+ self,
96
+ vocab_size=49408,
97
+ hidden_size=256,
98
+ intermediate_size=1024,
99
+ num_hidden_layers=12,
100
+ num_attention_heads=4,
101
+ max_position_embeddings=77,
102
+ hidden_act="quick_gelu",
103
+ layer_norm_eps=1e-5,
104
+ dropout=0.0,
105
+ attention_dropout=0.0,
106
+ initializer_range=0.02,
107
+ initializer_factor=1.0,
108
+ pad_token_id=1,
109
+ bos_token_id=49406,
110
+ eos_token_id=49407,
111
+ **kwargs,
112
+ ):
113
+ super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
114
+
115
+ self.vocab_size = vocab_size
116
+ self.hidden_size = hidden_size
117
+ self.intermediate_size = intermediate_size
118
+ self.dropout = dropout
119
+ self.num_hidden_layers = num_hidden_layers
120
+ self.num_attention_heads = num_attention_heads
121
+ self.max_position_embeddings = max_position_embeddings
122
+ self.layer_norm_eps = layer_norm_eps
123
+ self.hidden_act = hidden_act
124
+ self.initializer_range = initializer_range
125
+ self.initializer_factor = initializer_factor
126
+ self.attention_dropout = attention_dropout
127
+
128
+ @classmethod
129
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
130
+ cls._set_token_in_kwargs(kwargs)
131
+
132
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
133
+
134
+ # get the text config dict if we are loading from GroupViTConfig
135
+ if config_dict.get("model_type") == "groupvit":
136
+ config_dict = config_dict["text_config"]
137
+
138
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
139
+ logger.warning(
140
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
141
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
142
+ )
143
+
144
+ return cls.from_dict(config_dict, **kwargs)
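# Usage sketch (hedged): because of the `model_type == "groupvit"` branch above, this classmethod also
# accepts a full GroupViT checkpoint and extracts only its text section.
#
# >>> from transformers import GroupViTTextConfig
# >>> text_config = GroupViTTextConfig.from_pretrained("nvidia/groupvit-gcc-yfcc")
# >>> text_config.hidden_size  # expected to match the default above
# 256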
145
+
146
+
147
+ class GroupViTVisionConfig(PretrainedConfig):
148
+ r"""
149
+ This is the configuration class to store the configuration of a [`GroupViTVisionModel`]. It is used to instantiate
150
+ a GroupViT model according to the specified arguments, defining the model architecture. Instantiating a
151
+ configuration with the defaults will yield a similar configuration to that of the GroupViT
152
+ [nvidia/groupvit-gcc-yfcc](https://huggingface.co/nvidia/groupvit-gcc-yfcc) architecture.
153
+
154
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
155
+ documentation from [`PretrainedConfig`] for more information.
156
+
157
+ Args:
158
+ hidden_size (`int`, *optional*, defaults to 384):
159
+ Dimensionality of the encoder layers and the pooler layer.
160
+ intermediate_size (`int`, *optional*, defaults to 1536):
161
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
162
+ depths (`List[int]`, *optional*, defaults to [6, 3, 3]):
163
+ The number of layers in each encoder block.
164
+ num_group_tokens (`List[int]`, *optional*, defaults to [64, 8, 0]):
165
+ The number of group tokens for each stage.
166
+ num_output_groups (`List[int]`, *optional*, defaults to [64, 8, 8]):
167
+ The number of output groups for each stage, 0 means no group.
168
+ num_attention_heads (`int`, *optional*, defaults to 6):
169
+ Number of attention heads for each attention layer in the Transformer encoder.
170
+ image_size (`int`, *optional*, defaults to 224):
171
+ The size (resolution) of each image.
172
+ patch_size (`int`, *optional*, defaults to 16):
173
+ The size (resolution) of each patch.
174
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
175
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
176
+ `"relu"`, `"selu"` and `"gelu_new"` ``"quick_gelu"` are supported.
177
+ layer_norm_eps (`float`, *optional*, defaults to 1e-5):
178
+ The epsilon used by the layer normalization layers.
179
+ dropout (`float`, *optional*, defaults to 0.0):
180
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
181
+ attention_dropout (`float`, *optional*, defaults to 0.0):
182
+ The dropout ratio for the attention probabilities.
183
+ initializer_range (`float`, *optional*, defaults to 0.02):
184
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
185
+ initializer_factor (`float`, *optional*, defaults to 1.0):
186
+ A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
187
+ testing).
188
+
189
+ Example:
190
+
191
+ ```python
192
+ >>> from transformers import GroupViTVisionConfig, GroupViTVisionModel
193
+
194
+ >>> # Initializing a GroupViTVisionModel with nvidia/groupvit-gcc-yfcc style configuration
195
+ >>> configuration = GroupViTVisionConfig()
196
+
197
+ >>> model = GroupViTVisionModel(configuration)
198
+
199
+ >>> # Accessing the model configuration
200
+ >>> configuration = model.config
201
+ ```"""
202
+
203
+ model_type = "groupvit_vision_model"
204
+
205
+ def __init__(
206
+ self,
207
+ hidden_size=384,
208
+ intermediate_size=1536,
209
+ depths=[6, 3, 3],
210
+ num_hidden_layers=12,
211
+ num_group_tokens=[64, 8, 0],
212
+ num_output_groups=[64, 8, 8],
213
+ num_attention_heads=6,
214
+ image_size=224,
215
+ patch_size=16,
216
+ num_channels=3,
217
+ hidden_act="gelu",
218
+ layer_norm_eps=1e-5,
219
+ dropout=0.0,
220
+ attention_dropout=0.0,
221
+ initializer_range=0.02,
222
+ initializer_factor=1.0,
223
+ assign_eps=1.0,
224
+ assign_mlp_ratio=[0.5, 4],
225
+ **kwargs,
226
+ ):
227
+ super().__init__(**kwargs)
228
+
229
+ self.hidden_size = hidden_size
230
+ self.intermediate_size = intermediate_size
231
+ self.depths = depths
232
+ if num_hidden_layers != sum(depths):
233
+ logger.warning(
234
+ f"Manually setting num_hidden_layers to {num_hidden_layers}, but we expect num_hidden_layers ="
235
+ f" sum(depth) = {sum(depths)}"
236
+ )
237
+ self.num_hidden_layers = num_hidden_layers
238
+ self.num_group_tokens = num_group_tokens
239
+ self.num_output_groups = num_output_groups
240
+ self.num_attention_heads = num_attention_heads
241
+ self.image_size = image_size
242
+ self.patch_size = patch_size
243
+ self.num_channels = num_channels
244
+ self.hidden_act = hidden_act
245
+ self.layer_norm_eps = layer_norm_eps
246
+ self.dropout = dropout
247
+ self.attention_dropout = attention_dropout
248
+ self.initializer_range = initializer_range
249
+ self.initializer_factor = initializer_factor
250
+ self.assign_eps = assign_eps
251
+ self.assign_mlp_ratio = assign_mlp_ratio
252
+
253
+ @classmethod
254
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
255
+ cls._set_token_in_kwargs(kwargs)
256
+
257
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
258
+
259
+ # get the vision config dict if we are loading from GroupViTConfig
260
+ if config_dict.get("model_type") == "groupvit":
261
+ config_dict = config_dict["vision_config"]
262
+
263
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
264
+ logger.warning(
265
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
266
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
267
+ )
268
+
269
+ return cls.from_dict(config_dict, **kwargs)
270
+
271
+
272
+ class GroupViTConfig(PretrainedConfig):
273
+ r"""
274
+ [`GroupViTConfig`] is the configuration class to store the configuration of a [`GroupViTModel`]. It is used to
275
+ instantiate a GroupViT model according to the specified arguments, defining the text model and vision model
276
+ configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the GroupViT
277
+ [nvidia/groupvit-gcc-yfcc](https://huggingface.co/nvidia/groupvit-gcc-yfcc) architecture.
278
+
279
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
280
+ documentation from [`PretrainedConfig`] for more information.
281
+
282
+ Args:
283
+ text_config (`dict`, *optional*):
284
+ Dictionary of configuration options used to initialize [`GroupViTTextConfig`].
285
+ vision_config (`dict`, *optional*):
286
+ Dictionary of configuration options used to initialize [`GroupViTVisionConfig`].
287
+ projection_dim (`int`, *optional*, defaults to 256):
288
+ Dimentionality of text and vision projection layers.
289
+ projection_intermediate_dim (`int`, *optional*, defaults to 4096):
290
+ Dimensionality of the intermediate layer of the text and vision projection layers.
291
+ logit_scale_init_value (`float`, *optional*, defaults to 2.6592):
292
+ The initial value of the *logit_scale* parameter. The default is used as per the original GroupViT
293
+ implementation.
294
+ kwargs (*optional*):
295
+ Dictionary of keyword arguments.
296
+ """
297
+
298
+ model_type = "groupvit"
299
+
300
+ def __init__(
301
+ self,
302
+ text_config=None,
303
+ vision_config=None,
304
+ projection_dim=256,
305
+ projection_intermediate_dim=4096,
306
+ logit_scale_init_value=2.6592,
307
+ **kwargs,
308
+ ):
309
+ # If `_config_dict` exist, we use them for the backward compatibility.
310
+ # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
311
+ # of confusion!).
312
+ text_config_dict = kwargs.pop("text_config_dict", None)
313
+ vision_config_dict = kwargs.pop("vision_config_dict", None)
314
+
315
+ super().__init__(**kwargs)
316
+
317
+ # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
318
+ # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
319
+ # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
320
+ if text_config_dict is not None:
321
+ if text_config is None:
322
+ text_config = {}
323
+
324
+ # This is the complete result when using `text_config_dict`.
325
+ _text_config_dict = GroupViTTextConfig(**text_config_dict).to_dict()
326
+
327
+ # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
328
+ for key, value in _text_config_dict.items():
329
+ if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
330
+ # If specified in `text_config_dict`
331
+ if key in text_config_dict:
332
+ message = (
333
+ f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
334
+ f'The value `text_config_dict["{key}"]` will be used instead.'
335
+ )
336
+ # If inferred from default argument values (just to be super careful)
337
+ else:
338
+ message = (
339
+ f"`text_config_dict` is provided which will be used to initialize `GroupViTTextConfig`. "
340
+ f'The value `text_config["{key}"]` will be overridden.'
341
+ )
342
+ logger.info(message)
343
+
344
+ # Update all values in `text_config` with the ones in `_text_config_dict`.
345
+ text_config.update(_text_config_dict)
346
+
347
+ if vision_config_dict is not None:
348
+ if vision_config is None:
349
+ vision_config = {}
350
+
351
+ # This is the complete result when using `vision_config_dict`.
352
+ _vision_config_dict = GroupViTVisionConfig(**vision_config_dict).to_dict()
353
+ # convert keys to string instead of integer
354
+ if "id2label" in _vision_config_dict:
355
+ _vision_config_dict["id2label"] = {
356
+ str(key): value for key, value in _vision_config_dict["id2label"].items()
357
+ }
358
+
359
+ # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
360
+ for key, value in _vision_config_dict.items():
361
+ if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
362
+ # If specified in `vision_config_dict`
363
+ if key in vision_config_dict:
364
+ message = (
365
+ f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
366
+ f'values. The value `vision_config_dict["{key}"]` will be used instead.'
367
+ )
368
+ # If inferred from default argument values (just to be super careful)
369
+ else:
370
+ message = (
371
+ f"`vision_config_dict` is provided which will be used to initialize `GroupViTVisionConfig`."
372
+ f' The value `vision_config["{key}"]` will be overridden.'
373
+ )
374
+ logger.info(message)
375
+
376
+ # Update all values in `vision_config` with the ones in `_vision_config_dict`.
377
+ vision_config.update(_vision_config_dict)
378
+
379
+ if text_config is None:
380
+ text_config = {}
381
+ logger.info("`text_config` is `None`. Initializing the `GroupViTTextConfig` with default values.")
382
+
383
+ if vision_config is None:
384
+ vision_config = {}
385
+ logger.info("`vision_config` is `None`. initializing the `GroupViTVisionConfig` with default values.")
386
+
387
+ self.text_config = GroupViTTextConfig(**text_config)
388
+ self.vision_config = GroupViTVisionConfig(**vision_config)
389
+
390
+ self.projection_dim = projection_dim
391
+ self.projection_intermediate_dim = projection_intermediate_dim
392
+ self.logit_scale_init_value = logit_scale_init_value
393
+ self.initializer_range = 0.02
394
+ self.initializer_factor = 1.0
395
+ self.output_segmentation = False
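# A short illustration of the merge logic above (values are made up for the example): when both
# `text_config` and the legacy `text_config_dict` are passed, the entries from `text_config_dict`
# take precedence and an informational message is logged.
#
# >>> config = GroupViTConfig(
# ...     text_config={"hidden_size": 128},
# ...     text_config_dict={"hidden_size": 256},
# ... )
# >>> config.text_config.hidden_size
# 256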
396
+
397
+ @classmethod
398
+ def from_text_vision_configs(cls, text_config: GroupViTTextConfig, vision_config: GroupViTVisionConfig, **kwargs):
399
+ r"""
400
+ Instantiate a [`GroupViTConfig`] (or a derived class) from groupvit text model configuration and groupvit
401
+ vision model configuration.
402
+
403
+ Returns:
404
+ [`GroupViTConfig`]: An instance of a configuration object
405
+ """
406
+
407
+ return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
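# Example (sketch): building a combined config from two freshly initialized sub-configs.
#
# >>> text_config = GroupViTTextConfig()
# >>> vision_config = GroupViTVisionConfig()
# >>> config = GroupViTConfig.from_text_vision_configs(text_config, vision_config)
# >>> config.projection_dim
# 256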
408
+
409
+
410
+ class GroupViTOnnxConfig(OnnxConfig):
411
+ @property
412
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
413
+ return OrderedDict(
414
+ [
415
+ ("input_ids", {0: "batch", 1: "sequence"}),
416
+ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
417
+ ("attention_mask", {0: "batch", 1: "sequence"}),
418
+ ]
419
+ )
420
+
421
+ @property
422
+ def outputs(self) -> Mapping[str, Mapping[int, str]]:
423
+ return OrderedDict(
424
+ [
425
+ ("logits_per_image", {0: "batch"}),
426
+ ("logits_per_text", {0: "batch"}),
427
+ ("text_embeds", {0: "batch"}),
428
+ ("image_embeds", {0: "batch"}),
429
+ ]
430
+ )
431
+
432
+ @property
433
+ def atol_for_validation(self) -> float:
434
+ return 1e-4
435
+
436
+ def generate_dummy_inputs(
437
+ self,
438
+ processor: "ProcessorMixin",
439
+ batch_size: int = -1,
440
+ seq_length: int = -1,
441
+ framework: Optional["TensorType"] = None,
442
+ ) -> Mapping[str, Any]:
443
+ text_input_dict = super().generate_dummy_inputs(
444
+ processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework
445
+ )
446
+ image_input_dict = super().generate_dummy_inputs(
447
+ processor.image_processor, batch_size=batch_size, framework=framework
448
+ )
449
+ return {**text_input_dict, **image_input_dict}
450
+
451
+ @property
452
+ def default_onnx_opset(self) -> int:
453
+ return 14
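# A quick sketch of what this OnnxConfig exposes (the actual export is typically driven by the generic
# `transformers.onnx` tooling, which is assumed here and not shown):
#
# >>> onnx_config = GroupViTOnnxConfig(GroupViTConfig())
# >>> list(onnx_config.inputs.keys())
# ['input_ids', 'pixel_values', 'attention_mask']
# >>> onnx_config.default_onnx_opset
# 14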
env-llmeval/lib/python3.10/site-packages/transformers/models/groupvit/convert_groupvit_nvlab_to_hf.py ADDED
@@ -0,0 +1,217 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """
17
+ Convert GroupViT checkpoints from the original repository.
18
+
19
+ URL: https://github.com/NVlabs/GroupViT
20
+ """
21
+
22
+ import argparse
23
+
24
+ import requests
25
+ import torch
26
+ from PIL import Image
27
+
28
+ from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
29
+
30
+
31
+ def rename_key(name):
32
+ # vision encoder
33
+ if "img_encoder.pos_embed" in name:
34
+ name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
35
+ if "img_encoder.patch_embed.proj" in name:
36
+ name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
37
+ if "img_encoder.patch_embed.norm" in name:
38
+ name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
39
+ if "img_encoder.layers" in name:
40
+ name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
41
+ if "blocks" in name and "res" not in name:
42
+ name = name.replace("blocks", "layers")
43
+ if "attn" in name and "pre_assign" not in name:
44
+ name = name.replace("attn", "self_attn")
45
+ if "proj" in name and "self_attn" in name and "text" not in name:
46
+ name = name.replace("proj", "out_proj")
47
+ if "pre_assign_attn.attn.proj" in name:
48
+ name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
49
+ if "norm1" in name:
50
+ name = name.replace("norm1", "layer_norm1")
51
+ if "norm2" in name and "pre_assign" not in name:
52
+ name = name.replace("norm2", "layer_norm2")
53
+ if "img_encoder.norm" in name:
54
+ name = name.replace("img_encoder.norm", "vision_model.layernorm")
55
+ # text encoder
56
+ if "text_encoder.token_embedding" in name:
57
+ name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
58
+ if "text_encoder.positional_embedding" in name:
59
+ name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
60
+ if "text_encoder.transformer.resblocks." in name:
61
+ name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
62
+ if "ln_1" in name:
63
+ name = name.replace("ln_1", "layer_norm1")
64
+ if "ln_2" in name:
65
+ name = name.replace("ln_2", "layer_norm2")
66
+ if "c_fc" in name:
67
+ name = name.replace("c_fc", "fc1")
68
+ if "c_proj" in name:
69
+ name = name.replace("c_proj", "fc2")
70
+ if "text_encoder" in name:
71
+ name = name.replace("text_encoder", "text_model")
72
+ if "ln_final" in name:
73
+ name = name.replace("ln_final", "final_layer_norm")
74
+ # projection layers
75
+ if "img_projector.linear_hidden." in name:
76
+ name = name.replace("img_projector.linear_hidden.", "visual_projection.")
77
+ if "img_projector.linear_out." in name:
78
+ name = name.replace("img_projector.linear_out.", "visual_projection.3.")
79
+ if "text_projector.linear_hidden" in name:
80
+ name = name.replace("text_projector.linear_hidden", "text_projection")
81
+ if "text_projector.linear_out" in name:
82
+ name = name.replace("text_projector.linear_out", "text_projection.3")
83
+
84
+ return name
85
+
86
+
87
+ def convert_state_dict(orig_state_dict, config):
88
+ for key in orig_state_dict.copy().keys():
89
+ val = orig_state_dict.pop(key)
90
+
91
+ if "qkv" in key:
92
+ # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
93
+ # we need to split them up into separate matrices/vectors
94
+ key_split = key.split(".")
95
+ stage_num, layer_num = int(key_split[2]), int(key_split[4])
96
+ dim = config.vision_config.hidden_size
97
+ if "weight" in key:
98
+ orig_state_dict[
99
+ f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.weight"
100
+ ] = val[:dim, :]
101
+ orig_state_dict[
102
+ f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.weight"
103
+ ] = val[dim : dim * 2, :]
104
+ orig_state_dict[
105
+ f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.weight"
106
+ ] = val[-dim:, :]
107
+ else:
108
+ orig_state_dict[
109
+ f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.bias"
110
+ ] = val[:dim]
111
+ orig_state_dict[
112
+ f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.bias"
113
+ ] = val[dim : dim * 2]
114
+ orig_state_dict[
115
+ f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.bias"
116
+ ] = val[-dim:]
117
+ elif "in_proj" in key:
118
+ # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
119
+ # we need to split them up into separate matrices/vectors
120
+ key_split = key.split(".")
121
+ layer_num = int(key_split[3])
122
+ dim = config.text_config.hidden_size
123
+ if "weight" in key:
124
+ orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
125
+ orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
126
+ dim : dim * 2, :
127
+ ]
128
+ orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
129
+ else:
130
+ orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
131
+ orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
132
+ orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
133
+ else:
134
+ new_name = rename_key(key)
135
+ # squeeze if necessary
136
+ if (
137
+ "text_projection.0" in new_name
138
+ or "text_projection.3" in new_name
139
+ or "visual_projection.0" in new_name
140
+ or "visual_projection.3" in new_name
141
+ ):
142
+ orig_state_dict[new_name] = val.squeeze_()
143
+ else:
144
+ orig_state_dict[new_name] = val
145
+
146
+ return orig_state_dict
147
+
148
+
149
+ # We will verify our results on an image of cute cats
150
+ def prepare_img():
151
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
152
+ im = Image.open(requests.get(url, stream=True).raw)
153
+ return im
154
+
155
+
156
+ @torch.no_grad()
157
+ def convert_groupvit_checkpoint(
158
+ checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False
159
+ ):
160
+ """
161
+ Copy/paste/tweak model's weights to the Transformers design.
162
+ """
163
+ config = GroupViTConfig()
164
+ model = GroupViTModel(config).eval()
165
+
166
+ state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
167
+ new_state_dict = convert_state_dict(state_dict, config)
168
+ missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
169
+ assert missing_keys == ["text_model.embeddings.position_ids"]
170
+ assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)
171
+
172
+ # verify result
173
+ processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
174
+ image = prepare_img()
175
+ inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")
176
+
177
+ with torch.no_grad():
178
+ outputs = model(**inputs)
179
+
180
+ if model_name == "groupvit-gcc-yfcc":
181
+ expected_logits = torch.tensor([[13.3523, 6.3629]])
182
+ elif model_name == "groupvit-gcc-redcaps":
183
+ expected_logits = torch.tensor([[16.1873, 8.6230]])
184
+ else:
185
+ raise ValueError(f"Model name {model_name} not supported.")
186
+ assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)
187
+
188
+ processor.save_pretrained(pytorch_dump_folder_path)
189
+ model.save_pretrained(pytorch_dump_folder_path)
190
+ print("Successfully saved processor and model to", pytorch_dump_folder_path)
191
+
192
+ if push_to_hub:
193
+ print("Pushing to the hub...")
194
+ processor.push_to_hub(model_name, organization="nielsr")
195
+ model.push_to_hub(model_name, organization="nielsr")
196
+
197
+
198
+ if __name__ == "__main__":
199
+ parser = argparse.ArgumentParser()
200
+ parser.add_argument(
201
+ "--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
202
+ )
203
+ parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
204
+ parser.add_argument(
205
+ "--model_name",
206
+ default="groupvit-gccy-fcc",
207
+ type=str,
208
+ help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
209
+ )
210
+ parser.add_argument(
211
+ "--push_to_hub",
212
+ action="store_true",
213
+ help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
214
+ )
215
+ args = parser.parse_args()
216
+
217
+ convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
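# Example invocation (paths are placeholders, not real files):
#
#   python convert_groupvit_nvlab_to_hf.py \
#       --checkpoint_path /path/to/groupvit_gcc_yfcc_checkpoint.pth \
#       --model_name groupvit-gcc-yfcc \
#       --pytorch_dump_folder_path ./groupvit-gcc-yfcc-hf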
env-llmeval/lib/python3.10/site-packages/transformers/models/groupvit/modeling_tf_groupvit.py ADDED
@@ -0,0 +1,2135 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 NVIDIA and The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ TF 2.0 GroupViT model."""
16
+
17
+
18
+ from __future__ import annotations
19
+
20
+ import collections.abc
21
+ import math
22
+ from dataclasses import dataclass
23
+ from typing import Any, Optional, Tuple, Union
24
+
25
+ import numpy as np
26
+ import tensorflow as tf
27
+
28
+ from ...activations_tf import get_tf_activation
29
+ from ...modeling_tf_outputs import TFBaseModelOutput, TFBaseModelOutputWithPooling
30
+ from ...modeling_tf_utils import (
31
+ TFModelInputType,
32
+ TFPreTrainedModel,
33
+ get_initializer,
34
+ keras,
35
+ keras_serializable,
36
+ unpack_inputs,
37
+ )
38
+ from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
39
+ from ...utils import (
40
+ ModelOutput,
41
+ add_start_docstrings,
42
+ add_start_docstrings_to_model_forward,
43
+ is_tensorflow_probability_available,
44
+ logging,
45
+ replace_return_docstrings,
46
+ )
47
+ from .configuration_groupvit import GroupViTConfig, GroupViTTextConfig, GroupViTVisionConfig
48
+
49
+
50
+ logger = logging.get_logger(__name__)
51
+
52
+ # soft dependency
53
+ if is_tensorflow_probability_available():
54
+ try:
55
+ import tensorflow_probability as tfp
56
+
57
+ # On the first call, check whether a compatible version of TensorFlow is installed
58
+ # TensorFlow Probability depends on a recent stable release of TensorFlow
59
+ _ = tfp.distributions.Normal(loc=0.0, scale=1.0)
60
+ except ImportError:
61
+ logger.error(
62
+ "GroupViT models are not usable since `tensorflow_probability` can't be loaded. "
63
+ "It seems you have `tensorflow_probability` installed with the wrong tensorflow version."
64
+ "Please try to reinstall it following the instructions here: https://github.com/tensorflow/probability."
65
+ )
66
+
67
+ _CHECKPOINT_FOR_DOC = "nvidia/groupvit-gcc-yfcc"
68
+
69
+ TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST = [
70
+ "nvidia/groupvit-gcc-yfcc",
71
+ # See all GroupViT models at https://huggingface.co/models?filter=groupvit
72
+ ]
73
+
74
+
75
+ LARGE_NEGATIVE = -1e8
76
+
77
+
78
+ # Copied from transformers.models.bart.modeling_tf_bart._expand_mask
79
+ def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None):
80
+ """
81
+ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
82
+ """
83
+ src_len = shape_list(mask)[1]
84
+ tgt_len = tgt_len if tgt_len is not None else src_len
85
+ one_cst = tf.constant(1.0)
86
+ mask = tf.cast(mask, dtype=one_cst.dtype)
87
+ expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1))
88
+
89
+ return (one_cst - expanded_mask) * LARGE_NEGATIVE
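# Small sketch of the mask expansion above: a padding mask of shape `[bsz, seq_len]` with 1s for real
# tokens and 0s for padding becomes an additive bias of shape `[bsz, 1, tgt_len, seq_len]` that is 0
# where attention is allowed and LARGE_NEGATIVE where it is masked.
#
# >>> _expand_mask(tf.constant([[1.0, 1.0, 0.0]]), tgt_len=2).shape
# TensorShape([1, 1, 2, 3])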
90
+
91
+
92
+ # contrastive loss function, adapted from
93
+ # https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/CLIP.html
94
+ def contrastive_loss(logits: tf.Tensor) -> tf.Tensor:
95
+ return tf.math.reduce_mean(
96
+ keras.metrics.sparse_categorical_crossentropy(
97
+ y_true=tf.range(shape_list(logits)[0]), y_pred=logits, from_logits=True
98
+ )
99
+ )
100
+
101
+
102
+ # Copied from transformers.models.clip.modeling_tf_clip.clip_loss with clip->groupvit
103
+ def groupvit_loss(similarity: tf.Tensor) -> tf.Tensor:
104
+ caption_loss = contrastive_loss(similarity)
105
+ image_loss = contrastive_loss(tf.transpose(similarity))
106
+ return (caption_loss + image_loss) / 2.0
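# Toy example of the symmetric objective above (hedged): for a batch of N matched image/text pairs,
# `similarity` is the N x N logit matrix and the targets are the diagonal indices in both the
# text->image and image->text directions.
#
# >>> similarity = tf.constant([[4.0, 0.1], [0.2, 3.0]])
# >>> loss = groupvit_loss(similarity)  # small, since the diagonal already dominates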
107
+
108
+
109
+ def hard_softmax(logits: tf.Tensor, dim: int) -> tf.Tensor:
110
+ y_soft = stable_softmax(logits, dim)
111
+ # Straight through.
112
+ index = tf.argmax(y_soft, dim)
113
+ y_hard = tf.one_hot(
114
+ index,
115
+ depth=shape_list(logits)[dim],
116
+ # TensorFlow expects axis to be -1 or between [0, 3). But received: -2
117
+ # This is why the following code snippet is used.
118
+ axis=range(len(shape_list(logits)))[dim],
119
+ dtype=y_soft.dtype,
120
+ )
121
+ ret = y_hard - tf.stop_gradient(y_soft) + y_soft
122
+
123
+ return ret
124
+
125
+
126
+ def gumbel_softmax(logits: tf.Tensor, tau: float = 1, hard: bool = False, dim: int = -1) -> tf.Tensor:
127
+ gumbel_dist = tfp.distributions.Gumbel(0.0, 1.0)
128
+ gumbels = gumbel_dist.sample(tf.shape(logits), dtype=logits.dtype)
129
+
130
+ gumbels = (logits + gumbels) / tau # ~Gumbel(logits,tau)
131
+ y_soft = stable_softmax(gumbels, dim)
132
+
133
+ if hard:
134
+ # Straight through.
135
+ index = tf.argmax(y_soft, dim)
136
+ y_hard = tf.one_hot(
137
+ index,
138
+ depth=shape_list(logits)[dim],
139
+ # TensorFlow expects axis to be -1 or between [0, 3). But received: -2
140
+ # This is why the following code snippet is used.
141
+ axis=range(len(shape_list(logits)))[dim],
142
+ dtype=y_soft.dtype,
143
+ )
144
+ ret = y_hard - tf.stop_gradient(y_soft) + y_soft
145
+ else:
146
+ # Reparametrization trick.
147
+ ret = y_soft
148
+ return ret
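# Usage sketch (hedged): `TFGroupViTAssignAttention` below calls this with `dim=-2` and `hard=True`
# during training, so the forward pass uses one-hot group assignments while gradients flow through the
# soft distribution (straight-through estimator).
#
# >>> logits = tf.random.normal((2, 4, 3))
# >>> assignments = gumbel_softmax(logits, tau=1.0, hard=True, dim=-2)
# >>> tf.reduce_sum(assignments, axis=-2)  # all ones: one-hot along the assignment axis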
149
+
150
+
151
+ def resize_attention_map(attentions: tf.Tensor, height: int, width: int, align_corners: bool = False) -> tf.Tensor:
152
+ """
153
+ Args:
154
+ attentions (`tf.Tensor`): attention map of shape [batch_size, groups, feat_height*feat_width]
155
+ height (`int`): height of the output attention map
156
+ width (`int`): width of the output attention map
157
+ align_corners (`bool`, *optional*): the `align_corner` argument for `nn.functional.interpolate`.
158
+
159
+ Returns:
160
+ `tf.Tensor`: resized attention map of shape [batch_size, groups, height, width]
161
+ """
162
+
163
+ scale = (height * width // attentions.shape[2]) ** 0.5
164
+ if height > width:
165
+ feat_width = int(np.round(width / scale))
166
+ feat_height = shape_list(attentions)[2] // feat_width
167
+ else:
168
+ feat_height = int(np.round(height / scale))
169
+ feat_width = shape_list(attentions)[2] // feat_height
170
+
171
+ batch_size = shape_list(attentions)[0]
172
+ groups = shape_list(attentions)[1] # number of group token
173
+ # [batch_size, groups, feat_height * feat_width] -> [batch_size, groups, feat_height, feat_width]
174
+ attentions = tf.reshape(attentions, (batch_size, groups, feat_height, feat_width))
175
+ attentions = tf.transpose(attentions, perm=(0, 2, 3, 1))
176
+ if align_corners:
177
+ attentions = tf.compat.v1.image.resize(
178
+ attentions,
179
+ size=(height, width),
180
+ method="bilinear",
181
+ align_corners=align_corners,
182
+ )
183
+ else:
184
+ attentions = tf.image.resize(attentions, size=(height, width), method="bilinear")
185
+ attentions = tf.transpose(attentions, perm=(0, 3, 1, 2))
186
+ return attentions
187
+
188
+
189
+ def get_grouping_from_attentions(attentions: Tuple[tf.Tensor], hw_shape: Tuple[int]) -> tf.Tensor:
190
+ """
191
+ Args:
192
+ attentions (`tuple(tf.Tensor)`): tuple of attention maps returned by `TFGroupViTVisionTransformer`
193
+ hw_shape (`tuple(int)`): height and width of the output attention map
194
+ Returns:
195
+ `tf.Tensor`: the attention map of shape [batch_size, groups, height, width]
196
+ """
197
+
198
+ attn_maps = []
199
+ prev_attn_masks = None
200
+ for attn_masks in attentions:
201
+ # [batch_size, num_groups, height x width] -> [batch_size, height x width, num_groups]
202
+ attn_masks = tf.transpose(attn_masks, perm=(0, 2, 1))
203
+ if prev_attn_masks is None:
204
+ prev_attn_masks = attn_masks
205
+ else:
206
+ prev_attn_masks = tf.matmul(prev_attn_masks, attn_masks)
207
+ # [batch_size, height x width, num_groups] -> [batch_size, num_groups, height x width] -> [batch_size, num_groups, height, width]
208
+ cur_attn_map = resize_attention_map(tf.transpose(prev_attn_masks, perm=(0, 2, 1)), *hw_shape)
209
+ attn_maps.append(cur_attn_map)
210
+
211
+ # [batch_size, num_groups, height, width]
212
+ final_grouping = attn_maps[-1]
213
+
214
+ return tf.stop_gradient(final_grouping)
215
+
216
+
217
+ @dataclass
218
+ class TFGroupViTModelOutput(ModelOutput):
219
+ """
220
+ Args:
221
+ loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
222
+ Contrastive loss for image-text similarity.
223
+ logits_per_image (`tf.Tensor` of shape `(image_batch_size, text_batch_size)`):
224
+ The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
225
+ similarity scores.
226
+ logits_per_text (`tf.Tensor` of shape `(text_batch_size, image_batch_size)`):
227
+ The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
228
+ similarity scores.
229
+ segmentation_logits (`tf.Tensor` of shape `(batch_size, config.num_labels, logits_height, logits_width)`):
230
+ Classification scores for each pixel.
231
+
232
+ <Tip warning={true}>
233
+
234
+ The logits returned do not necessarily have the same size as the `pixel_values` passed as inputs. This is
235
+ to avoid doing two interpolations and lose some quality when a user needs to resize the logits to the
236
+ original image size as post-processing. You should always check your logits shape and resize as needed.
237
+
238
+ </Tip>
239
+
240
+ text_embeds (`tf.Tensor` of shape `(batch_size, output_dim)`):
241
+ The text embeddings obtained by applying the projection layer to the pooled output of
242
+ [`TFGroupViTTextModel`].
243
+ image_embeds (`tf.Tensor` of shape `(batch_size, output_dim)`):
244
+ The image embeddings obtained by applying the projection layer to the pooled output of
245
+ [`TFGroupViTVisionModel`].
246
+ text_model_output (`TFBaseModelOutputWithPooling`):
247
+ The output of the [`TFGroupViTTextModel`].
248
+ vision_model_output (`TFBaseModelOutputWithPooling`):
249
+ The output of the [`TFGroupViTVisionModel`].
250
+ """
251
+
252
+ loss: tf.Tensor | None = None
253
+ logits_per_image: tf.Tensor = None
254
+ logits_per_text: tf.Tensor = None
255
+ segmentation_logits: tf.Tensor = None
256
+ text_embeds: tf.Tensor = None
257
+ image_embeds: tf.Tensor = None
258
+ text_model_output: TFBaseModelOutputWithPooling = None
259
+ vision_model_output: TFBaseModelOutputWithPooling = None
260
+
261
+ def to_tuple(self) -> Tuple[Any]:
262
+ return tuple(
263
+ self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
264
+ for k in self.keys()
265
+ )
266
+
267
+
268
+ class TFGroupViTCrossAttentionLayer(keras.layers.Layer):
269
+ def __init__(self, config: GroupViTVisionConfig, **kwargs):
270
+ super().__init__(**kwargs)
271
+ self.attn = TFGroupViTAttention(config, name="attn")
272
+ self.norm2 = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="norm2")
273
+ self.mlp = TFGroupViTMLP(config, name="mlp")
274
+ self.norm_post = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="norm_post")
275
+ self.config = config
276
+
277
+ def call(self, query: tf.Tensor, key: tf.Tensor, training: bool = False) -> tf.Tensor:
278
+ x = query
279
+ x = x + self.attn(query, encoder_hidden_states=key)[0]
280
+ x = x + self.mlp(self.norm2(x))
281
+ x = self.norm_post(x)
282
+ return x
283
+
284
+ def build(self, input_shape=None):
285
+ if self.built:
286
+ return
287
+ self.built = True
288
+ if getattr(self, "attn", None) is not None:
289
+ with tf.name_scope(self.attn.name):
290
+ self.attn.build(None)
291
+ if getattr(self, "norm2", None) is not None:
292
+ with tf.name_scope(self.norm2.name):
293
+ self.norm2.build([None, None, self.config.hidden_size])
294
+ if getattr(self, "mlp", None) is not None:
295
+ with tf.name_scope(self.mlp.name):
296
+ self.mlp.build(None)
297
+ if getattr(self, "norm_post", None) is not None:
298
+ with tf.name_scope(self.norm_post.name):
299
+ self.norm_post.build([None, None, self.config.hidden_size])
300
+
301
+
302
+ class TFGroupViTAssignAttention(keras.layers.Layer):
303
+ def __init__(self, config: GroupViTVisionConfig, **kwargs):
304
+ super().__init__(**kwargs)
305
+ self.scale = config.hidden_size**-0.5
306
+
307
+ self.q_proj = keras.layers.Dense(config.hidden_size, name="q_proj")
308
+ self.k_proj = keras.layers.Dense(config.hidden_size, name="k_proj")
309
+ self.v_proj = keras.layers.Dense(config.hidden_size, name="v_proj")
310
+ self.proj = keras.layers.Dense(config.hidden_size, name="proj")
311
+ self.assign_eps = config.assign_eps
312
+ self.config = config
313
+
314
+ def get_attn(self, attn: tf.Tensor, gumbel: bool = True, hard: bool = True, training: bool = False) -> tf.Tensor:
315
+ if gumbel and training:
316
+ attn = gumbel_softmax(attn, dim=-2, hard=hard)
317
+ else:
318
+ if hard:
319
+ attn = hard_softmax(attn, dim=-2)
320
+ else:
321
+ attn = stable_softmax(attn, axis=-2)
322
+
323
+ return attn
324
+
325
+ def call(self, query: tf.Tensor, key: tf.Tensor, training: bool = False):
326
+ value = key
327
+ # [batch_size, query_length, channels]
328
+ query = self.q_proj(query)
329
+
330
+ # [batch_size, key_length, channels]
331
+ key = self.k_proj(key)
332
+
333
+ # [batch_size, key_length, channels]
334
+ value = self.v_proj(value)
335
+
336
+ # [batch_size, query_length, key_length]
337
+ raw_attn = tf.matmul(query, key, transpose_b=True) * self.scale
338
+
339
+ attn = self.get_attn(raw_attn, training=training)
340
+ soft_attn = self.get_attn(raw_attn, training=training, gumbel=False, hard=False)
341
+
342
+ attn = attn / (tf.math.reduce_sum(attn, axis=-1, keepdims=True) + self.assign_eps)
343
+
344
+ out = tf.matmul(attn, value)
345
+
346
+ out = self.proj(out)
347
+
348
+ return out, soft_attn
349
+
350
+ def build(self, input_shape=None):
351
+ if self.built:
352
+ return
353
+ self.built = True
354
+ if getattr(self, "q_proj", None) is not None:
355
+ with tf.name_scope(self.q_proj.name):
356
+ self.q_proj.build([None, None, self.config.hidden_size])
357
+ if getattr(self, "k_proj", None) is not None:
358
+ with tf.name_scope(self.k_proj.name):
359
+ self.k_proj.build([None, None, self.config.hidden_size])
360
+ if getattr(self, "v_proj", None) is not None:
361
+ with tf.name_scope(self.v_proj.name):
362
+ self.v_proj.build([None, None, self.config.hidden_size])
363
+ if getattr(self, "proj", None) is not None:
364
+ with tf.name_scope(self.proj.name):
365
+ self.proj.build([None, None, self.config.hidden_size])
366
+
367
+
368
+ class TFGroupViTTokenAssign(keras.layers.Layer):
369
+ def __init__(self, config: GroupViTVisionConfig, num_group_token: int, num_output_group: int, **kwargs):
370
+ super().__init__(**kwargs)
371
+ self.num_output_group = num_output_group
372
+ # norm on group_tokens
373
+ self.norm_tokens = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="norm_tokens")
374
+ assign_mlp_ratio = (
375
+ config.assign_mlp_ratio
376
+ if isinstance(config.assign_mlp_ratio, collections.abc.Iterable)
377
+ else (config.assign_mlp_ratio, config.assign_mlp_ratio)
378
+ )
379
+ tokens_dim, channels_dim = [int(x * config.hidden_size) for x in assign_mlp_ratio]
380
+ self.mlp_inter = TFGroupViTMixerMLP(config, num_group_token, tokens_dim, num_output_group, name="mlp_inter")
381
+ self.norm_post_tokens = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="norm_post_tokens")
382
+ # norm on x
383
+ self.norm_x = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="norm_x")
384
+ self.pre_assign_attn = TFGroupViTCrossAttentionLayer(config, name="pre_assign_attn")
385
+
386
+ self.assign = TFGroupViTAssignAttention(config, name="assign")
387
+ self.norm_new_x = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="norm_new_x")
388
+ self.mlp_channels = TFGroupViTMLP(
389
+ config, config.hidden_size, channels_dim, config.hidden_size, name="mlp_channels"
390
+ )
391
+ self.config = config
392
+
393
+ def project_group_token(self, group_tokens: tf.Tensor) -> tf.Tensor:
394
+ """
395
+ Args:
396
+ group_tokens (tf.Tensor): group tokens, [batch_size, num_group_tokens, channels]
397
+
398
+ Returns:
399
+ projected_group_tokens (tf.Tensor): [batch_size, num_output_groups, channels]
400
+ """
401
+ # [B, num_output_groups, C] <- [B, num_group_tokens, C]
402
+ projected_group_tokens = self.mlp_inter(group_tokens)
403
+ projected_group_tokens = self.norm_post_tokens(projected_group_tokens)
404
+ return projected_group_tokens
405
+
406
+ def call(self, image_tokens: tf.Tensor, group_tokens: tf.Tensor, training: bool = False):
407
+ """
408
+ Args:
409
+ image_tokens (`tf.Tensor`): image tokens, of shape [batch_size, input_length, channels]
410
+ group_tokens (`tf.Tensor`): group tokens, [batch_size, num_group_tokens, channels]
411
+ """
412
+
413
+ group_tokens = self.norm_tokens(group_tokens)
414
+ image_tokens = self.norm_x(image_tokens)
415
+ # [batch_size, num_output_groups, channels]
416
+ projected_group_tokens = self.project_group_token(group_tokens)
417
+ projected_group_tokens = self.pre_assign_attn(projected_group_tokens, image_tokens)
418
+ new_image_tokens, attention = self.assign(projected_group_tokens, image_tokens)
419
+ new_image_tokens += projected_group_tokens
420
+
421
+ new_image_tokens = new_image_tokens + self.mlp_channels(self.norm_new_x(new_image_tokens))
422
+
423
+ return new_image_tokens, attention
424
+
425
+ def build(self, input_shape=None):
426
+ if self.built:
427
+ return
428
+ self.built = True
429
+ if getattr(self, "norm_tokens", None) is not None:
430
+ with tf.name_scope(self.norm_tokens.name):
431
+ self.norm_tokens.build([None, None, self.config.hidden_size])
432
+ if getattr(self, "mlp_inter", None) is not None:
433
+ with tf.name_scope(self.mlp_inter.name):
434
+ self.mlp_inter.build(None)
435
+ if getattr(self, "norm_post_tokens", None) is not None:
436
+ with tf.name_scope(self.norm_post_tokens.name):
437
+ self.norm_post_tokens.build([None, None, self.config.hidden_size])
438
+ if getattr(self, "norm_x", None) is not None:
439
+ with tf.name_scope(self.norm_x.name):
440
+ self.norm_x.build([None, None, self.config.hidden_size])
441
+ if getattr(self, "pre_assign_attn", None) is not None:
442
+ with tf.name_scope(self.pre_assign_attn.name):
443
+ self.pre_assign_attn.build(None)
444
+ if getattr(self, "assign", None) is not None:
445
+ with tf.name_scope(self.assign.name):
446
+ self.assign.build(None)
447
+ if getattr(self, "norm_new_x", None) is not None:
448
+ with tf.name_scope(self.norm_new_x.name):
449
+ self.norm_new_x.build([None, None, self.config.hidden_size])
450
+ if getattr(self, "mlp_channels", None) is not None:
451
+ with tf.name_scope(self.mlp_channels.name):
452
+ self.mlp_channels.build(None)
453
+
454
+
455
+ # Adapted from transformers.models.vit.modeling_tf_vit.TFViTPatchEmbeddings with ViT->GroupViT
456
+ class TFGroupViTPatchEmbeddings(keras.layers.Layer):
457
+ """
458
+ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
459
+ `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
460
+ Transformer.
461
+ """
462
+
463
+ def __init__(self, config: GroupViTConfig, **kwargs):
464
+ super().__init__(**kwargs)
465
+ image_size, patch_size = config.image_size, config.patch_size
466
+ num_channels = config.num_channels
467
+ # hidden_size is a member as it will be required in the call method
468
+ self.hidden_size = config.hidden_size
469
+
470
+ image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
471
+ patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
472
+ num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
473
+ self.image_size = image_size
474
+ self.patch_size = patch_size
475
+ self.num_patches = num_patches
476
+ self.num_channels = num_channels
477
+ self.config = config
478
+
479
+ self.projection = keras.layers.Conv2D(
480
+ filters=self.hidden_size,
481
+ kernel_size=patch_size,
482
+ strides=patch_size,
483
+ padding="valid",
484
+ data_format="channels_last",
485
+ use_bias=True,
486
+ kernel_initializer=get_initializer(self.config.initializer_range),
487
+ bias_initializer="zeros",
488
+ name="projection",
489
+ )
490
+
491
+ def call(
492
+ self, pixel_values: tf.Tensor, interpolate_pos_encoding: bool = False, training: bool = False
493
+ ) -> tf.Tensor:
494
+ batch_size, num_channels, height, width = shape_list(pixel_values)
495
+ if tf.executing_eagerly() and num_channels != self.num_channels:
496
+ raise ValueError(
497
+ "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
498
+ )
499
+ if (
500
+ not interpolate_pos_encoding
501
+ and tf.executing_eagerly()
502
+ and (height != self.image_size[0] or width != self.image_size[1])
503
+ ):
504
+ raise ValueError(
505
+ f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]})."
506
+ )
507
+
508
+ # When running on CPU, `keras.layers.Conv2D` doesn't support `NCHW` format.
509
+ # So change the input format from `NCHW` to `NHWC`.
510
+ # shape = (batch_size, in_height, in_width, in_channels=num_channels)
511
+ pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
512
+
513
+ projection = self.projection(pixel_values)
514
+
515
+ # Change the 2D spatial dimensions to a single temporal dimension.
516
+ # shape = (batch_size, num_patches, out_channels=embed_dim)
517
+ num_patches = (width // self.patch_size[1]) * (height // self.patch_size[0])
518
+ # In the TFGroupViTVisionEmbeddings the embeddings from this layer will be layer normalized
519
+ # LayerNormalization layer needs to have static last dimension (otherwise the test_keras_save_load fails with symbolic tensors)
520
+ # This is why we have used the hidden_size in the reshape method
521
+ embeddings = tf.reshape(tensor=projection, shape=(batch_size, num_patches, self.hidden_size))
522
+
523
+ return embeddings
524
+
525
+ def build(self, input_shape=None):
526
+ if self.built:
527
+ return
528
+ self.built = True
529
+ if getattr(self, "projection", None) is not None:
530
+ with tf.name_scope(self.projection.name):
531
+ self.projection.build([None, None, None, self.num_channels])
532
+
533
+
534
+ # Adapted from transformers.models.vit.modeling_tf_vit.TFViTEmbeddings
535
+ class TFGroupViTVisionEmbeddings(keras.layers.Layer):
536
+ """
537
+ Construct the position and patch embeddings.
538
+
539
+ """
540
+
541
+ def __init__(self, config: GroupViTVisionConfig, **kwargs):
542
+ super().__init__(**kwargs)
543
+
544
+ self.patch_embeddings = TFGroupViTPatchEmbeddings(config, name="patch_embeddings")
545
+ self.dropout = keras.layers.Dropout(rate=config.dropout, name="dropout")
546
+ self.layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm")
547
+ self.config = config
548
+
549
+ def build(self, input_shape=None):
550
+ num_patches = self.patch_embeddings.num_patches
551
+ self.position_embeddings = self.add_weight(
552
+ shape=(1, num_patches, self.config.hidden_size),
553
+ initializer="zeros",
554
+ trainable=True,
555
+ name="position_embeddings",
556
+ )
557
+
558
+ if self.built:
559
+ return
560
+ self.built = True
561
+ if getattr(self, "patch_embeddings", None) is not None:
562
+ with tf.name_scope(self.patch_embeddings.name):
563
+ self.patch_embeddings.build(None)
564
+ if getattr(self, "dropout", None) is not None:
565
+ with tf.name_scope(self.dropout.name):
566
+ self.dropout.build(None)
567
+ if getattr(self, "layernorm", None) is not None:
568
+ with tf.name_scope(self.layernorm.name):
569
+ self.layernorm.build([None, None, self.config.hidden_size])
570
+
571
+ def interpolate_pos_encoding(self, embeddings, height, width) -> tf.Tensor:
572
+ """
573
+ This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher
574
+ resolution images.
575
+
576
+ Source:
577
+ https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174
578
+ """
579
+
580
+ batch_size, num_patches, dim = shape_list(embeddings)
581
+ num_positions = shape_list(self.position_embeddings)[1]
582
+
583
+ if num_patches == num_positions and height == width:
584
+ return self.position_embeddings
585
+ patch_pos_embed = self.position_embeddings
586
+ h0 = height // self.config.patch_size
587
+ w0 = width // self.config.patch_size
588
+ patch_pos_embed = tf.image.resize(
589
+ images=tf.reshape(
590
+ patch_pos_embed, shape=(1, int(math.sqrt(num_positions)), int(math.sqrt(num_positions)), dim)
591
+ ),
592
+ size=(h0, w0),
593
+ method="bicubic",
594
+ )
595
+ patch_pos_embed = tf.reshape(tensor=patch_pos_embed, shape=(1, -1, dim))
596
+ return patch_pos_embed
597
+
598
+ def call(
599
+ self, pixel_values: tf.Tensor, interpolate_pos_encoding: bool = False, training: bool = False
600
+ ) -> tf.Tensor:
601
+ _, _, height, width = shape_list(pixel_values)
602
+ embeddings = self.patch_embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
603
+ embeddings = self.layernorm(embeddings)
604
+
605
+ # add positional encoding to each token
606
+ if interpolate_pos_encoding:
607
+ embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
608
+ else:
609
+ embeddings = embeddings + self.position_embeddings
610
+
611
+ embeddings = self.dropout(embeddings)
612
+
613
+ return embeddings
614
+
615
+
616
+ # Copied from transformers.models.clip.modeling_tf_clip.TFCLIPTextEmbeddings with CLIP->GroupViT
617
+ class TFGroupViTTextEmbeddings(keras.layers.Layer):
618
+ def __init__(self, config: GroupViTTextConfig, **kwargs):
619
+ super().__init__(**kwargs)
620
+
621
+ self.embed_dim = config.hidden_size
622
+
623
+ self.config = config
624
+
625
+ def build(self, input_shape: tf.TensorShape = None):
626
+ with tf.name_scope("token_embedding"):
627
+ self.weight = self.add_weight(
628
+ shape=(self.config.vocab_size, self.embed_dim),
629
+ initializer=get_initializer(self.config.initializer_factor * self.config.initializer_range),
630
+ trainable=True,
631
+ name="weight",
632
+ )
633
+
634
+ with tf.name_scope("position_embedding"):
635
+ self.position_embedding = self.add_weight(
636
+ shape=(self.config.max_position_embeddings, self.embed_dim),
637
+ initializer=get_initializer(self.config.initializer_factor * self.config.initializer_range),
638
+ trainable=True,
639
+ name="embeddings",
640
+ )
641
+
642
+ super().build(input_shape)
643
+
644
+ def call(
645
+ self,
646
+ input_ids: tf.Tensor = None,
647
+ position_ids: tf.Tensor = None,
648
+ inputs_embeds: tf.Tensor = None,
649
+ ) -> tf.Tensor:
650
+ """
651
+ Applies the token and position embeddings to the input tensors.
652
+
653
+ Returns:
654
+ final_embeddings (`tf.Tensor`): output embedding tensor.
655
+ """
656
+ if input_ids is None and inputs_embeds is None:
657
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
658
+
659
+ if inputs_embeds is None:
660
+ check_embeddings_within_bounds(input_ids, self.config.vocab_size)
661
+ inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
662
+
663
+ input_shape = shape_list(inputs_embeds)[:-1]
664
+
665
+ if position_ids is None:
666
+ position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)
667
+
668
+ position_embeds = tf.gather(params=self.position_embedding, indices=position_ids)
669
+ position_embeds = tf.tile(input=position_embeds, multiples=(input_shape[0], 1, 1))
670
+ final_embeddings = inputs_embeds + position_embeds
671
+
672
+ return final_embeddings
673
+
674
+
675
+ class TFGroupViTStage(keras.layers.Layer):
676
+ """This corresponds to the `GroupingLayer` class in the GroupViT implementation."""
677
+
678
+ def __init__(
679
+ self,
680
+ config: GroupViTVisionConfig,
681
+ depth: int,
682
+ num_prev_group_token: int,
683
+ num_group_token: int,
684
+ num_output_group: int,
685
+ **kwargs,
686
+ ):
687
+ super().__init__(**kwargs)
688
+ self.config = config
689
+ self.depth = depth
690
+ self.num_group_token = num_group_token
691
+ self.layers = [TFGroupViTEncoderLayer(config, name=f"layers_._{i}") for i in range(depth)]
692
+
693
+ if num_group_token > 0:
694
+ self.downsample = TFGroupViTTokenAssign(
695
+ config=config,
696
+ num_group_token=num_group_token,
697
+ num_output_group=num_output_group,
698
+ name="downsample",
699
+ )
700
+ else:
701
+ self.downsample = None
702
+
703
+ if num_prev_group_token > 0 and num_group_token > 0:
704
+ self.group_projector = [
705
+ keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="group_projector.0"),
706
+ TFGroupViTMixerMLP(
707
+ config, num_prev_group_token, config.hidden_size // 2, num_group_token, name="group_projector.1"
708
+ ),
709
+ ]
710
+ else:
711
+ self.group_projector = None
712
+
713
+ def build(self, input_shape=None):
714
+ if self.num_group_token > 0:
715
+ self.group_token = self.add_weight(
716
+ shape=(1, self.num_group_token, self.config.hidden_size),
717
+ initializer="zeros",
718
+ trainable=True,
719
+ name="group_token",
720
+ )
721
+ else:
722
+ self.group_token = None
723
+
724
+ if self.built:
725
+ return
726
+ self.built = True
727
+ if getattr(self, "downsample", None) is not None:
728
+ with tf.name_scope(self.downsample.name):
729
+ self.downsample.build(None)
730
+ if getattr(self, "layers", None) is not None:
731
+ for layer in self.layers:
732
+ with tf.name_scope(layer.name):
733
+ layer.build(None)
734
+ if getattr(self, "group_projector", None) is not None:
735
+ with tf.name_scope(self.group_projector[0].name):
736
+ self.group_projector[0].build([None, None, self.config.hidden_size])
737
+ with tf.name_scope(self.group_projector[1].name):
738
+ self.group_projector[1].build(None)
739
+
740
+ @property
741
+ def with_group_token(self):
742
+ return self.group_token is not None
743
+
744
+ def split_x(self, x: tf.Tensor) -> tf.Tensor:
745
+ if self.with_group_token:
746
+ return x[:, : -self.num_group_token], x[:, -self.num_group_token :]
747
+ else:
748
+ return x, None
749
+
750
+ def concat_x(self, x: tf.Tensor, group_token: tf.Tensor | None = None) -> tf.Tensor:
751
+ if group_token is None:
752
+ return x
753
+ return tf.concat([x, group_token], axis=1)
754
+
755
+ def call(
756
+ self,
757
+ hidden_states: tf.Tensor,
758
+ prev_group_token: tf.Tensor | None = None,
759
+ output_attentions: bool = False,
760
+ training: bool = False,
761
+ ) -> Tuple[tf.Tensor]:
762
+ """
763
+ Args:
764
+ hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
765
+ prev_group_token (`tf.Tensor`, *optional*): group tokens carried over from the previous stage, of
+ shape `(batch, num_prev_group_token, embed_dim)`, projected and added to this stage's group tokens.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the grouping tensors of the Grouping block.
770
+ """
771
+ if self.with_group_token:
772
+ group_token = tf.tile(self.group_token, multiples=(shape_list(hidden_states)[0], 1, 1))
773
+ if self.group_projector is not None:
774
+ for layer in self.group_projector:
775
+ prev_group_token = layer(prev_group_token)
776
+ group_token = group_token + prev_group_token
777
+ else:
778
+ group_token = None
779
+
780
+ x = hidden_states
781
+
782
+ cat_x = self.concat_x(x, group_token)
783
+ for layer in self.layers:
784
+ layer_out = layer(
785
+ cat_x,
786
+ attention_mask=None,
787
+ causal_attention_mask=None,
788
+ output_attentions=None,
789
+ )
790
+ cat_x = layer_out[0]
791
+
792
+ x, group_token = self.split_x(cat_x)
793
+
794
+ attention = None
795
+ if self.downsample is not None:
796
+ x, attention = self.downsample(x, group_token)
797
+
798
+ outputs = (x, group_token)
799
+ if output_attentions:
800
+ outputs = outputs + (attention,)
801
+
802
+ return outputs
803
+
804
+
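+ # Token-flow sketch for `TFGroupViTStage` above (illustrative sizes, assuming 196 patch tokens,
+ # num_group_token=64 and num_output_group=64 as in the first stage of the default vision config):
+ # `concat_x` yields a (batch, 196 + 64, dim) sequence that runs through the encoder layers,
+ # `split_x` separates the trailing 64 group tokens again, and `downsample`
+ # (TFGroupViTTokenAssign) assigns the 196 patch tokens to 64 grouped tokens, which become the
+ # hidden states of the next stage.
+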
805
+ class TFGroupViTMLP(keras.layers.Layer):
806
+ def __init__(
807
+ self,
808
+ config: GroupViTVisionConfig,
809
+ hidden_size: Optional[int] = None,
810
+ intermediate_size: Optional[int] = None,
811
+ output_size: Optional[int] = None,
812
+ **kwargs,
813
+ ):
814
+ super().__init__(**kwargs)
815
+ self.config = config
816
+ self.activation_fn = get_tf_activation(config.hidden_act)
817
+ hidden_size = hidden_size if hidden_size is not None else config.hidden_size
818
+ intermediate_size = intermediate_size if intermediate_size is not None else config.intermediate_size
819
+ output_size = output_size if output_size is not None else hidden_size
820
+ self.fc1 = keras.layers.Dense(intermediate_size, name="fc1")
821
+ self.fc2 = keras.layers.Dense(output_size, name="fc2")
822
+ self.intermediate_size = intermediate_size
823
+ self.hidden_size = hidden_size
824
+
825
+ def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor:
826
+ hidden_states = self.fc1(hidden_states)
827
+ hidden_states = self.activation_fn(hidden_states)
828
+ hidden_states = self.fc2(hidden_states)
829
+ return hidden_states
830
+
831
+ def build(self, input_shape=None):
832
+ if self.built:
833
+ return
834
+ self.built = True
835
+ if getattr(self, "fc1", None) is not None:
836
+ with tf.name_scope(self.fc1.name):
837
+ self.fc1.build([None, None, self.hidden_size])
838
+ if getattr(self, "fc2", None) is not None:
839
+ with tf.name_scope(self.fc2.name):
840
+ self.fc2.build([None, None, self.intermediate_size])
841
+
842
+
843
+ class TFGroupViTMixerMLP(TFGroupViTMLP):
844
+ def call(self, x, training: bool = False):
845
+ x = super().call(hidden_states=tf.transpose(x, perm=(0, 2, 1)))
846
+ return tf.transpose(x, perm=(0, 2, 1))
847
+
848
+
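+ # Shape note for `TFGroupViTMixerMLP` above: the transposes make the MLP act across the token
+ # axis rather than the channel axis, e.g. a (batch, num_prev_group_token, dim) input is
+ # transposed to (batch, dim, num_prev_group_token), projected by the dense layers to
+ # (batch, dim, num_group_token), and transposed back to (batch, num_group_token, dim).
+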
849
+ # Adapted from transformers.models.clip.modeling_tf_clip.TFCLIPAttention
850
+ class TFGroupViTAttention(keras.layers.Layer):
851
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
852
+
853
+ def __init__(self, config: GroupViTConfig, **kwargs):
854
+ super().__init__(**kwargs)
855
+
856
+ self.embed_dim = config.hidden_size
857
+ self.num_attention_heads = config.num_attention_heads
858
+ self.attention_head_size = self.embed_dim // self.num_attention_heads
859
+ if self.attention_head_size * self.num_attention_heads != self.embed_dim:
860
+ raise ValueError(
861
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
862
+ f" {self.num_attention_heads})."
863
+ )
864
+
865
+ factor = config.initializer_factor
866
+ in_proj_std = (self.embed_dim**-0.5) * ((2 * config.num_hidden_layers) ** -0.5) * factor
867
+ out_proj_std = (self.embed_dim**-0.5) * factor
868
+
869
+ self.sqrt_att_head_size = math.sqrt(self.attention_head_size)
870
+
871
+ self.q_proj = keras.layers.Dense(
872
+ units=self.embed_dim, kernel_initializer=get_initializer(in_proj_std), name="q_proj"
873
+ )
874
+ self.k_proj = keras.layers.Dense(
875
+ units=self.embed_dim, kernel_initializer=get_initializer(in_proj_std), name="k_proj"
876
+ )
877
+ self.v_proj = keras.layers.Dense(
878
+ units=self.embed_dim, kernel_initializer=get_initializer(in_proj_std), name="v_proj"
879
+ )
880
+
881
+ self.dropout = keras.layers.Dropout(rate=config.attention_dropout)
882
+
883
+ self.out_proj = keras.layers.Dense(
884
+ units=self.embed_dim, kernel_initializer=get_initializer(out_proj_std), name="out_proj"
885
+ )
886
+
887
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfAttention.transpose_for_scores
888
+ def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor:
889
+ # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
890
+ tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size))
891
+
892
+ # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size]
893
+ return tf.transpose(tensor, perm=[0, 2, 1, 3])
894
+
895
+ def call(
896
+ self,
897
+ hidden_states: tf.Tensor,
898
+ attention_mask: tf.Tensor = None,
899
+ causal_attention_mask: tf.Tensor = None,
900
+ output_attentions: bool = None,
901
+ encoder_hidden_states: tf.Tensor = None,
902
+ training: bool = False,
903
+ ) -> Tuple[tf.Tensor]:
904
+ """Input shape: Batch x Time x Channel"""
905
+
906
+ batch_size = shape_list(hidden_states)[0]
907
+ is_cross_attention = encoder_hidden_states is not None
908
+
909
+ mixed_query_layer = self.q_proj(inputs=hidden_states)
910
+ if is_cross_attention:
911
+ mixed_key_layer = self.k_proj(inputs=encoder_hidden_states)
912
+ mixed_value_layer = self.v_proj(inputs=encoder_hidden_states)
913
+ else:
914
+ mixed_key_layer = self.k_proj(inputs=hidden_states)
915
+ mixed_value_layer = self.v_proj(inputs=hidden_states)
916
+
917
+ query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
918
+ key_layer = self.transpose_for_scores(mixed_key_layer, batch_size)
919
+ value_layer = self.transpose_for_scores(mixed_value_layer, batch_size)
920
+
921
+ # Take the dot product between "query" and "key" to get the raw attention scores.
922
+ # (batch size, num_heads, seq_len_q, seq_len_k)
923
+ attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
924
+ dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype)
925
+ attention_scores = tf.divide(attention_scores, dk)
926
+
927
+ # apply the causal_attention_mask first
928
+ if causal_attention_mask is not None:
929
+ # Apply the causal attention mask (precomputed for all layers in TFCLIPModel call() function)
930
+ attention_scores = tf.add(attention_scores, causal_attention_mask)
931
+
932
+ if attention_mask is not None:
933
+ # Apply the attention mask (precomputed for all layers in TFCLIPModel call() function)
934
+ attention_scores = tf.add(attention_scores, attention_mask)
935
+
936
+ # Normalize the attention scores to probabilities.
937
+ _attention_probs = stable_softmax(logits=attention_scores, axis=-1)
938
+
939
+ # This is actually dropping out entire tokens to attend to, which might
940
+ # seem a bit unusual, but is taken from the original Transformer paper.
941
+ attention_probs = self.dropout(inputs=_attention_probs)
942
+
943
+ attention_output = tf.matmul(attention_probs, value_layer)
944
+ attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3])
945
+
946
+ # (batch_size, seq_len_q, embed_dim)
947
+ attention_output = tf.reshape(tensor=attention_output, shape=(batch_size, -1, self.embed_dim))
948
+
949
+ attention_output = self.out_proj(attention_output)
950
+ # In TFBert, attention weights are returned after dropout.
951
+ # However, in CLIP, they are returned before dropout.
952
+ outputs = (attention_output, _attention_probs) if output_attentions else (attention_output,)
953
+
954
+ return outputs
955
+
956
+ def build(self, input_shape=None):
957
+ if self.built:
958
+ return
959
+ self.built = True
960
+ if getattr(self, "q_proj", None) is not None:
961
+ with tf.name_scope(self.q_proj.name):
962
+ self.q_proj.build([None, None, self.embed_dim])
963
+ if getattr(self, "k_proj", None) is not None:
964
+ with tf.name_scope(self.k_proj.name):
965
+ self.k_proj.build([None, None, self.embed_dim])
966
+ if getattr(self, "v_proj", None) is not None:
967
+ with tf.name_scope(self.v_proj.name):
968
+ self.v_proj.build([None, None, self.embed_dim])
969
+ if getattr(self, "out_proj", None) is not None:
970
+ with tf.name_scope(self.out_proj.name):
971
+ self.out_proj.build([None, None, self.embed_dim])
972
+
973
+
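+ # Shape walk-through for `TFGroupViTAttention` above (illustrative values): with batch=2,
+ # seq_len=5, num_attention_heads=4 and attention_head_size=16 (embed_dim=64), the projections
+ # become (2, 4, 5, 16) after `transpose_for_scores`, the score matmul gives (2, 4, 5, 5),
+ # the value matmul gives (2, 4, 5, 16), and the final reshape restores (2, 5, 64) before
+ # `out_proj`.
+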
974
+ # Copied from transformers.models.clip.modeling_tf_clip.TFCLIPEncoderLayer with CLIP->GroupViT
975
+ class TFGroupViTEncoderLayer(keras.layers.Layer):
976
+ def __init__(self, config: GroupViTConfig, **kwargs):
977
+ super().__init__(**kwargs)
978
+
979
+ self.embed_dim = config.hidden_size
980
+ self.self_attn = TFGroupViTAttention(config, name="self_attn")
981
+ self.layer_norm1 = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm1")
982
+ self.mlp = TFGroupViTMLP(config, name="mlp")
983
+ self.layer_norm2 = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm2")
984
+
985
+ def call(
986
+ self,
987
+ hidden_states: tf.Tensor,
988
+ attention_mask: tf.Tensor,
989
+ causal_attention_mask: tf.Tensor,
990
+ output_attentions: bool,
991
+ training: bool = False,
992
+ ) -> Tuple[tf.Tensor]:
993
+ """
994
+ Args:
995
+ hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
996
+ attention_mask (`tf.Tensor`): attention mask of size
997
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
998
+ causal_attention_mask (`tf.Tensor`): causal attention mask of size
999
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
1000
+ output_attentions (`bool`):
1001
+ Whether or not to return the attentions tensors of all attention layers. See `outputs` under returned
1002
+ tensors for more detail.
1003
+ """
1004
+ residual = hidden_states
1005
+
1006
+ hidden_states = self.layer_norm1(inputs=hidden_states)
1007
+ attention_outputs = self.self_attn(
1008
+ hidden_states=hidden_states,
1009
+ attention_mask=attention_mask,
1010
+ causal_attention_mask=causal_attention_mask,
1011
+ output_attentions=output_attentions,
1012
+ training=training,
1013
+ )
1014
+ hidden_states = attention_outputs[0]
1015
+ hidden_states = residual + hidden_states
1016
+
1017
+ residual = hidden_states
1018
+ hidden_states = self.layer_norm2(inputs=hidden_states)
1019
+ hidden_states = self.mlp(hidden_states=hidden_states)
1020
+ hidden_states = residual + hidden_states
1021
+
1022
+ outputs = (hidden_states,) + attention_outputs[1:] # add attentions if we output them
1023
+
1024
+ return outputs
1025
+
1026
+ def build(self, input_shape=None):
1027
+ if self.built:
1028
+ return
1029
+ self.built = True
1030
+ if getattr(self, "self_attn", None) is not None:
1031
+ with tf.name_scope(self.self_attn.name):
1032
+ self.self_attn.build(None)
1033
+ if getattr(self, "layer_norm1", None) is not None:
1034
+ with tf.name_scope(self.layer_norm1.name):
1035
+ self.layer_norm1.build([None, None, self.embed_dim])
1036
+ if getattr(self, "mlp", None) is not None:
1037
+ with tf.name_scope(self.mlp.name):
1038
+ self.mlp.build(None)
1039
+ if getattr(self, "layer_norm2", None) is not None:
1040
+ with tf.name_scope(self.layer_norm2.name):
1041
+ self.layer_norm2.build([None, None, self.embed_dim])
1042
+
1043
+
1044
+ # Adapted from transformers.models.clip.modeling_tf_clip.TFCLIPEncoder
1045
+ class TFGroupViTTextEncoder(keras.layers.Layer):
1046
+ def __init__(self, config: GroupViTTextConfig, **kwargs):
1047
+ super().__init__(**kwargs)
1048
+
1049
+ self.layers = [TFGroupViTEncoderLayer(config, name=f"layers_._{i}") for i in range(config.num_hidden_layers)]
1050
+
1051
+ def call(
1052
+ self,
1053
+ hidden_states,
1054
+ attention_mask: tf.Tensor,
1055
+ causal_attention_mask: tf.Tensor,
1056
+ output_attentions: bool,
1057
+ output_hidden_states: bool,
1058
+ return_dict: bool,
1059
+ training: bool = False,
1060
+ ) -> Union[Tuple, TFBaseModelOutput]:
1061
+ encoder_states = () if output_hidden_states else None
1062
+ all_attentions = () if output_attentions else None
1063
+
1064
+ for idx, encoder_layer in enumerate(self.layers):
1065
+ if output_hidden_states:
1066
+ encoder_states = encoder_states + (hidden_states,)
1067
+
1068
+ layer_outputs = encoder_layer(
1069
+ hidden_states,
1070
+ attention_mask,
1071
+ causal_attention_mask,
1072
+ output_attentions=output_attentions,
1073
+ )
1074
+ hidden_states = layer_outputs[0]
1075
+
1076
+ if output_attentions:
1077
+ all_attentions = all_attentions + (layer_outputs[1],)
1078
+
1079
+ if output_hidden_states:
1080
+ encoder_states = encoder_states + (hidden_states,)
1081
+
1082
+ if not return_dict:
1083
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
1084
+ return TFBaseModelOutput(
1085
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
1086
+ )
1087
+
1088
+ def build(self, input_shape=None):
1089
+ if self.built:
1090
+ return
1091
+ self.built = True
1092
+ if getattr(self, "layers", None) is not None:
1093
+ for layer in self.layers:
1094
+ with tf.name_scope(layer.name):
1095
+ layer.build(None)
1096
+
1097
+
1098
+ class TFGroupViTVisionEncoder(keras.layers.Layer):
1099
+ def __init__(self, config: GroupViTVisionConfig, **kwargs) -> None:
1100
+ super().__init__(**kwargs)
1101
+
1102
+ self.stages = [
1103
+ TFGroupViTStage(
1104
+ config=config,
1105
+ depth=config.depths[i],
1106
+ num_group_token=config.num_group_tokens[i],
1107
+ num_output_group=config.num_output_groups[i],
1108
+ num_prev_group_token=config.num_output_groups[i - 1] if i > 0 else 0,
1109
+ name=f"stages_._{i}",
1110
+ )
1111
+ for i in range(len(config.depths))
1112
+ ]
1113
+
1114
+ def call(
1115
+ self,
1116
+ hidden_states: tf.Tensor,
1117
+ output_hidden_states: bool,
1118
+ output_attentions: bool,
1119
+ return_dict: bool,
1120
+ training: bool = False,
1121
+ ) -> Union[tuple, TFBaseModelOutput]:
1122
+ all_hidden_states = () if output_hidden_states else None
1123
+ all_groupings = () if output_attentions else None
1124
+
1125
+ group_tokens = None
1126
+
1127
+ for stage in self.stages:
1128
+ if output_hidden_states:
1129
+ all_hidden_states = all_hidden_states + (hidden_states,)
1130
+
1131
+ layer_outputs = stage(hidden_states, group_tokens, output_attentions)
1132
+
1133
+ hidden_states = layer_outputs[0]
1134
+ group_tokens = layer_outputs[1]
1135
+
1136
+ if output_attentions and layer_outputs[2] is not None:
1137
+ all_groupings = all_groupings + (layer_outputs[2],)
1138
+
1139
+ if output_hidden_states:
1140
+ all_hidden_states = all_hidden_states + (hidden_states,)
1141
+
1142
+ if not return_dict:
1143
+ return tuple(v for v in [hidden_states, all_hidden_states, all_groupings] if v is not None)
1144
+ return TFBaseModelOutput(
1145
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_groupings
1146
+ )
1147
+
1148
+ def build(self, input_shape=None):
1149
+ if self.built:
1150
+ return
1151
+ self.built = True
1152
+ if getattr(self, "stages", None) is not None:
1153
+ for layer in self.stages:
1154
+ with tf.name_scope(layer.name):
1155
+ layer.build(None)
1156
+
1157
+
1158
+ # Copied from transformers.models.clip.modeling_tf_clip.TFCLIPTextTransformer with CLIPText->GroupViTText, CLIPEncoder->GroupViTTextEncoder
1159
+ class TFGroupViTTextTransformer(keras.layers.Layer):
1160
+ def __init__(self, config: GroupViTTextConfig, **kwargs):
1161
+ super().__init__(**kwargs)
1162
+
1163
+ self.embeddings = TFGroupViTTextEmbeddings(config, name="embeddings")
1164
+ self.encoder = TFGroupViTTextEncoder(config, name="encoder")
1165
+ self.final_layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="final_layer_norm")
1166
+
1167
+ # For `pooled_output` computation
1168
+ self.eos_token_id = config.eos_token_id
1169
+ self.embed_dim = config.hidden_size
1170
+
1171
+ def call(
1172
+ self,
1173
+ input_ids: TFModelInputType,
1174
+ attention_mask: tf.Tensor,
1175
+ position_ids: tf.Tensor,
1176
+ output_attentions: bool,
1177
+ output_hidden_states: bool,
1178
+ return_dict: bool,
1179
+ training: bool = False,
1180
+ ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
1181
+ input_shape = shape_list(input_ids)
1182
+
1183
+ embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids)
1184
+
1185
+ batch_size, seq_length = input_shape
1186
+ # CLIP's text model uses causal mask, prepare it here.
1187
+ # https://github.com/openai/CLIP/blob/cfcffb90e69f37bf2ff1e988237a0fbe41f33c04/clip/model.py#L324
1188
+ causal_attention_mask = self._build_causal_attention_mask(batch_size, seq_length, dtype=embedding_output.dtype)
1189
+
1190
+ # check attention mask and invert
1191
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
1192
+ attention_mask = _expand_mask(attention_mask)
1193
+
1194
+ encoder_outputs = self.encoder(
1195
+ hidden_states=embedding_output,
1196
+ attention_mask=attention_mask,
1197
+ causal_attention_mask=causal_attention_mask,
1198
+ output_attentions=output_attentions,
1199
+ output_hidden_states=output_hidden_states,
1200
+ return_dict=return_dict,
1201
+ training=training,
1202
+ )
1203
+
1204
+ sequence_output = encoder_outputs[0]
1205
+ sequence_output = self.final_layer_norm(inputs=sequence_output)
1206
+
1207
+ if self.eos_token_id == 2:
1208
+ # The `eos_token_id` was incorrect before PR #24773: Let's keep what has been done here.
1209
+ # A CLIP model with such `eos_token_id` in the config can't work correctly with extra new tokens added
1210
+ # ------------------------------------------------------------
1211
+ # text_embeds.shape = [batch_size, n_ctx, transformer.width]
1212
+ # take features from the eot embedding (eot_token is the highest number in each sequence)
1213
+ pooled_output = tf.gather_nd(
1214
+ params=sequence_output,
1215
+ indices=tf.stack(
1216
+ values=(tf.range(input_shape[0], dtype=tf.int64), tf.math.argmax(input_ids, axis=-1)), axis=1
1217
+ ),
1218
+ )
1219
+ else:
1220
+ # The config gets updated `eos_token_id` from PR #24773 (so the use of extra new tokens is possible)
1221
+ pooled_output = tf.gather_nd(
1222
+ params=sequence_output,
1223
+ indices=tf.stack(
1224
+ values=(
1225
+ tf.range(input_shape[0], dtype=tf.int64),
1226
+ tf.math.argmax(tf.cast(input_ids == self.eos_token_id, dtype=tf.int8), axis=-1),
1227
+ ),
1228
+ axis=1,
1229
+ ),
1230
+ )
1231
+
1232
+ if not return_dict:
1233
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
1234
+
1235
+ return TFBaseModelOutputWithPooling(
1236
+ last_hidden_state=sequence_output,
1237
+ pooler_output=pooled_output,
1238
+ hidden_states=encoder_outputs.hidden_states,
1239
+ attentions=encoder_outputs.attentions,
1240
+ )
1241
+
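+ # Pooling example for the two branches above (illustrative token ids, assuming
+ # eos_token_id=49407): for input_ids = [[49406, 320, 1125, 49407, 49407]], the legacy branch
+ # takes argmax(input_ids) = 3 (the eos id is the largest id), while the updated branch takes
+ # argmax(input_ids == eos_token_id) = 3 (the first eos), which stays correct even when extra
+ # tokens with ids larger than the eos id are added to the vocabulary.
+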
1242
+ def _build_causal_attention_mask(self, batch_size, seq_length, dtype=tf.float32):
1243
+ # It is possible with an unspecified sequence length for seq_length to be
1244
+ # a runtime value, which is unsupported by tf.constant. Per the TensorFlow
1245
+ # docs, tf.fill can handle runtime dynamic shapes:
1246
+ # https://www.tensorflow.org/api_docs/python/tf/fill
1247
+ diag = tf.cast(tf.fill((seq_length,), 0.0), dtype)
1248
+
1249
+ # set an additive 2D attention mask with all places being masked
1250
+ to_mask = tf.cast(tf.fill((seq_length, seq_length), -10000.0), dtype)
1251
+
1252
+ # set diagonal & lower triangular parts to 0 (i.e. the places not to be masked)
1253
+ # TIP: think the 2D matrix as the space of (query_seq, key_seq)
1254
+ to_mask = tf.linalg.band_part(to_mask, 0, -1)
1255
+ # to_mask = tf.linalg.band_part(to_mask, -1, 0)
1256
+ to_mask = tf.linalg.set_diag(to_mask, diagonal=diag)
1257
+
1258
+ return tf.broadcast_to(input=to_mask, shape=(batch_size, 1, seq_length, seq_length))
1259
+
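+ # Example of the additive mask built above for seq_length = 3 (before broadcasting):
+ # [[     0., -10000., -10000.],
+ #  [     0.,      0., -10000.],
+ #  [     0.,      0.,      0.]]
+ # i.e. each query position can only attend to itself and to earlier key positions.
+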
1260
+ def build(self, input_shape=None):
1261
+ if self.built:
1262
+ return
1263
+ self.built = True
1264
+ if getattr(self, "embeddings", None) is not None:
1265
+ with tf.name_scope(self.embeddings.name):
1266
+ self.embeddings.build(None)
1267
+ if getattr(self, "encoder", None) is not None:
1268
+ with tf.name_scope(self.encoder.name):
1269
+ self.encoder.build(None)
1270
+ if getattr(self, "final_layer_norm", None) is not None:
1271
+ with tf.name_scope(self.final_layer_norm.name):
1272
+ self.final_layer_norm.build([None, None, self.embed_dim])
1273
+
1274
+
1275
+ # Adapted from transformers.models.clip.modeling_tf_clip.TFCLIPVisionTransformer
1276
+ class TFGroupViTVisionTransformer(keras.layers.Layer):
1277
+ def __init__(self, config: GroupViTVisionConfig, **kwargs):
1278
+ super().__init__(**kwargs)
1279
+
1280
+ self.embeddings = TFGroupViTVisionEmbeddings(config, name="embeddings")
1281
+ self.encoder = TFGroupViTVisionEncoder(config, name="encoder")
1282
+ self.layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm")
1283
+ self.embed_dim = config.hidden_size
1284
+
1285
+ def call(
1286
+ self,
1287
+ pixel_values: TFModelInputType,
1288
+ output_attentions: bool,
1289
+ output_hidden_states: bool,
1290
+ return_dict: bool,
1291
+ training: bool = False,
1292
+ ) -> Union[Tuple, TFBaseModelOutputWithPooling]:
1293
+ embedding_output = self.embeddings(pixel_values)
1294
+
1295
+ encoder_outputs = self.encoder(
1296
+ hidden_states=embedding_output,
1297
+ output_hidden_states=output_hidden_states,
1298
+ output_attentions=output_attentions,
1299
+ return_dict=return_dict,
1300
+ )
1301
+
1302
+ last_hidden_state = encoder_outputs[0]
1303
+
1304
+ # normalize the last hidden state
1305
+ last_hidden_state = self.layernorm(last_hidden_state)
1306
+ pooled_output = tf.math.reduce_mean(last_hidden_state, axis=1)
1307
+
1308
+ if not return_dict:
1309
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
1310
+
1311
+ return TFBaseModelOutputWithPooling(
1312
+ last_hidden_state=last_hidden_state,
1313
+ pooler_output=pooled_output,
1314
+ hidden_states=encoder_outputs.hidden_states,
1315
+ attentions=encoder_outputs.attentions,
1316
+ )
1317
+
1318
+ def build(self, input_shape=None):
1319
+ if self.built:
1320
+ return
1321
+ self.built = True
1322
+ if getattr(self, "embeddings", None) is not None:
1323
+ with tf.name_scope(self.embeddings.name):
1324
+ self.embeddings.build(None)
1325
+ if getattr(self, "encoder", None) is not None:
1326
+ with tf.name_scope(self.encoder.name):
1327
+ self.encoder.build(None)
1328
+ if getattr(self, "layernorm", None) is not None:
1329
+ with tf.name_scope(self.layernorm.name):
1330
+ self.layernorm.build([None, None, self.embed_dim])
1331
+
1332
+
1333
+ @keras_serializable
1334
+ # Copied from transformers.models.clip.modeling_tf_clip.TFCLIPTextMainLayer with CLIP->GroupViT
1335
+ class TFGroupViTTextMainLayer(keras.layers.Layer):
1336
+ config_class = GroupViTTextConfig
1337
+
1338
+ def __init__(self, config: GroupViTTextConfig, **kwargs):
1339
+ super().__init__(**kwargs)
1340
+ self.config = config
1341
+ self.text_model = TFGroupViTTextTransformer(config, name="text_model")
1342
+
1343
+ def get_input_embeddings(self) -> keras.layers.Layer:
1344
+ return self.text_model.embeddings
1345
+
1346
+ def set_input_embeddings(self, value: tf.Variable):
1347
+ self.text_model.embeddings.weight = value
1348
+ self.text_model.embeddings.vocab_size = shape_list(value)[0]
1349
+
1350
+ @unpack_inputs
1351
+ def call(
1352
+ self,
1353
+ input_ids: TFModelInputType | None = None,
1354
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1355
+ position_ids: np.ndarray | tf.Tensor | None = None,
1356
+ output_attentions: Optional[bool] = None,
1357
+ output_hidden_states: Optional[bool] = None,
1358
+ return_dict: Optional[bool] = None,
1359
+ training: bool = False,
1360
+ ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
1361
+ if input_ids is None:
1362
+ raise ValueError("You have to specify input_ids")
1363
+
1364
+ input_shape = shape_list(input_ids)
1365
+
1366
+ if attention_mask is None:
1367
+ attention_mask = tf.fill(dims=input_shape, value=1)
1368
+
1369
+ text_model_outputs = self.text_model(
1370
+ input_ids=input_ids,
1371
+ attention_mask=attention_mask,
1372
+ position_ids=position_ids,
1373
+ output_attentions=output_attentions,
1374
+ output_hidden_states=output_hidden_states,
1375
+ return_dict=return_dict,
1376
+ training=training,
1377
+ )
1378
+
1379
+ return text_model_outputs
1380
+
1381
+ def build(self, input_shape=None):
1382
+ if self.built:
1383
+ return
1384
+ self.built = True
1385
+ if getattr(self, "text_model", None) is not None:
1386
+ with tf.name_scope(self.text_model.name):
1387
+ self.text_model.build(None)
1388
+
1389
+
1390
+ @keras_serializable
1391
+ # Copied from transformers.models.clip.modeling_tf_clip.TFCLIPVisionMainLayer with CLIP->GroupViT
1392
+ class TFGroupViTVisionMainLayer(keras.layers.Layer):
1393
+ config_class = GroupViTVisionConfig
1394
+
1395
+ def __init__(self, config: GroupViTVisionConfig, **kwargs):
1396
+ super().__init__(**kwargs)
1397
+ self.config = config
1398
+ self.vision_model = TFGroupViTVisionTransformer(config, name="vision_model")
1399
+
1400
+ def get_input_embeddings(self) -> keras.layers.Layer:
1401
+ return self.vision_model.embeddings
1402
+
1403
+ @unpack_inputs
1404
+ def call(
1405
+ self,
1406
+ pixel_values: TFModelInputType | None = None,
1407
+ output_attentions: Optional[bool] = None,
1408
+ output_hidden_states: Optional[bool] = None,
1409
+ return_dict: Optional[bool] = None,
1410
+ training: bool = False,
1411
+ ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
1412
+ if pixel_values is None:
1413
+ raise ValueError("You have to specify pixel_values")
1414
+
1415
+ vision_model_outputs = self.vision_model(
1416
+ pixel_values=pixel_values,
1417
+ output_attentions=output_attentions,
1418
+ output_hidden_states=output_hidden_states,
1419
+ return_dict=return_dict,
1420
+ training=training,
1421
+ )
1422
+
1423
+ return vision_model_outputs
1424
+
1425
+ def build(self, input_shape=None):
1426
+ if self.built:
1427
+ return
1428
+ self.built = True
1429
+ if getattr(self, "vision_model", None) is not None:
1430
+ with tf.name_scope(self.vision_model.name):
1431
+ self.vision_model.build(None)
1432
+
1433
+
1434
+ @keras_serializable
1435
+ # Adapted from transformers.models.clip.modeling_tf_clip.TFCLIPMainLayer
1436
+ class TFGroupViTMainLayer(keras.layers.Layer):
1437
+ config_class = GroupViTConfig
1438
+
1439
+ def __init__(self, config: GroupViTConfig, **kwargs):
1440
+ super().__init__(**kwargs)
1441
+
1442
+ if not isinstance(config.text_config, GroupViTTextConfig):
1443
+ raise ValueError(
1444
+ "config.text_config is expected to be of type GroupViTTextConfig but is of type"
1445
+ f" {type(config.text_config)}."
1446
+ )
1447
+
1448
+ if not isinstance(config.vision_config, GroupViTVisionConfig):
1449
+ raise ValueError(
1450
+ "config.vision_config is expected to be of type GroupViTVisionConfig but is of type"
1451
+ f" {type(config.vision_config)}."
1452
+ )
1453
+
1454
+ self.config = config
1455
+
1456
+ text_config = config.text_config
1457
+ vision_config = config.vision_config
1458
+
1459
+ self.projection_dim = config.projection_dim
1460
+ self.projection_intermediate_dim = config.projection_intermediate_dim
1461
+ self.text_embed_dim = text_config.hidden_size
1462
+ self.vision_embed_dim = vision_config.hidden_size
1463
+
1464
+ self.text_model = TFGroupViTTextTransformer(text_config, name="text_model")
1465
+ self.vision_model = TFGroupViTVisionTransformer(vision_config, name="vision_model")
1466
+
1467
+ self.visual_projection = [
1468
+ keras.layers.Dense(self.projection_intermediate_dim, name="visual_projection.0"),
1469
+ keras.layers.BatchNormalization(name="visual_projection.1", momentum=0.9, epsilon=1e-5),
1470
+ keras.layers.ReLU(name="visual_projection.2"),
1471
+ keras.layers.Dense(self.projection_dim, name="visual_projection.3"),
1472
+ ]
1473
+ self.text_projection = [
1474
+ keras.layers.Dense(self.projection_intermediate_dim, name="text_projection.0"),
1475
+ keras.layers.BatchNormalization(name="text_projection.1", momentum=0.9, epsilon=1e-5),
1476
+ keras.layers.ReLU(name="text_projection.2"),
1477
+ keras.layers.Dense(self.projection_dim, name="text_projection.3"),
1478
+ ]
1479
+
1480
+ def build(self, input_shape=None):
1481
+ self.logit_scale = self.add_weight(
1482
+ shape=(1,),
1483
+ initializer=keras.initializers.Constant(self.config.logit_scale_init_value),
1484
+ trainable=True,
1485
+ name="logit_scale",
1486
+ )
1487
+
1488
+ if self.built:
1489
+ return
1490
+ self.built = True
1491
+ if getattr(self, "text_model", None) is not None:
1492
+ with tf.name_scope(self.text_model.name):
1493
+ self.text_model.build(None)
1494
+ if getattr(self, "vision_model", None) is not None:
1495
+ with tf.name_scope(self.vision_model.name):
1496
+ self.vision_model.build(None)
1497
+ if getattr(self, "visual_projection", None) is not None:
1498
+ with tf.name_scope(self.visual_projection[0].name):
1499
+ self.visual_projection[0].build([None, None, None, self.vision_embed_dim])
1500
+ with tf.name_scope(self.visual_projection[1].name):
1501
+ self.visual_projection[1].build((None, self.projection_intermediate_dim))
1502
+ with tf.name_scope(self.visual_projection[3].name):
1503
+ self.visual_projection[3].build([None, None, None, self.projection_intermediate_dim])
1504
+ if getattr(self, "text_projection", None) is not None:
1505
+ with tf.name_scope(self.text_projection[0].name):
1506
+ self.text_projection[0].build([None, None, None, self.text_embed_dim])
1507
+ with tf.name_scope(self.text_projection[1].name):
1508
+ self.text_projection[1].build((None, self.projection_intermediate_dim))
1509
+ with tf.name_scope(self.text_projection[3].name):
1510
+ self.text_projection[3].build([None, None, None, self.projection_intermediate_dim])
1511
+
1512
+ @unpack_inputs
1513
+ def get_text_features(
1514
+ self,
1515
+ input_ids: TFModelInputType | None = None,
1516
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1517
+ position_ids: np.ndarray | tf.Tensor | None = None,
1518
+ output_attentions: Optional[bool] = None,
1519
+ output_hidden_states: Optional[bool] = None,
1520
+ return_dict: Optional[bool] = None,
1521
+ training: bool = False,
1522
+ ) -> tf.Tensor:
1523
+ if input_ids is None:
1524
+ raise ValueError("You have to specify either input_ids")
1525
+
1526
+ input_shape = shape_list(input_ids)
1527
+
1528
+ if attention_mask is None:
1529
+ attention_mask = tf.fill(dims=input_shape, value=1)
1530
+
1531
+ text_outputs = self.text_model(
1532
+ input_ids=input_ids,
1533
+ attention_mask=attention_mask,
1534
+ position_ids=position_ids,
1535
+ output_attentions=output_attentions,
1536
+ output_hidden_states=output_hidden_states,
1537
+ return_dict=return_dict,
1538
+ training=training,
1539
+ )
1540
+
1541
+ pooled_output = text_outputs[1]
1542
+ for layer in self.text_projection:
1543
+ pooled_output = layer(pooled_output)
1544
+
1545
+ text_features = pooled_output
1546
+ return text_features
1547
+
1548
+ @unpack_inputs
1549
+ def get_image_features(
1550
+ self,
1551
+ pixel_values: TFModelInputType | None = None,
1552
+ output_attentions: Optional[bool] = None,
1553
+ output_hidden_states: Optional[bool] = None,
1554
+ return_dict: Optional[bool] = None,
1555
+ training: bool = False,
1556
+ ) -> tf.Tensor:
1557
+ if pixel_values is None:
1558
+ raise ValueError("You have to specify pixel_values")
1559
+
1560
+ vision_outputs = self.vision_model(
1561
+ pixel_values=pixel_values,
1562
+ output_attentions=output_attentions,
1563
+ output_hidden_states=output_hidden_states,
1564
+ return_dict=return_dict,
1565
+ training=training,
1566
+ )
1567
+
1568
+ pooled_output = vision_outputs[1]
1569
+ for layer in self.visual_projection:
1570
+ pooled_output = layer(pooled_output)
1571
+
1572
+ image_features = pooled_output
1573
+ return image_features
1574
+
1575
+ @unpack_inputs
1576
+ def call(
1577
+ self,
1578
+ input_ids: TFModelInputType | None = None,
1579
+ pixel_values: TFModelInputType | None = None,
1580
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1581
+ position_ids: np.ndarray | tf.Tensor | None = None,
1582
+ return_loss: Optional[bool] = None,
1583
+ output_attentions: Optional[bool] = None,
1584
+ output_hidden_states: Optional[bool] = None,
1585
+ output_segmentation: Optional[bool] = None,
1586
+ return_dict: Optional[bool] = None,
1587
+ training: bool = False,
1588
+ ) -> Union[TFGroupViTModelOutput, Tuple[tf.Tensor]]:
1589
+ if input_ids is None:
1590
+ raise ValueError("You have to specify either input_ids")
1591
+ if pixel_values is None:
1592
+ raise ValueError("You have to specify pixel_values")
1593
+
1594
+ input_shape = shape_list(input_ids)
1595
+
1596
+ if attention_mask is None:
1597
+ attention_mask = tf.fill(dims=input_shape, value=1)
1598
+ if output_segmentation:
1599
+ output_attentions = True
1600
+ vision_outputs = self.vision_model(
1601
+ pixel_values=pixel_values,
1602
+ output_attentions=output_attentions,
1603
+ output_hidden_states=output_hidden_states,
1604
+ return_dict=return_dict,
1605
+ training=training,
1606
+ )
1607
+
1608
+ text_outputs = self.text_model(
1609
+ input_ids=input_ids,
1610
+ attention_mask=attention_mask,
1611
+ position_ids=position_ids,
1612
+ output_attentions=output_attentions,
1613
+ output_hidden_states=output_hidden_states,
1614
+ return_dict=return_dict,
1615
+ training=training,
1616
+ )
1617
+
1618
+ image_embeds = vision_outputs[1]
1619
+ for layer in self.visual_projection:
1620
+ image_embeds = layer(image_embeds)
1621
+
1622
+ text_embeds = text_outputs[1]
1623
+ for layer in self.text_projection:
1624
+ text_embeds = layer(text_embeds)
1625
+
1626
+ # normalized features
1627
+ image_embeds = image_embeds / tf.norm(image_embeds, axis=-1, keepdims=True)
1628
+ text_embeds = text_embeds / tf.norm(text_embeds, axis=-1, keepdims=True)
1629
+
1630
+ # cosine similarity as logits
1631
+ logit_scale = tf.math.exp(self.logit_scale)
1632
+ logits_per_text = tf.matmul(text_embeds, image_embeds, transpose_b=True) * logit_scale
1633
+ logits_per_image = tf.transpose(logits_per_text)
1634
+
1635
+ seg_logits = None
1636
+ if output_segmentation:
1637
+ # grouped features
1638
+ # [batch_size_image, num_group, hidden_size]
1639
+ image_group_embeds = vision_outputs[0]
1640
+ # [batch_size_image*num_group, hidden_size]
1641
+ image_group_embeds = tf.reshape(image_group_embeds, shape=(-1, shape_list(image_group_embeds)[-1]))
1642
+ for layer in self.visual_projection:
1643
+ image_group_embeds = layer(image_group_embeds)
1644
+ if output_hidden_states:
1645
+ attentions = vision_outputs[3]
1646
+ else:
1647
+ attentions = vision_outputs[2]
1648
+ # [batch_size_image, num_group, height, width]
1649
+ grouping = get_grouping_from_attentions(attentions, pixel_values.shape[2:])
1650
+
1651
+ # normalized features
1652
+ image_group_embeds = image_group_embeds / tf.norm(
1653
+ tensor=image_group_embeds, ord="euclidean", axis=-1, keepdims=True
1654
+ )
1655
+ # [batch_size_image x num_group, batch_size_text]
1656
+ logits_per_image_group = tf.matmul(image_group_embeds, text_embeds, transpose_b=True) * logit_scale
1657
+ # [batch_size_image, batch_size_text, num_group]
1658
+ logits_per_image_group = tf.reshape(
1659
+ logits_per_image_group, shape=(image_embeds.shape[0], -1, text_embeds.shape[0])
1660
+ )
1661
+ logits_per_image_group = tf.transpose(logits_per_image_group, perm=(0, 2, 1))
1662
+
1663
+ # [batch_size_image, batch_size_text, height x width]
1664
+ flatten_grouping = tf.reshape(grouping, shape=(shape_list(grouping)[0], shape_list(grouping)[1], -1))
1665
+
1666
+ # [batch_size_image, batch_size_text, height, width]
1667
+ seg_logits = tf.matmul(logits_per_image_group, flatten_grouping) * logit_scale
1668
+ seg_logits = tf.reshape(
1669
+ seg_logits, shape=(seg_logits.shape[0], seg_logits.shape[1], grouping.shape[2], grouping.shape[3])
1670
+ )
1671
+
1672
+ loss = None
1673
+ if return_loss:
1674
+ loss = groupvit_loss(logits_per_text)[None, ...]
1675
+
1676
+ if not return_dict:
1677
+ if seg_logits is not None:
1678
+ output = (
1679
+ logits_per_image,
1680
+ logits_per_text,
1681
+ seg_logits,
1682
+ text_embeds,
1683
+ image_embeds,
1684
+ text_outputs,
1685
+ vision_outputs,
1686
+ )
1687
+ else:
1688
+ output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
1689
+ return ((loss,) + output) if loss is not None else output
1690
+
1691
+ return TFGroupViTModelOutput(
1692
+ loss=loss,
1693
+ logits_per_image=logits_per_image,
1694
+ logits_per_text=logits_per_text,
1695
+ segmentation_logits=seg_logits,
1696
+ text_embeds=text_embeds,
1697
+ image_embeds=image_embeds,
1698
+ text_model_output=text_outputs,
1699
+ vision_model_output=vision_outputs,
1700
+ )
1701
+
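+ # Shape sketch for the `output_segmentation` branch of `TFGroupViTMainLayer.call` above
+ # (illustrative symbols: B_i images, B_t texts, G groups, an H x W grouping map): the grouped
+ # image features are projected to (B_i * G, projection_dim) and matched against the text
+ # embeddings to give (B_i, B_t, G) group-text logits; multiplying by the flattened
+ # (B_i, G, H * W) grouping maps yields (B_i, B_t, H * W), which is reshaped to the
+ # (B_i, B_t, H, W) segmentation logits.
+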
1702
+
1703
+ class TFGroupViTPreTrainedModel(TFPreTrainedModel):
1704
+ """
1705
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
1706
+ models.
1707
+ """
1708
+
1709
+ config_class = GroupViTConfig
1710
+ base_model_prefix = "groupvit"
1711
+
1712
+
1713
+ GROUPVIT_START_DOCSTRING = r"""
1714
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
1715
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
1716
+ etc.)
1717
+
1718
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
1719
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
1720
+ behavior.
1721
+
1722
+ <Tip>
1723
+
1724
+ TF 2.0 models accept two formats as inputs:
1725
+
1726
+ - having all inputs as keyword arguments (like PyTorch models), or
1727
+ - having all inputs as a list, tuple or dict in the first positional arguments.
1728
+
1729
+ This second option is useful when using the [`keras.Model.fit`] method, which currently requires having all the
1730
+ tensors in the first argument of the model call function: `model(inputs)`.
1731
+
1732
+ If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the
1733
+ first positional argument:
1734
+
1735
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
1736
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
1737
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
1738
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
1739
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
1740
+
1741
+ </Tip>
1742
+
1743
+ Args:
1744
+ config ([`GroupViTConfig`]): Model configuration class with all the parameters of the model.
1745
+ Initializing with a config file does not load the weights associated with the model, only the
1746
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
1747
+ """
1748
+
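+ # Call-pattern sketch for the input formats described in GROUPVIT_START_DOCSTRING above
+ # (hypothetical `input_ids`, `pixel_values` and `attention_mask` tensors; `TFGroupViTModel`
+ # also requires `pixel_values`):
+ #   model(input_ids, pixel_values=pixel_values)                       # tensors as keyword/positional args
+ #   model([input_ids, pixel_values, attention_mask])                  # a list in the docstring order
+ #   model({"input_ids": input_ids, "pixel_values": pixel_values})     # a dict keyed by input names
+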
1749
+ GROUPVIT_TEXT_INPUTS_DOCSTRING = r"""
1750
+ Args:
1751
+ input_ids (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `({0})`):
1752
+ Indices of input sequence tokens in the vocabulary.
1753
+
1754
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
1755
+ [`PreTrainedTokenizer.encode`] for details.
1756
+
1757
+ [What are input IDs?](../glossary#input-ids)
1758
+ attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
1759
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1760
+
1761
+ - 1 for tokens that are **not masked**,
1762
+ - 0 for tokens that are **masked**.
1763
+
1764
+ [What are attention masks?](../glossary#attention-mask)
1765
+ position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
1766
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
1767
+ config.max_position_embeddings - 1]`.
1768
+
1769
+ [What are position IDs?](../glossary#position-ids)
1770
+ output_attentions (`bool`, *optional*):
1771
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1772
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
1773
+ config will be used instead.
1774
+ output_hidden_states (`bool`, *optional*):
1775
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1776
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
1777
+ used instead.
1778
+ return_dict (`bool`, *optional*):
1779
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
1780
+ eager mode, in graph mode the value will always be set to True.
1781
+ training (`bool`, *optional*, defaults to `False`):
1782
+ Whether or not to use the model in training mode (some modules like dropout modules have different
1783
+ behaviors between training and evaluation).
1784
+ """
1785
+
1786
+ GROUPVIT_VISION_INPUTS_DOCSTRING = r"""
1787
+ Args:
1788
+ pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`):
1789
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
1790
+ [`CLIPImageProcessor.__call__`] for details.
1791
+ output_attentions (`bool`, *optional*):
1792
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1793
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
1794
+ config will be used instead.
1795
+ output_hidden_states (`bool`, *optional*):
1796
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1797
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
1798
+ used instead.
1799
+ return_dict (`bool`, *optional*):
1800
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
1801
+ eager mode, in graph mode the value will always be set to True.
1802
+ training (`bool`, *optional*, defaults to `False`):
1803
+ Whether or not to use the model in training mode (some modules like dropout modules have different
1804
+ behaviors between training and evaluation).
1805
+ """
1806
+
1807
+ GROUPVIT_INPUTS_DOCSTRING = r"""
1808
+ Args:
1809
+ input_ids (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `({0})`):
1810
+ Indices of input sequence tokens in the vocabulary.
1811
+
1812
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
1813
+ [`PreTrainedTokenizer.encode`] for details.
1814
+
1815
+ [What are input IDs?](../glossary#input-ids)
1816
+ pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`):
1817
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
1818
+ [`CLIPImageProcessor.__call__`] for details.
1819
+ attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
1820
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1821
+
1822
+ - 1 for tokens that are **not masked**,
1823
+ - 0 for tokens that are **masked**.
1824
+
1825
+ [What are attention masks?](../glossary#attention-mask)
1826
+ position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
1827
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
1828
+ config.max_position_embeddings - 1]`.
1829
+
1830
+ [What are position IDs?](../glossary#position-ids)
1831
+ return_loss (`bool`, *optional*):
1832
+ Whether or not to return the contrastive loss.
1833
+ output_attentions (`bool`, *optional*):
1834
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1835
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
1836
+ config will be used instead.
1837
+ output_hidden_states (`bool`, *optional*):
1838
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1839
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
1840
+ used instead.
1841
+ return_dict (`bool`, *optional*):
1842
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
1843
+ eager mode, in graph mode the value will always be set to True.
1844
+ training (`bool`, *optional*, defaults to `False`):
1845
+ Whether or not to use the model in training mode (some modules like dropout modules have different
1846
+ behaviors between training and evaluation).
1847
+ """
1848
+
1849
+
1850
+ class TFGroupViTTextModel(TFGroupViTPreTrainedModel):
1851
+ config_class = GroupViTTextConfig
1852
+ main_input_name = "input_ids"
1853
+
1854
+ def __init__(self, config: GroupViTTextConfig, *inputs, **kwargs):
1855
+ super().__init__(config, *inputs, **kwargs)
1856
+
1857
+ self.groupvit = TFGroupViTTextMainLayer(config, name="groupvit")
1858
+
1859
+ @unpack_inputs
1860
+ @add_start_docstrings_to_model_forward(GROUPVIT_TEXT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1861
+ @replace_return_docstrings(output_type=TFBaseModelOutputWithPooling, config_class=GroupViTTextConfig)
1862
+ def call(
1863
+ self,
1864
+ input_ids: TFModelInputType | None = None,
1865
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1866
+ position_ids: np.ndarray | tf.Tensor | None = None,
1867
+ output_attentions: Optional[bool] = None,
1868
+ output_hidden_states: Optional[bool] = None,
1869
+ return_dict: Optional[bool] = None,
1870
+ training: bool = False,
1871
+ ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
1872
+ r"""
1873
+ Returns:
1874
+
1875
+ Examples:
1876
+
1877
+ ```python
1878
+ >>> from transformers import CLIPTokenizer, TFGroupViTTextModel
1879
+
1880
+ >>> tokenizer = CLIPTokenizer.from_pretrained("nvidia/groupvit-gcc-yfcc")
1881
+ >>> model = TFGroupViTTextModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
1882
+
1883
+ >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="tf")
1884
+
1885
+ >>> outputs = model(**inputs)
1886
+ >>> last_hidden_state = outputs.last_hidden_state
1887
+ >>> pooled_output = outputs.pooler_output # pooled (EOS token) states
1888
+ ```"""
1889
+
1890
+ outputs = self.groupvit(
1891
+ input_ids=input_ids,
1892
+ attention_mask=attention_mask,
1893
+ position_ids=position_ids,
1894
+ output_attentions=output_attentions,
1895
+ output_hidden_states=output_hidden_states,
1896
+ return_dict=return_dict,
1897
+ training=training,
1898
+ )
1899
+
1900
+ return outputs
1901
+
1902
+ def build(self, input_shape=None):
1903
+ if self.built:
1904
+ return
1905
+ self.built = True
1906
+ if getattr(self, "groupvit", None) is not None:
1907
+ with tf.name_scope(self.groupvit.name):
1908
+ self.groupvit.build(None)
1909
+
1910
+
1911
+ class TFGroupViTVisionModel(TFGroupViTPreTrainedModel):
1912
+ config_class = GroupViTVisionConfig
1913
+ main_input_name = "pixel_values"
1914
+
1915
+ def __init__(self, config: GroupViTVisionConfig, *inputs, **kwargs):
1916
+ super().__init__(config, *inputs, **kwargs)
1917
+
1918
+ self.groupvit = TFGroupViTVisionMainLayer(config, name="groupvit")
1919
+
1920
+ @unpack_inputs
1921
+ @add_start_docstrings_to_model_forward(GROUPVIT_VISION_INPUTS_DOCSTRING)
1922
+ @replace_return_docstrings(output_type=TFBaseModelOutputWithPooling, config_class=GroupViTVisionConfig)
1923
+ def call(
1924
+ self,
1925
+ pixel_values: TFModelInputType | None = None,
1926
+ output_attentions: Optional[bool] = None,
1927
+ output_hidden_states: Optional[bool] = None,
1928
+ return_dict: Optional[bool] = None,
1929
+ training: bool = False,
1930
+ ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
1931
+ r"""
1932
+ Returns:
1933
+
1934
+ Examples:
1935
+
1936
+ ```python
1937
+ >>> from PIL import Image
1938
+ >>> import requests
1939
+ >>> from transformers import AutoProcessor, TFGroupViTVisionModel
1940
+
1941
+ >>> processor = AutoProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc")
1942
+ >>> model = TFGroupViTVisionModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
1943
+
1944
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1945
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1946
+
1947
+ >>> inputs = processor(images=image, return_tensors="tf")
1948
+
1949
+ >>> outputs = model(**inputs)
1950
+ >>> last_hidden_state = outputs.last_hidden_state
1951
+ >>> pooled_output = outputs.pooler_output # pooled CLS states
1952
+ ```"""
1953
+
1954
+ outputs = self.groupvit(
1955
+ pixel_values=pixel_values,
1956
+ output_attentions=output_attentions,
1957
+ output_hidden_states=output_hidden_states,
1958
+ return_dict=return_dict,
1959
+ training=training,
1960
+ )
1961
+
1962
+ return outputs
1963
+
1964
+ def build(self, input_shape=None):
1965
+ if self.built:
1966
+ return
1967
+ self.built = True
1968
+ if getattr(self, "groupvit", None) is not None:
1969
+ with tf.name_scope(self.groupvit.name):
1970
+ self.groupvit.build(None)
1971
+
1972
+
1973
+ @add_start_docstrings(GROUPVIT_START_DOCSTRING)
1974
+ class TFGroupViTModel(TFGroupViTPreTrainedModel):
1975
+ config_class = GroupViTConfig
1976
+
1977
+ def __init__(self, config: GroupViTConfig, *inputs, **kwargs):
1978
+ super().__init__(config, *inputs, **kwargs)
1979
+
1980
+ self.groupvit = TFGroupViTMainLayer(config, name="groupvit")
1981
+
1982
+ @unpack_inputs
1983
+ @add_start_docstrings_to_model_forward(GROUPVIT_TEXT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1984
+ def get_text_features(
1985
+ self,
1986
+ input_ids: TFModelInputType | None = None,
1987
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1988
+ position_ids: np.ndarray | tf.Tensor | None = None,
1989
+ output_attentions: Optional[bool] = None,
1990
+ output_hidden_states: Optional[bool] = None,
1991
+ return_dict: Optional[bool] = None,
1992
+ training: bool = False,
1993
+ ) -> tf.Tensor:
1994
+ r"""
1995
+ Returns:
1996
+ text_features (`tf.Tensor` of shape `(batch_size, output_dim)`): The text embeddings obtained by applying
1997
+ the projection layer to the pooled output of [`TFGroupViTTextModel`].
1998
+
1999
+ Examples:
2000
+
2001
+ ```python
2002
+ >>> from transformers import CLIPTokenizer, TFGroupViTModel
2003
+
2004
+ >>> model = TFGroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
2005
+ >>> tokenizer = CLIPTokenizer.from_pretrained("nvidia/groupvit-gcc-yfcc")
2006
+
2007
+ >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="tf")
2008
+ >>> text_features = model.get_text_features(**inputs)
2009
+ ```"""
2010
+
2011
+ text_features = self.groupvit.get_text_features(
2012
+ input_ids=input_ids,
2013
+ attention_mask=attention_mask,
2014
+ position_ids=position_ids,
2015
+ output_attentions=output_attentions,
2016
+ output_hidden_states=output_hidden_states,
2017
+ return_dict=return_dict,
2018
+ training=training,
2019
+ )
2020
+
2021
+ return text_features
2022
+
2023
+ @unpack_inputs
2024
+ @add_start_docstrings_to_model_forward(GROUPVIT_VISION_INPUTS_DOCSTRING)
2025
+ def get_image_features(
2026
+ self,
2027
+ pixel_values: TFModelInputType | None = None,
2028
+ output_attentions: Optional[bool] = None,
2029
+ output_hidden_states: Optional[bool] = None,
2030
+ return_dict: Optional[bool] = None,
2031
+ training: bool = False,
2032
+ ) -> tf.Tensor:
2033
+ r"""
2034
+ Returns:
2035
+ image_features (`tf.Tensor` of shape `(batch_size, output_dim`): The image embeddings obtained by applying
2036
+ the projection layer to the pooled output of [`TFGroupViTVisionModel`].
2037
+
2038
+ Examples:
2039
+
2040
+ ```python
2041
+ >>> from PIL import Image
2042
+ >>> import requests
2043
+ >>> from transformers import AutoProcessor, TFGroupViTModel
2044
+
2045
+ >>> model = TFGroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
2046
+ >>> processor = AutoProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc")
2047
+
2048
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
2049
+ >>> image = Image.open(requests.get(url, stream=True).raw)
2050
+
2051
+ >>> inputs = processor(images=image, return_tensors="tf")
2052
+
2053
+ >>> image_features = model.get_image_features(**inputs)
2054
+ ```"""
2055
+
2056
+ image_features = self.groupvit.get_image_features(
2057
+ pixel_values=pixel_values,
2058
+ output_attentions=output_attentions,
2059
+ output_hidden_states=output_hidden_states,
2060
+ return_dict=return_dict,
2061
+ training=training,
2062
+ )
2063
+
2064
+ return image_features
2065
+
2066
+ @unpack_inputs
2067
+ @add_start_docstrings_to_model_forward(GROUPVIT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
2068
+ @replace_return_docstrings(output_type=TFGroupViTModelOutput, config_class=GroupViTConfig)
2069
+ def call(
2070
+ self,
2071
+ input_ids: TFModelInputType | None = None,
2072
+ pixel_values: TFModelInputType | None = None,
2073
+ attention_mask: np.ndarray | tf.Tensor | None = None,
2074
+ position_ids: np.ndarray | tf.Tensor | None = None,
2075
+ return_loss: Optional[bool] = None,
2076
+ output_attentions: Optional[bool] = None,
2077
+ output_hidden_states: Optional[bool] = None,
2078
+ output_segmentation: Optional[bool] = None,
2079
+ return_dict: Optional[bool] = None,
2080
+ training: bool = False,
2081
+ ) -> Union[TFGroupViTModelOutput, Tuple[tf.Tensor]]:
2082
+ r"""
2083
+ Returns:
2084
+
2085
+ Examples:
2086
+
2087
+ ```python
2088
+ >>> from PIL import Image
2089
+ >>> import requests
2090
+ >>> from transformers import AutoProcessor, TFGroupViTModel
2091
+ >>> import tensorflow as tf
2092
+
2093
+ >>> model = TFGroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
2094
+ >>> processor = AutoProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc")
2095
+
2096
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
2097
+ >>> image = Image.open(requests.get(url, stream=True).raw)
2098
+
2099
+ >>> inputs = processor(
2100
+ ... text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="tf", padding=True
2101
+ ... )
2102
+
2103
+ >>> outputs = model(**inputs)
2104
+ >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score
2105
+ >>> probs = tf.math.softmax(logits_per_image, axis=1) # we can take the softmax to get the label probabilities
2106
+ ```"""
2107
+
2108
+ outputs = self.groupvit(
2109
+ input_ids=input_ids,
2110
+ pixel_values=pixel_values,
2111
+ attention_mask=attention_mask,
2112
+ position_ids=position_ids,
2113
+ return_loss=return_loss,
2114
+ output_attentions=output_attentions,
2115
+ output_hidden_states=output_hidden_states,
2116
+ output_segmentation=output_segmentation,
2117
+ return_dict=return_dict,
2118
+ training=training,
2119
+ )
2120
+
2121
+ return outputs
2122
+
2123
+ def serving_output(self, output: TFGroupViTModelOutput) -> TFGroupViTModelOutput:
2124
+ # TODO: As is this currently fails with saved_model=True, because
2125
+ # TensorFlow cannot trace through nested dataclasses. Reference:
2126
+ # https://github.com/huggingface/transformers/pull/16886
2127
+ return output
2128
+
2129
+ def build(self, input_shape=None):
2130
+ if self.built:
2131
+ return
2132
+ self.built = True
2133
+ if getattr(self, "groupvit", None) is not None:
2134
+ with tf.name_scope(self.groupvit.name):
2135
+ self.groupvit.build(None)
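Taken together, the `TFGroupViTModel` entry points added above (`get_text_features`, `get_image_features`, and the joint `call`) cover the usual zero-shot matching workflow. Below is a minimal sketch that combines them, reusing the `nvidia/groupvit-gcc-yfcc` checkpoint and COCO image URL from the docstrings; the explicit cosine-similarity comparison at the end is an illustrative addition, not something the diff itself performs.

```python
# Sketch: zero-shot image/text matching with the TF GroupViT classes added above.
# Assumes `transformers` with TensorFlow support is installed; the checkpoint name
# and image URL come from the docstrings in the diff.
import requests
import tensorflow as tf
from PIL import Image
from transformers import AutoProcessor, TFGroupViTModel

processor = AutoProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc")
model = TFGroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
texts = ["a photo of a cat", "a photo of a dog"]

# Joint forward pass: logits_per_image holds the image-text similarity scores.
inputs = processor(text=texts, images=image, return_tensors="tf", padding=True)
outputs = model(**inputs)
probs = tf.math.softmax(outputs.logits_per_image, axis=1)

# The feature heads can also be used independently, e.g. to embed text once and
# compare many images against the cached embeddings (illustrative addition).
text_features = model.get_text_features(**processor(text=texts, return_tensors="tf", padding=True))
image_features = model.get_image_features(**processor(images=image, return_tensors="tf"))
text_features = text_features / tf.norm(text_features, axis=-1, keepdims=True)
image_features = image_features / tf.norm(image_features, axis=-1, keepdims=True)
cosine_sim = tf.matmul(image_features, text_features, transpose_b=True)
```

Exposing the two feature heads separately from `call` is what makes the caching pattern in the last few lines possible: the text embeddings are computed once and scored against any number of images.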
env-llmeval/lib/python3.10/site-packages/transformers/models/llama/__init__.py ADDED
@@ -0,0 +1,114 @@
+# Copyright 2022 EleutherAI and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import (
+    OptionalDependencyNotAvailable,
+    _LazyModule,
+    is_flax_available,
+    is_sentencepiece_available,
+    is_tokenizers_available,
+    is_torch_available,
+)
+
+
+_import_structure = {
+    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
+}
+
+try:
+    if not is_sentencepiece_available():
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    pass
+else:
+    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]
+
+try:
+    if not is_tokenizers_available():
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    pass
+else:
+    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]
+
+try:
+    if not is_torch_available():
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    pass
+else:
+    _import_structure["modeling_llama"] = [
+        "LlamaForCausalLM",
+        "LlamaModel",
+        "LlamaPreTrainedModel",
+        "LlamaForSequenceClassification",
+        "LlamaForQuestionAnswering",
+    ]
+
+try:
+    if not is_flax_available():
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    pass
+else:
+    _import_structure["modeling_flax_llama"] = ["FlaxLlamaForCausalLM", "FlaxLlamaModel", "FlaxLlamaPreTrainedModel"]
+
+
+if TYPE_CHECKING:
+    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
+
+    try:
+        if not is_sentencepiece_available():
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        pass
+    else:
+        from .tokenization_llama import LlamaTokenizer
+
+    try:
+        if not is_tokenizers_available():
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        pass
+    else:
+        from .tokenization_llama_fast import LlamaTokenizerFast
+
+    try:
+        if not is_torch_available():
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        pass
+    else:
+        from .modeling_llama import (
+            LlamaForCausalLM,
+            LlamaForQuestionAnswering,
+            LlamaForSequenceClassification,
+            LlamaModel,
+            LlamaPreTrainedModel,
+        )
+
+    try:
+        if not is_flax_available():
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        pass
+    else:
+        from .modeling_flax_llama import FlaxLlamaForCausalLM, FlaxLlamaModel, FlaxLlamaPreTrainedModel
+
+
+else:
+    import sys
+
+    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
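The new `llama/__init__.py` registers everything in `_import_structure` and then replaces the module with a `_LazyModule`, so the heavy torch/flax submodules are only imported when one of their exported names is first accessed, and the optional-dependency guards decide which names get registered at all. Below is a minimal, self-contained sketch of that lazy-loading idea; the `LazyModule` class and the `"mypkg"` names are hypothetical stand-ins, not the actual `_LazyModule` implementation from `transformers.utils`.

```python
# Sketch of the lazy-import pattern used in the __init__.py above, reduced to its
# core: a module type that defers importing submodules until an exported name is
# first accessed. All names here ("mypkg", LazyModule, ...) are hypothetical.
import importlib
import types


class LazyModule(types.ModuleType):
    """Defer importing submodules until one of their exported names is accessed."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure
        # Map every exported name back to the submodule that defines it.
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }
        self.__all__ = list(self._name_to_module)

    def __getattr__(self, name):
        if name not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")
        # Import the owning submodule only now, on first access, then cache the result
        # on the module object so the import cost is paid at most once per name.
        submodule = importlib.import_module(f"{self.__name__}.{self._name_to_module[name]}")
        value = getattr(submodule, name)
        setattr(self, name, value)
        return value


# Usage (inside a hypothetical mypkg/__init__.py), mirroring the last line of the diff:
# sys.modules[__name__] = LazyModule(__name__, {"configuration_demo": ["DemoConfig"],
#                                               "modeling_demo": ["DemoModel"]})
```

Because `__getattr__` only runs when normal attribute lookup fails, already-resolved names are served directly from the module after the first access.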
env-llmeval/lib/python3.10/site-packages/transformers/models/llama/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.67 kB)
env-llmeval/lib/python3.10/site-packages/transformers/models/llama/__pycache__/configuration_llama.cpython-310.pyc ADDED
Binary file (7.82 kB)
env-llmeval/lib/python3.10/site-packages/transformers/models/llama/__pycache__/convert_llama_weights_to_hf.cpython-310.pyc ADDED
Binary file (8.42 kB)
env-llmeval/lib/python3.10/site-packages/transformers/models/llama/__pycache__/modeling_flax_llama.cpython-310.pyc ADDED
Binary file (22.4 kB)
env-llmeval/lib/python3.10/site-packages/transformers/models/llama/__pycache__/modeling_llama.cpython-310.pyc ADDED
Binary file (47.2 kB)
env-llmeval/lib/python3.10/site-packages/transformers/models/llama/__pycache__/tokenization_llama.cpython-310.pyc ADDED
Binary file (18.4 kB)