applied-ai-018 committed on
Commit 9d93d66 · verified · Parent: 1694a46

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes. See the raw diff for the full change set.
Files changed (50)
  1. ckpts/universal/global_step20/zero/12.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt +3 -0
  2. lm-evaluation-harness/tests/testdata/anagrams1-v0-res.json +1 -0
  3. lm-evaluation-harness/tests/testdata/arithmetic_2da-v0-loglikelihood +1 -0
  4. lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_irregular_2-v0-res.json +1 -0
  5. lm-evaluation-harness/tests/testdata/blimp_expletive_it_object_raising-v0-loglikelihood +1 -0
  6. lm-evaluation-harness/tests/testdata/blimp_left_branch_island_echo_question-v0-res.json +1 -0
  7. lm-evaluation-harness/tests/testdata/blimp_matrix_question_npi_licensor_present-v0-loglikelihood +1 -0
  8. lm-evaluation-harness/tests/testdata/blimp_matrix_question_npi_licensor_present-v0-res.json +1 -0
  9. lm-evaluation-harness/tests/testdata/blimp_npi_present_1-v0-loglikelihood +1 -0
  10. lm-evaluation-harness/tests/testdata/blimp_principle_A_domain_1-v0-loglikelihood +1 -0
  11. lm-evaluation-harness/tests/testdata/crows_pairs_english_gender-v0-loglikelihood +1 -0
  12. lm-evaluation-harness/tests/testdata/crows_pairs_french_disability-v0-res.json +1 -0
  13. lm-evaluation-harness/tests/testdata/crows_pairs_french_religion-v0-res.json +1 -0
  14. lm-evaluation-harness/tests/testdata/headqa_en-v0-loglikelihood +1 -0
  15. lm-evaluation-harness/tests/testdata/hendrycksTest-business_ethics-v0-loglikelihood +1 -0
  16. lm-evaluation-harness/tests/testdata/hendrycksTest-college_medicine-v0-res.json +1 -0
  17. lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_computer_science-v0-loglikelihood +1 -0
  18. lm-evaluation-harness/tests/testdata/hendrycksTest-machine_learning-v0-res.json +1 -0
  19. lm-evaluation-harness/tests/testdata/hendrycksTest-sociology-v0-res.json +1 -0
  20. lm-evaluation-harness/tests/testdata/lambada_mt_it-v0-loglikelihood +1 -0
  21. lm-evaluation-harness/tests/testdata/math_precalc-v0-res.json +1 -0
  22. lm-evaluation-harness/tests/testdata/openbookqa-v0-loglikelihood +1 -0
  23. lm-evaluation-harness/tests/testdata/pile_gutenberg-v1-loglikelihood_rolling +1 -0
  24. lm-evaluation-harness/tests/testdata/pile_pile-cc-v1-res.json +1 -0
  25. lm-evaluation-harness/tests/testdata/pile_uspto-v0-loglikelihood_rolling +1 -0
  26. lm-evaluation-harness/tests/testdata/qnli-v0-loglikelihood +1 -0
  27. lm-evaluation-harness/tests/testdata/race-v0-res.json +1 -0
  28. lm-evaluation-harness/tests/testdata/squad2-v1-res.json +1 -0
  29. lm-evaluation-harness/tests/testdata/swag-v0-res.json +1 -0
  30. lm-evaluation-harness/tests/testdata/toxigen-v0-res.json +1 -0
  31. lm-evaluation-harness/tests/testdata/triviaqa-v0-loglikelihood +1 -0
  32. lm-evaluation-harness/tests/testdata/wic-v0-loglikelihood +1 -0
  33. lm-evaluation-harness/tests/testdata/wmt14-en-fr-v0-greedy_until +1 -0
  34. lm-evaluation-harness/tests/testdata/wmt14-fr-en-v0-greedy_until +1 -0
  35. lm-evaluation-harness/tests/testdata/wmt20-en-zh-v1-greedy_until +1 -0
  36. lm-evaluation-harness/tests/testdata/wmt20-ja-en-v0-greedy_until +1 -0
  37. lm-evaluation-harness/tests/testdata/wsc-v0-res.json +1 -0
  38. venv/lib/python3.10/site-packages/transformers/models/barthez/__init__.py +59 -0
  39. venv/lib/python3.10/site-packages/transformers/models/barthez/__pycache__/__init__.cpython-310.pyc +0 -0
  40. venv/lib/python3.10/site-packages/transformers/models/barthez/__pycache__/tokenization_barthez.cpython-310.pyc +0 -0
  41. venv/lib/python3.10/site-packages/transformers/models/barthez/__pycache__/tokenization_barthez_fast.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/transformers/models/barthez/tokenization_barthez.py +287 -0
  43. venv/lib/python3.10/site-packages/transformers/models/barthez/tokenization_barthez_fast.py +195 -0
  44. venv/lib/python3.10/site-packages/transformers/models/detr/__init__.py +75 -0
  45. venv/lib/python3.10/site-packages/transformers/models/detr/__pycache__/convert_detr_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  46. venv/lib/python3.10/site-packages/transformers/models/detr/__pycache__/convert_detr_to_pytorch.cpython-310.pyc +0 -0
  47. venv/lib/python3.10/site-packages/transformers/models/detr/__pycache__/feature_extraction_detr.cpython-310.pyc +0 -0
  48. venv/lib/python3.10/site-packages/transformers/models/detr/__pycache__/modeling_detr.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/transformers/models/detr/configuration_detr.py +284 -0
  50. venv/lib/python3.10/site-packages/transformers/models/detr/convert_detr_original_pytorch_checkpoint_to_pytorch.py +278 -0
ckpts/universal/global_step20/zero/12.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:314e9e440891db9a772e572b6c0492fec68fc0b8d36ead67bcd523a147b9cbdd
+ size 33555612
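The checkpoint tensor above is stored as a Git LFS pointer rather than raw bytes: the repository only tracks the spec version, a SHA-256 object id, and the payload size. A minimal sketch of reading such a pointer (the `parse_lfs_pointer` helper is hypothetical, not part of this commit):

```python
# Hypothetical helper: split a Git LFS pointer file into its key/value fields.
# The three keys (version, oid, size) mirror the pointer shown in the diff above.
def parse_lfs_pointer(text: str) -> dict:
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields


pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:314e9e440891db9a772e572b6c0492fec68fc0b8d36ead67bcd523a147b9cbdd\n"
    "size 33555612\n"
)
assert parse_lfs_pointer(pointer)["size"] == "33555612"
```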
lm-evaluation-harness/tests/testdata/anagrams1-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"anagrams1": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"anagrams1": 0}}
lm-evaluation-harness/tests/testdata/arithmetic_2da-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 6ca1ca6ebd7cac4420d5005f7f35b0edbc921377f5e4f8874cc176e4fb6d79d4
lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_irregular_2-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_determiner_noun_agreement_irregular_2": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_determiner_noun_agreement_irregular_2": 0}}
lm-evaluation-harness/tests/testdata/blimp_expletive_it_object_raising-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ ceede5b38248a62125a74a8332602b8eac5ef40864f071ad8d86e7971e07219d
lm-evaluation-harness/tests/testdata/blimp_left_branch_island_echo_question-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_left_branch_island_echo_question": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_left_branch_island_echo_question": 0}}
lm-evaluation-harness/tests/testdata/blimp_matrix_question_npi_licensor_present-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ a3a702a3335c79b02b36caf37c68069050c2a8a3a03c3610c09afc39d2b83fb1
lm-evaluation-harness/tests/testdata/blimp_matrix_question_npi_licensor_present-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_matrix_question_npi_licensor_present": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_matrix_question_npi_licensor_present": 0}}
lm-evaluation-harness/tests/testdata/blimp_npi_present_1-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 3ef532a85e0ee8f8ff779bc7ddc873d515969a708da84a4eb4a85b7c843cf244
lm-evaluation-harness/tests/testdata/blimp_principle_A_domain_1-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 290e7eddacea4ec16989af697f2ee3373fdd9aef4b452bf887184c6e2f6e7d9d
lm-evaluation-harness/tests/testdata/crows_pairs_english_gender-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 2bf62b7cc678f64ffad4a6e6715ff76a2b984bfe8d1165da4b76b3b4dfafb2f9
lm-evaluation-harness/tests/testdata/crows_pairs_french_disability-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"crows_pairs_french_disability": {"likelihood_difference": 0.31387939561315326, "likelihood_difference_stderr": 0.027598132299657168, "pct_stereotype": 0.36363636363636365, "pct_stereotype_stderr": 0.05966637484671758}}, "versions": {"crows_pairs_french_disability": 0}}
lm-evaluation-harness/tests/testdata/crows_pairs_french_religion-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"crows_pairs_french_religion": {"likelihood_difference": 0.32691651640972225, "likelihood_difference_stderr": 0.021833493193249474, "pct_stereotype": 0.45217391304347826, "pct_stereotype_stderr": 0.046614569799583463}}, "versions": {"crows_pairs_french_religion": 0}}
lm-evaluation-harness/tests/testdata/headqa_en-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 09da45119b12a0144e3081f8fb790c2a22af7b9c3aac42f54423d348a711fbf5
lm-evaluation-harness/tests/testdata/hendrycksTest-business_ethics-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ b3b27e9dbad587377d3c8cab1072782de883e245da93a563bd8b3099017b1fc0
lm-evaluation-harness/tests/testdata/hendrycksTest-college_medicine-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"hendrycksTest-college_medicine": {"acc": 0.27167630057803466, "acc_norm": 0.2543352601156069, "acc_norm_stderr": 0.0332055644308557, "acc_stderr": 0.03391750322321659}}, "versions": {"hendrycksTest-college_medicine": 0}}
lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_computer_science-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 870d5a6300c527077aaf6baa3e750e75fa840b41657cf82549f39b768b14862d
lm-evaluation-harness/tests/testdata/hendrycksTest-machine_learning-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"hendrycksTest-machine_learning": {"acc": 0.23214285714285715, "acc_norm": 0.22321428571428573, "acc_norm_stderr": 0.039523019677025116, "acc_stderr": 0.04007341809755806}}, "versions": {"hendrycksTest-machine_learning": 0}}
lm-evaluation-harness/tests/testdata/hendrycksTest-sociology-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"hendrycksTest-sociology": {"acc": 0.23383084577114427, "acc_norm": 0.24875621890547264, "acc_norm_stderr": 0.030567675938916707, "acc_stderr": 0.02992941540834838}}, "versions": {"hendrycksTest-sociology": 0}}
lm-evaluation-harness/tests/testdata/lambada_mt_it-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ fd87c6c5cf4e0499c5f9f80e5bd7ee6a4f3d2991902a0cc3ec9e6eaf22d6760a
lm-evaluation-harness/tests/testdata/math_precalc-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"math_precalc": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"math_precalc": 0}}
lm-evaluation-harness/tests/testdata/openbookqa-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 78a49a0ca1a47373adb33463b1d092e6bc0d8f4b01bcb380ada48065037849d7
lm-evaluation-harness/tests/testdata/pile_gutenberg-v1-loglikelihood_rolling ADDED
@@ -0,0 +1 @@
+ 02a559f74a9105145e7d4d9c5ddea372b5b4938f5368dc8ffafc39cbe3b4c7ef
lm-evaluation-harness/tests/testdata/pile_pile-cc-v1-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"pile_pile-cc": {"bits_per_byte": 0.0001620742639125056, "byte_perplexity": 1.0001123476295946, "word_perplexity": 1.0006738958554477}}, "versions": {"pile_pile-cc": 1}}
lm-evaluation-harness/tests/testdata/pile_uspto-v0-loglikelihood_rolling ADDED
@@ -0,0 +1 @@
+ 789b2bdb31564d512b70f801316f49320a26c83ba361226bac0afb255341d477
lm-evaluation-harness/tests/testdata/qnli-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 4281d4ff5cf1244358b0ea0220c67863c69fbade850696b43e8ff05138e01e12
lm-evaluation-harness/tests/testdata/race-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"race": {"acc": 0.23253588516746412, "acc_stderr": 0.013074460615265295}}, "versions": {"race": 0}}
lm-evaluation-harness/tests/testdata/squad2-v1-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"squad2": {"HasAns_exact": 0.0, "HasAns_f1": 0.0, "NoAns_exact": 0.0, "NoAns_f1": 0.0, "best_exact": 50.07159100480081, "best_f1": 50.07159100480081, "exact": 0.0, "f1": 0.0}}, "versions": {"squad2": 1}}
lm-evaluation-harness/tests/testdata/swag-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"swag": {"acc": 0.2482255323402979, "acc_norm": 0.24882535239428172, "acc_norm_stderr": 0.00305666959496067, "acc_stderr": 0.003054201832644171}}, "versions": {"swag": 0}}
lm-evaluation-harness/tests/testdata/toxigen-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"toxigen": {"acc": 0.5053191489361702, "acc_norm": 0.46808510638297873, "acc_norm_stderr": 0.016283609940023203, "acc_stderr": 0.016315959984563776}}, "versions": {"toxigen": 0}}
lm-evaluation-harness/tests/testdata/triviaqa-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ f8ec05b306b9f6187c0f8117cae441fb85a7a2e4670f4f9a1a3b632b1978421a
lm-evaluation-harness/tests/testdata/wic-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 403a08da05e4c44d7e3dd3358382a7ba489c41d223e24cd1a9ed82ef1a2d004b
lm-evaluation-harness/tests/testdata/wmt14-en-fr-v0-greedy_until ADDED
@@ -0,0 +1 @@
+ 368ae7eec0f902b5123f2d5197caa5109a23942011c53fe68d9eaeee20180e46
lm-evaluation-harness/tests/testdata/wmt14-fr-en-v0-greedy_until ADDED
@@ -0,0 +1 @@
+ c1d9f7283755fbdd7ecd6cc4278b0ac25a80ac256b7071ea5f839ccd038e5974
lm-evaluation-harness/tests/testdata/wmt20-en-zh-v1-greedy_until ADDED
@@ -0,0 +1 @@
+ 67f0333ddbcb07d7a9ac12919129a18fe4fea24e4826a11bbdde4fd5ed5ed83f
lm-evaluation-harness/tests/testdata/wmt20-ja-en-v0-greedy_until ADDED
@@ -0,0 +1 @@
+ 1fd846f3c0104e794eb380dae7f648592092ab8bf59234c26d0a671bbbc28df1
lm-evaluation-harness/tests/testdata/wsc-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"wsc": {"acc": 0.5480769230769231, "acc_stderr": 0.049038186969314335}}, "versions": {"wsc": 0}}
venv/lib/python3.10/site-packages/transformers/models/barthez/__init__.py ADDED
@@ -0,0 +1,59 @@
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from typing import TYPE_CHECKING
+
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available
+
+
+ _import_structure = {}
+
+ try:
+     if not is_sentencepiece_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["tokenization_barthez"] = ["BarthezTokenizer"]
+
+ try:
+     if not is_tokenizers_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["tokenization_barthez_fast"] = ["BarthezTokenizerFast"]
+
+
+ if TYPE_CHECKING:
+     try:
+         if not is_sentencepiece_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .tokenization_barthez import BarthezTokenizer
+
+     try:
+         if not is_tokenizers_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .tokenization_barthez_fast import BarthezTokenizerFast
+
+ else:
+     import sys
+
+     sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
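The `__init__.py` above wires the BARThez tokenizers into transformers' lazy-import machinery: at runtime the module object is swapped for a `_LazyModule`, so the sentencepiece-backed submodule is only executed when one of its symbols is first accessed. A small sketch of the observable behavior (assuming sentencepiece is installed):

```python
# Sketch: attribute access on the lazy module triggers the real import.
# Importing the package itself stays cheap; tokenization_barthez only loads here.
from transformers.models import barthez

tokenizer_cls = barthez.BarthezTokenizer  # submodule imported at this line
print(tokenizer_cls.__name__)  # -> "BarthezTokenizer"
```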
venv/lib/python3.10/site-packages/transformers/models/barthez/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (922 Bytes).
 
venv/lib/python3.10/site-packages/transformers/models/barthez/__pycache__/tokenization_barthez.cpython-310.pyc ADDED
Binary file (11 kB).
 
venv/lib/python3.10/site-packages/transformers/models/barthez/__pycache__/tokenization_barthez_fast.cpython-310.pyc ADDED
Binary file (7.11 kB).
 
venv/lib/python3.10/site-packages/transformers/models/barthez/tokenization_barthez.py ADDED
@@ -0,0 +1,287 @@
+ # coding=utf-8
+ # Copyright 2020 Ecole Polytechnique and the HuggingFace Inc. team.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Tokenization classes for the BARThez model."""
+
+
+ import os
+ from shutil import copyfile
+ from typing import Any, Dict, List, Optional, Tuple
+
+ import sentencepiece as spm
+
+ from ...tokenization_utils import AddedToken, PreTrainedTokenizer
+ from ...utils import logging
+
+
+ logger = logging.get_logger(__name__)
+
+ VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
+
+
+ SPIECE_UNDERLINE = "▁"
+
+ # TODO this class is useless. This is the most standard sentencepiece model. Let's find which one is closest and nuke this.
+
+
+ class BarthezTokenizer(PreTrainedTokenizer):
+     """
+     Adapted from [`CamembertTokenizer`] and [`BartTokenizer`]. Construct a BARThez tokenizer. Based on
+     [SentencePiece](https://github.com/google/sentencepiece).
+
+     This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
+     this superclass for more information regarding those methods.
+
+     Args:
+         vocab_file (`str`):
+             [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
+             contains the vocabulary necessary to instantiate a tokenizer.
+         bos_token (`str`, *optional*, defaults to `"<s>"`):
+             The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier
+             token.
+
+             <Tip>
+
+             When building a sequence using special tokens, this is not the token that is used for the beginning of
+             sequence. The token used is the `cls_token`.
+
+             </Tip>
+
+         eos_token (`str`, *optional*, defaults to `"</s>"`):
+             The end of sequence token.
+
+             <Tip>
+
+             When building a sequence using special tokens, this is not the token that is used for the end of sequence.
+             The token used is the `sep_token`.
+
+             </Tip>
+
+         sep_token (`str`, *optional*, defaults to `"</s>"`):
+             The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+             sequence classification or for a text and a question for question answering. It is also used as the last
+             token of a sequence built with special tokens.
+         cls_token (`str`, *optional*, defaults to `"<s>"`):
+             The classifier token which is used when doing sequence classification (classification of the whole sequence
+             instead of per-token classification). It is the first token of the sequence when built with special tokens.
+         unk_token (`str`, *optional*, defaults to `"<unk>"`):
+             The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+             token instead.
+         pad_token (`str`, *optional*, defaults to `"<pad>"`):
+             The token used for padding, for example when batching sequences of different lengths.
+         mask_token (`str`, *optional*, defaults to `"<mask>"`):
+             The token used for masking values. This is the token used when training this model with masked language
+             modeling. This is the token which the model will try to predict.
+         sp_model_kwargs (`dict`, *optional*):
+             Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
+             SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
+             to set:
+
+             - `enable_sampling`: Enable subword regularization.
+             - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
+
+               - `nbest_size = {0,1}`: No sampling is performed.
+               - `nbest_size > 1`: samples from the nbest_size results.
+               - `nbest_size < 0`: assuming that nbest_size is infinite, samples from all hypotheses (lattice)
+                 using the forward-filtering-and-backward-sampling algorithm.
+
+             - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
+               BPE-dropout.
+
+     Attributes:
+         sp_model (`SentencePieceProcessor`):
+             The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
+     """
+
+     vocab_files_names = VOCAB_FILES_NAMES
+     model_input_names = ["input_ids", "attention_mask"]
+
+     def __init__(
+         self,
+         vocab_file,
+         bos_token="<s>",
+         eos_token="</s>",
+         sep_token="</s>",
+         cls_token="<s>",
+         unk_token="<unk>",
+         pad_token="<pad>",
+         mask_token="<mask>",
+         sp_model_kwargs: Optional[Dict[str, Any]] = None,
+         **kwargs,
+     ) -> None:
+         # The mask token behaves like a normal word, i.e. it includes the space before it,
+         # and will have normalized=False by default this way
+         mask_token = AddedToken(mask_token, lstrip=True, special=True) if isinstance(mask_token, str) else mask_token
+
+         self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
+
+         self.vocab_file = vocab_file
+         self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+         self.sp_model.Load(str(vocab_file))
+         super().__init__(
+             bos_token=bos_token,
+             eos_token=eos_token,
+             unk_token=unk_token,
+             sep_token=sep_token,
+             cls_token=cls_token,
+             pad_token=pad_token,
+             mask_token=mask_token,
+             sp_model_kwargs=self.sp_model_kwargs,
+             **kwargs,
+         )
+
+     def build_inputs_with_special_tokens(
+         self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+     ) -> List[int]:
+         """
+         Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating
+         and adding special tokens. A BARThez sequence has the following format:
+
+         - single sequence: `<s> X </s>`
+         - pair of sequences: `<s> A </s></s> B </s>`
+
+         Args:
+             token_ids_0 (`List[int]`):
+                 List of IDs to which the special tokens will be added.
+             token_ids_1 (`List[int]`, *optional*):
+                 Optional second list of IDs for sequence pairs.
+
+         Returns:
+             `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+         """
+
+         if token_ids_1 is None:
+             return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
+         cls = [self.cls_token_id]
+         sep = [self.sep_token_id]
+         return cls + token_ids_0 + sep + sep + token_ids_1 + sep
+
+     def get_special_tokens_mask(
+         self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+     ) -> List[int]:
+         """
+         Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+         special tokens using the tokenizer `prepare_for_model` method.
+
+         Args:
+             token_ids_0 (`List[int]`):
+                 List of IDs.
+             token_ids_1 (`List[int]`, *optional*):
+                 Optional second list of IDs for sequence pairs.
+             already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+                 Whether or not the token list is already formatted with special tokens for the model.
+
+         Returns:
+             `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+         """
+         if already_has_special_tokens:
+             return super().get_special_tokens_mask(
+                 token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+             )
+
+         if token_ids_1 is None:
+             return [1] + ([0] * len(token_ids_0)) + [1]
+         return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
+
+     def create_token_type_ids_from_sequences(
+         self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+     ) -> List[int]:
+         """
+         Create a mask from the two sequences passed to be used in a sequence-pair classification task.
+
+         Args:
+             token_ids_0 (`List[int]`):
+                 List of IDs.
+             token_ids_1 (`List[int]`, *optional*):
+                 Optional second list of IDs for sequence pairs.
+
+         Returns:
+             `List[int]`: List of zeros.
+         """
+         sep = [self.sep_token_id]
+         cls = [self.cls_token_id]
+
+         if token_ids_1 is None:
+             return len(cls + token_ids_0 + sep) * [0]
+         return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
+
+     @property
+     def vocab_size(self):
+         return len(self.sp_model)
+
+     def get_vocab(self):
+         vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
+         vocab.update(self.added_tokens_encoder)
+         return vocab
+
+     def _tokenize(self, text: str) -> List[str]:
+         return self.sp_model.encode(text, out_type=str)
+
+     def _convert_token_to_id(self, token):
+         """Converts a token (str) to an id using the vocab."""
+         return self.sp_model.PieceToId(token)
+
+     def _convert_id_to_token(self, index):
+         """Converts an index (integer) to a token (str) using the vocab."""
+         return self.sp_model.IdToPiece(index)
+
+     # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.convert_tokens_to_string
+     def convert_tokens_to_string(self, tokens):
+         """Converts a sequence of tokens (strings) to a single string."""
+         current_sub_tokens = []
+         out_string = ""
+         prev_is_special = False
+         for token in tokens:
+             # make sure that special tokens are not decoded using the sentencepiece model
+             if token in self.all_special_tokens:
+                 if not prev_is_special:
+                     out_string += " "
+                 out_string += self.sp_model.decode(current_sub_tokens) + token
+                 prev_is_special = True
+                 current_sub_tokens = []
+             else:
+                 current_sub_tokens.append(token)
+                 prev_is_special = False
+         out_string += self.sp_model.decode(current_sub_tokens)
+         return out_string.strip()
+
+     def __getstate__(self):
+         state = self.__dict__.copy()
+         state["sp_model"] = None
+         return state
+
+     def __setstate__(self, d):
+         self.__dict__ = d
+
+         # for backward compatibility
+         if not hasattr(self, "sp_model_kwargs"):
+             self.sp_model_kwargs = {}
+
+         self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+         self.sp_model.Load(self.vocab_file)
+
+     def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+         if not os.path.isdir(save_directory):
+             logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+             return
+         out_vocab_file = os.path.join(
+             save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+         )
+
+         if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
+             copyfile(self.vocab_file, out_vocab_file)
+         elif not os.path.isfile(self.vocab_file):
+             with open(out_vocab_file, "wb") as fi:
+                 content_spiece_model = self.sp_model.serialized_model_proto()
+                 fi.write(content_spiece_model)
+
+         return (out_vocab_file,)
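`build_inputs_with_special_tokens` above implements the BARThez layout `<s> A </s></s> B </s>`. A sketch of what that produces with toy IDs (the vocab path is hypothetical; a real `sentencepiece.bpe.model` file is required to instantiate the tokenizer):

```python
# Sketch: the special-token layout for a sequence pair, using toy token IDs.
# "sentencepiece.bpe.model" is a placeholder path for a real BARThez vocab file.
from transformers import BarthezTokenizer

tok = BarthezTokenizer("sentencepiece.bpe.model")
ids = tok.build_inputs_with_special_tokens([10, 11], [20, 21])
# -> [tok.cls_token_id, 10, 11, tok.sep_token_id, tok.sep_token_id, 20, 21, tok.sep_token_id]
```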
venv/lib/python3.10/site-packages/transformers/models/barthez/tokenization_barthez_fast.py ADDED
@@ -0,0 +1,195 @@
+ # coding=utf-8
+ # Copyright 2020 Ecole Polytechnique and the HuggingFace Inc. team.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Tokenization classes for the BARThez model."""
+
+
+ import os
+ from shutil import copyfile
+ from typing import List, Optional, Tuple
+
+ from ...tokenization_utils import AddedToken
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
+ from ...utils import is_sentencepiece_available, logging
+
+
+ if is_sentencepiece_available():
+     from .tokenization_barthez import BarthezTokenizer
+ else:
+     BarthezTokenizer = None
+
+ logger = logging.get_logger(__name__)
+
+ VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
+
+
+ SPIECE_UNDERLINE = "▁"
+
+
+ class BarthezTokenizerFast(PreTrainedTokenizerFast):
+     """
+     Adapted from [`CamembertTokenizer`] and [`BartTokenizer`]. Construct a "fast" BARThez tokenizer. Based on
+     [SentencePiece](https://github.com/google/sentencepiece).
+
+     This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
+     refer to this superclass for more information regarding those methods.
+
+     Args:
+         vocab_file (`str`):
+             [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
+             contains the vocabulary necessary to instantiate a tokenizer.
+         bos_token (`str`, *optional*, defaults to `"<s>"`):
+             The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier
+             token.
+
+             <Tip>
+
+             When building a sequence using special tokens, this is not the token that is used for the beginning of
+             sequence. The token used is the `cls_token`.
+
+             </Tip>
+
+         eos_token (`str`, *optional*, defaults to `"</s>"`):
+             The end of sequence token.
+
+             <Tip>
+
+             When building a sequence using special tokens, this is not the token that is used for the end of sequence.
+             The token used is the `sep_token`.
+
+             </Tip>
+
+         sep_token (`str`, *optional*, defaults to `"</s>"`):
+             The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+             sequence classification or for a text and a question for question answering. It is also used as the last
+             token of a sequence built with special tokens.
+         cls_token (`str`, *optional*, defaults to `"<s>"`):
+             The classifier token which is used when doing sequence classification (classification of the whole sequence
+             instead of per-token classification). It is the first token of the sequence when built with special tokens.
+         unk_token (`str`, *optional*, defaults to `"<unk>"`):
+             The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+             token instead.
+         pad_token (`str`, *optional*, defaults to `"<pad>"`):
+             The token used for padding, for example when batching sequences of different lengths.
+         mask_token (`str`, *optional*, defaults to `"<mask>"`):
+             The token used for masking values. This is the token used when training this model with masked language
+             modeling. This is the token which the model will try to predict.
+         additional_special_tokens (`List[str]`, *optional*, defaults to `["<s>NOTUSED", "</s>NOTUSED"]`):
+             Additional special tokens used by the tokenizer.
+     """
+
+     vocab_files_names = VOCAB_FILES_NAMES
+     model_input_names = ["input_ids", "attention_mask"]
+     slow_tokenizer_class = BarthezTokenizer
+
+     def __init__(
+         self,
+         vocab_file=None,
+         tokenizer_file=None,
+         bos_token="<s>",
+         eos_token="</s>",
+         sep_token="</s>",
+         cls_token="<s>",
+         unk_token="<unk>",
+         pad_token="<pad>",
+         mask_token="<mask>",
+         **kwargs,
+     ):
+         # The mask token behaves like a normal word, i.e. it includes the space before it
+         mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
+
+         super().__init__(
+             vocab_file,
+             tokenizer_file=tokenizer_file,
+             bos_token=bos_token,
+             eos_token=eos_token,
+             unk_token=unk_token,
+             sep_token=sep_token,
+             cls_token=cls_token,
+             pad_token=pad_token,
+             mask_token=mask_token,
+             **kwargs,
+         )
+
+         self.vocab_file = vocab_file
+
+     @property
+     def can_save_slow_tokenizer(self) -> bool:
+         return os.path.isfile(self.vocab_file) if self.vocab_file else False
+
+     def build_inputs_with_special_tokens(
+         self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+     ) -> List[int]:
+         """
+         Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating
+         and adding special tokens. A BARThez sequence has the following format:
+
+         - single sequence: `<s> X </s>`
+         - pair of sequences: `<s> A </s></s> B </s>`
+
+         Args:
+             token_ids_0 (`List[int]`):
+                 List of IDs to which the special tokens will be added.
+             token_ids_1 (`List[int]`, *optional*):
+                 Optional second list of IDs for sequence pairs.
+
+         Returns:
+             `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+         """
+
+         if token_ids_1 is None:
+             return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
+         cls = [self.cls_token_id]
+         sep = [self.sep_token_id]
+         return cls + token_ids_0 + sep + sep + token_ids_1 + sep
+
+     def create_token_type_ids_from_sequences(
+         self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+     ) -> List[int]:
+         """
+         Create a mask from the two sequences passed to be used in a sequence-pair classification task.
+
+         Args:
+             token_ids_0 (`List[int]`):
+                 List of IDs.
+             token_ids_1 (`List[int]`, *optional*):
+                 Optional second list of IDs for sequence pairs.
+
+         Returns:
+             `List[int]`: List of zeros.
+         """
+         sep = [self.sep_token_id]
+         cls = [self.cls_token_id]
+
+         if token_ids_1 is None:
+             return len(cls + token_ids_0 + sep) * [0]
+         return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
+
+     def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+         if not self.can_save_slow_tokenizer:
+             raise ValueError(
+                 "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
+                 "tokenizer."
+             )
+
+         if not os.path.isdir(save_directory):
+             logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+             return
+         out_vocab_file = os.path.join(
+             save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+         )
+
+         if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
+             copyfile(self.vocab_file, out_vocab_file)
+
+         return (out_vocab_file,)
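Unlike the slow tokenizer, the fast variant can only regenerate the sentencepiece vocabulary if the original `.spm` file is still around; `save_vocabulary` above raises otherwise, since a `tokenizer.json` alone cannot be converted back. A sketch of that guard (the file path is hypothetical):

```python
# Sketch: a fast tokenizer loaded from tokenizer.json alone has no vocab_file,
# so it cannot write out a slow-tokenizer vocabulary.
from transformers import BarthezTokenizerFast

tok = BarthezTokenizerFast(tokenizer_file="tokenizer.json")
assert not tok.can_save_slow_tokenizer  # save_vocabulary() would raise ValueError
```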
venv/lib/python3.10/site-packages/transformers/models/detr/__init__.py ADDED
@@ -0,0 +1,75 @@
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from typing import TYPE_CHECKING
+
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
+
+
+ _import_structure = {"configuration_detr": ["DETR_PRETRAINED_CONFIG_ARCHIVE_MAP", "DetrConfig", "DetrOnnxConfig"]}
+
+ try:
+     if not is_vision_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["feature_extraction_detr"] = ["DetrFeatureExtractor"]
+     _import_structure["image_processing_detr"] = ["DetrImageProcessor"]
+
+ try:
+     if not is_torch_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_detr"] = [
+         "DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
+         "DetrForObjectDetection",
+         "DetrForSegmentation",
+         "DetrModel",
+         "DetrPreTrainedModel",
+     ]
+
+
+ if TYPE_CHECKING:
+     from .configuration_detr import DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, DetrConfig, DetrOnnxConfig
+
+     try:
+         if not is_vision_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .feature_extraction_detr import DetrFeatureExtractor
+         from .image_processing_detr import DetrImageProcessor
+
+     try:
+         if not is_torch_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_detr import (
+             DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
+             DetrForObjectDetection,
+             DetrForSegmentation,
+             DetrModel,
+             DetrPreTrainedModel,
+         )
+
+ else:
+     import sys
+
+     sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
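The DETR `__init__.py` uses the same lazy structure as the BARThez one, but gates its symbols on two different optional dependencies: the image processors require vision support (Pillow), and the modeling classes require torch. A sketch of guarding an import the same way the module does internally:

```python
# Sketch: mirror the availability checks used above before touching the
# vision- or torch-gated DETR symbols.
from transformers.utils import is_torch_available, is_vision_available

if is_vision_available():
    from transformers.models.detr import DetrImageProcessor  # noqa: F401
if is_torch_available():
    from transformers.models.detr import DetrForObjectDetection  # noqa: F401
```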
venv/lib/python3.10/site-packages/transformers/models/detr/__pycache__/convert_detr_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (7.8 kB).
 
venv/lib/python3.10/site-packages/transformers/models/detr/__pycache__/convert_detr_to_pytorch.cpython-310.pyc ADDED
Binary file (10.3 kB).
 
venv/lib/python3.10/site-packages/transformers/models/detr/__pycache__/feature_extraction_detr.cpython-310.pyc ADDED
Binary file (1.33 kB).
 
venv/lib/python3.10/site-packages/transformers/models/detr/__pycache__/modeling_detr.cpython-310.pyc ADDED
Binary file (86.3 kB).
 
venv/lib/python3.10/site-packages/transformers/models/detr/configuration_detr.py ADDED
@@ -0,0 +1,284 @@
+ # coding=utf-8
+ # Copyright 2021 Facebook AI Research and The HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """DETR model configuration"""
+
+ from collections import OrderedDict
+ from typing import Mapping
+
+ from packaging import version
+
+ from ...configuration_utils import PretrainedConfig
+ from ...onnx import OnnxConfig
+ from ...utils import logging
+ from ..auto import CONFIG_MAPPING
+
+
+ logger = logging.get_logger(__name__)
+
+
+ from ..deprecated._archive_maps import DETR_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402
+
+
+ class DetrConfig(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`DetrModel`]. It is used to instantiate a DETR
+     model according to the specified arguments, defining the model architecture. Instantiating a configuration with
+     the defaults will yield a similar configuration to that of the DETR
+     [facebook/detr-resnet-50](https://huggingface.co/facebook/detr-resnet-50) architecture.
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+     Args:
+         use_timm_backbone (`bool`, *optional*, defaults to `True`):
+             Whether or not to use the `timm` library for the backbone. If set to `False`, will use the
+             [`AutoBackbone`] API.
+         backbone_config (`PretrainedConfig` or `dict`, *optional*):
+             The configuration of the backbone model. Only used in case `use_timm_backbone` is set to `False`, in
+             which case it will default to `ResNetConfig()`.
+         num_channels (`int`, *optional*, defaults to 3):
+             The number of input channels.
+         num_queries (`int`, *optional*, defaults to 100):
+             Number of object queries, i.e. detection slots. This is the maximal number of objects [`DetrModel`] can
+             detect in a single image. For COCO, we recommend 100 queries.
+         d_model (`int`, *optional*, defaults to 256):
+             Dimension of the layers.
+         encoder_layers (`int`, *optional*, defaults to 6):
+             Number of encoder layers.
+         decoder_layers (`int`, *optional*, defaults to 6):
+             Number of decoder layers.
+         encoder_attention_heads (`int`, *optional*, defaults to 8):
+             Number of attention heads for each attention layer in the Transformer encoder.
+         decoder_attention_heads (`int`, *optional*, defaults to 8):
+             Number of attention heads for each attention layer in the Transformer decoder.
+         decoder_ffn_dim (`int`, *optional*, defaults to 2048):
+             Dimension of the "intermediate" (often named feed-forward) layer in the decoder.
+         encoder_ffn_dim (`int`, *optional*, defaults to 2048):
+             Dimension of the "intermediate" (often named feed-forward) layer in the encoder.
+         activation_function (`str` or `function`, *optional*, defaults to `"relu"`):
+             The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+             `"relu"`, `"silu"` and `"gelu_new"` are supported.
+         dropout (`float`, *optional*, defaults to 0.1):
+             The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+         attention_dropout (`float`, *optional*, defaults to 0.0):
+             The dropout ratio for the attention probabilities.
+         activation_dropout (`float`, *optional*, defaults to 0.0):
+             The dropout ratio for activations inside the fully connected layer.
+         init_std (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         init_xavier_std (`float`, *optional*, defaults to 1):
+             The scaling factor used for the Xavier initialization gain in the HM Attention map module.
+         encoder_layerdrop (`float`, *optional*, defaults to 0.0):
+             The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
+             for more details.
+         decoder_layerdrop (`float`, *optional*, defaults to 0.0):
+             The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
+             for more details.
+         auxiliary_loss (`bool`, *optional*, defaults to `False`):
+             Whether auxiliary decoding losses (loss at each decoder layer) are to be used.
+         position_embedding_type (`str`, *optional*, defaults to `"sine"`):
+             Type of position embeddings to be used on top of the image features. One of `"sine"` or `"learned"`.
+         backbone (`str`, *optional*, defaults to `"resnet50"`):
+             Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this
+             will load the corresponding pretrained weights from the timm or transformers library. If
+             `use_pretrained_backbone` is `False`, this loads the backbone's config and uses that to initialize the
+             backbone with random weights.
+         use_pretrained_backbone (`bool`, *optional*, defaults to `True`):
+             Whether to use pretrained weights for the backbone.
+         backbone_kwargs (`dict`, *optional*):
+             Keyword arguments to be passed to AutoBackbone when loading from a checkpoint,
+             e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set.
+         dilation (`bool`, *optional*, defaults to `False`):
+             Whether to replace stride with dilation in the last convolutional block (DC5). Only supported when
+             `use_timm_backbone` is `True`.
+         class_cost (`float`, *optional*, defaults to 1):
+             Relative weight of the classification error in the Hungarian matching cost.
+         bbox_cost (`float`, *optional*, defaults to 5):
+             Relative weight of the L1 error of the bounding box coordinates in the Hungarian matching cost.
+         giou_cost (`float`, *optional*, defaults to 2):
+             Relative weight of the generalized IoU loss of the bounding box in the Hungarian matching cost.
+         mask_loss_coefficient (`float`, *optional*, defaults to 1):
+             Relative weight of the Focal loss in the panoptic segmentation loss.
+         dice_loss_coefficient (`float`, *optional*, defaults to 1):
+             Relative weight of the DICE/F-1 loss in the panoptic segmentation loss.
+         bbox_loss_coefficient (`float`, *optional*, defaults to 5):
+             Relative weight of the L1 bounding box loss in the object detection loss.
+         giou_loss_coefficient (`float`, *optional*, defaults to 2):
+             Relative weight of the generalized IoU loss in the object detection loss.
+         eos_coefficient (`float`, *optional*, defaults to 0.1):
+             Relative classification weight of the 'no-object' class in the object detection loss.
+
+     Examples:
+
+     ```python
+     >>> from transformers import DetrConfig, DetrModel
+
+     >>> # Initializing a DETR facebook/detr-resnet-50 style configuration
+     >>> configuration = DetrConfig()
+
+     >>> # Initializing a model (with random weights) from the facebook/detr-resnet-50 style configuration
+     >>> model = DetrModel(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+
+     model_type = "detr"
+     keys_to_ignore_at_inference = ["past_key_values"]
+     attribute_map = {
+         "hidden_size": "d_model",
+         "num_attention_heads": "encoder_attention_heads",
+     }
+
+     def __init__(
+         self,
+         use_timm_backbone=True,
+         backbone_config=None,
+         num_channels=3,
+         num_queries=100,
+         encoder_layers=6,
+         encoder_ffn_dim=2048,
+         encoder_attention_heads=8,
+         decoder_layers=6,
+         decoder_ffn_dim=2048,
+         decoder_attention_heads=8,
+         encoder_layerdrop=0.0,
+         decoder_layerdrop=0.0,
+         is_encoder_decoder=True,
+         activation_function="relu",
+         d_model=256,
+         dropout=0.1,
+         attention_dropout=0.0,
+         activation_dropout=0.0,
+         init_std=0.02,
+         init_xavier_std=1.0,
+         auxiliary_loss=False,
+         position_embedding_type="sine",
+         backbone="resnet50",
+         use_pretrained_backbone=True,
+         backbone_kwargs=None,
+         dilation=False,
+         class_cost=1,
+         bbox_cost=5,
+         giou_cost=2,
+         mask_loss_coefficient=1,
+         dice_loss_coefficient=1,
+         bbox_loss_coefficient=5,
+         giou_loss_coefficient=2,
+         eos_coefficient=0.1,
+         **kwargs,
+     ):
+         if not use_timm_backbone and use_pretrained_backbone:
+             raise ValueError(
+                 "Loading pretrained backbone weights from the transformers library is not supported yet. `use_timm_backbone` must be set to `True` when `use_pretrained_backbone=True`"
+             )
+
+         if backbone_config is not None and backbone is not None:
+             raise ValueError("You can't specify both `backbone` and `backbone_config`.")
+
+         if backbone_config is not None and use_timm_backbone:
+             raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
+
+         if backbone_kwargs is not None and backbone_kwargs and backbone_config is not None:
+             raise ValueError("You can't specify both `backbone_kwargs` and `backbone_config`.")
+
+         if not use_timm_backbone:
+             if backbone_config is None:
+                 logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
+                 backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
+             elif isinstance(backbone_config, dict):
+                 backbone_model_type = backbone_config.get("model_type")
+                 config_class = CONFIG_MAPPING[backbone_model_type]
+                 backbone_config = config_class.from_dict(backbone_config)
+             # set timm attributes to None
+             dilation, backbone, use_pretrained_backbone = None, None, None
+
+         self.use_timm_backbone = use_timm_backbone
+         self.backbone_config = backbone_config
+         self.num_channels = num_channels
+         self.num_queries = num_queries
+         self.d_model = d_model
+         self.encoder_ffn_dim = encoder_ffn_dim
+         self.encoder_layers = encoder_layers
+         self.encoder_attention_heads = encoder_attention_heads
+         self.decoder_ffn_dim = decoder_ffn_dim
+         self.decoder_layers = decoder_layers
+         self.decoder_attention_heads = decoder_attention_heads
+         self.dropout = dropout
+         self.attention_dropout = attention_dropout
+         self.activation_dropout = activation_dropout
+         self.activation_function = activation_function
+         self.init_std = init_std
+         self.init_xavier_std = init_xavier_std
+         self.encoder_layerdrop = encoder_layerdrop
+         self.decoder_layerdrop = decoder_layerdrop
+         self.num_hidden_layers = encoder_layers
+         self.auxiliary_loss = auxiliary_loss
+         self.position_embedding_type = position_embedding_type
+         self.backbone = backbone
+         self.use_pretrained_backbone = use_pretrained_backbone
+         self.backbone_kwargs = backbone_kwargs
+         self.dilation = dilation
+         # Hungarian matcher
+         self.class_cost = class_cost
+         self.bbox_cost = bbox_cost
+         self.giou_cost = giou_cost
+         # Loss coefficients
+         self.mask_loss_coefficient = mask_loss_coefficient
+         self.dice_loss_coefficient = dice_loss_coefficient
+         self.bbox_loss_coefficient = bbox_loss_coefficient
+         self.giou_loss_coefficient = giou_loss_coefficient
+         self.eos_coefficient = eos_coefficient
+         super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
+
+     @property
+     def num_attention_heads(self) -> int:
+         return self.encoder_attention_heads
+
+     @property
+     def hidden_size(self) -> int:
+         return self.d_model
+
+     @classmethod
+     def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
+         """Instantiate a [`DetrConfig`] (or a derived class) from a pre-trained backbone model configuration.
+
+         Args:
+             backbone_config ([`PretrainedConfig`]):
+                 The backbone configuration.
+         Returns:
+             [`DetrConfig`]: An instance of a configuration object
+         """
+         return cls(backbone_config=backbone_config, **kwargs)
+
+
+ class DetrOnnxConfig(OnnxConfig):
+     torch_onnx_minimum_version = version.parse("1.11")
+
+     @property
+     def inputs(self) -> Mapping[str, Mapping[int, str]]:
+         return OrderedDict(
+             [
+                 ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
+                 ("pixel_mask", {0: "batch"}),
+             ]
+         )
+
+     @property
+     def atol_for_validation(self) -> float:
+         return 1e-5
+
+     @property
+     def default_onnx_opset(self) -> int:
+         return 12
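`attribute_map` plus the two properties at the end make `DetrConfig` answer to the generic transformers names while storing the DETR-native ones. A quick sketch of that aliasing:

```python
# Sketch: hidden_size / num_attention_heads are read-through aliases for the
# DETR-native d_model / encoder_attention_heads fields.
from transformers import DetrConfig

cfg = DetrConfig(d_model=512, encoder_attention_heads=16)
assert cfg.hidden_size == 512
assert cfg.num_attention_heads == 16
```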
venv/lib/python3.10/site-packages/transformers/models/detr/convert_detr_original_pytorch_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,278 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert DETR checkpoints with timm backbone."""
16
+
17
+
18
+ import argparse
19
+ import json
20
+ from collections import OrderedDict
21
+ from pathlib import Path
22
+
23
+ import requests
24
+ import torch
25
+ from huggingface_hub import hf_hub_download
26
+ from PIL import Image
27
+
28
+ from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor
29
+ from transformers.utils import logging
30
+
31
+
32
+ logging.set_verbosity_info()
33
+ logger = logging.get_logger(__name__)
34
+
35
+ # here we list all keys to be renamed (original name on the left, our name on the right)
36
+ rename_keys = []
+ for i in range(6):
+     # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
+     rename_keys.append(
+         (f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
+     )
+     rename_keys.append(
+         (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
+     )
+     rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
+     rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
+     rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
+     rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
+     rename_keys.append(
+         (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
+     )
+     rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
+     rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
+     rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
+     # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
+     rename_keys.append(
+         (f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
+     )
+     rename_keys.append(
+         (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
+     )
+     rename_keys.append(
+         (
+             f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
+             f"decoder.layers.{i}.encoder_attn.out_proj.weight",
+         )
+     )
+     rename_keys.append(
+         (
+             f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
+             f"decoder.layers.{i}.encoder_attn.out_proj.bias",
+         )
+     )
+     rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
+     rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
+     rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
+     rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
+     rename_keys.append(
+         (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
+     )
+     rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
+     rename_keys.append(
+         (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
+     )
+     rename_keys.append(
+         (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
+     )
+     rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
+     rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))
+
+ # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
+ rename_keys.extend(
+     [
+         ("input_proj.weight", "input_projection.weight"),
+         ("input_proj.bias", "input_projection.bias"),
+         ("query_embed.weight", "query_position_embeddings.weight"),
+         ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
+         ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
+         ("class_embed.weight", "class_labels_classifier.weight"),
+         ("class_embed.bias", "class_labels_classifier.bias"),
+         ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
+         ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
+         ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
+         ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
+         ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
+         ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
+     ]
+ )
+
+
+ def rename_key(state_dict, old, new):
+     val = state_dict.pop(old)
+     state_dict[new] = val
+
+
+ def rename_backbone_keys(state_dict):
+     new_state_dict = OrderedDict()
+     for key, value in state_dict.items():
+         if "backbone.0.body" in key:
+             new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
+             new_state_dict[new_key] = value
+         else:
+             new_state_dict[key] = value
+
+     return new_state_dict
+
+
+ def read_in_q_k_v(state_dict, is_panoptic=False):
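+     # PyTorch's nn.MultiheadAttention stores the query/key/value projections as one stacked
+     # in_proj matrix of shape (3 * hidden_size, hidden_size); DETR's hidden size is 256,
+     # so each 256-row slice below corresponds to one of the q/k/v projections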
+     prefix = ""
+     if is_panoptic:
+         prefix = "detr."
+
+     # first: transformer encoder
+     for i in range(6):
+         # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
+         in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
+         in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
+         # next, add query, keys and values (in that order) to the state dict
+         state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
+         state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
+         state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
+         state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
+         state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
+         state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
+     # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
+     for i in range(6):
+         # read in weights + bias of input projection layer of self-attention
+         in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
+         in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
+         # next, add query, keys and values (in that order) to the state dict
+         state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
+         state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
+         state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
+         state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
+         state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
+         state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
+         # read in weights + bias of input projection layer of cross-attention
+         in_proj_weight_cross_attn = state_dict.pop(
+             f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
+         )
+         in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
+         # next, add query, keys and values (in that order) of cross-attention to the state dict
+         state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
+         state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
+         state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
+         state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
+         state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
+         state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
+
+
+ # We will verify our results on an image of cute cats
+ def prepare_img():
+     url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+     im = Image.open(requests.get(url, stream=True).raw)
+
+     return im
+
+
+ @torch.no_grad()
+ def convert_detr_checkpoint(model_name, pytorch_dump_folder_path):
+     """
+     Copy/paste/tweak model's weights to our DETR structure.
+     """
+
+     # load default config
+     config = DetrConfig()
+     # set backbone and dilation attributes
+     if "resnet101" in model_name:
+         config.backbone = "resnet101"
+     if "dc5" in model_name:
+         config.dilation = True
+     is_panoptic = "panoptic" in model_name
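+     # the original panoptic checkpoints were trained with 250 output classes, while plain
+     # COCO detection uses 91 category ids (some of which are unused placeholders)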
+     if is_panoptic:
+         config.num_labels = 250
+     else:
+         config.num_labels = 91
+         repo_id = "huggingface/label-files"
+         filename = "coco-detection-id2label.json"
+         id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
+         id2label = {int(k): v for k, v in id2label.items()}
+         config.id2label = id2label
+         config.label2id = {v: k for k, v in id2label.items()}
+
+     # load image processor
+     format = "coco_panoptic" if is_panoptic else "coco_detection"
+     image_processor = DetrImageProcessor(format=format)
+
+     # prepare image
+     img = prepare_img()
+     encoding = image_processor(images=img, return_tensors="pt")
+     pixel_values = encoding["pixel_values"]
+
+     logger.info(f"Converting model {model_name}...")
+
+     # load original model from torch hub
+     detr = torch.hub.load("facebookresearch/detr", model_name, pretrained=True).eval()
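+     # note: torch.hub.load fetches the original checkpoint from the facebookresearch/detr
+     # repository, so this step requires network access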
+     state_dict = detr.state_dict()
+     # rename keys
+     for src, dest in rename_keys:
+         if is_panoptic:
+             src = "detr." + src
+         rename_key(state_dict, src, dest)
+     state_dict = rename_backbone_keys(state_dict)
+     # query, key and value matrices need special treatment
+     read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
+     # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
+     prefix = "detr.model." if is_panoptic else "model."
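+     # in the HF implementation, DetrForSegmentation wraps DetrForObjectDetection (attribute
+     # "detr"), which in turn wraps DetrModel (attribute "model"), hence the nested prefixes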
+     for key in state_dict.copy().keys():
+         if is_panoptic:
+             if (
+                 key.startswith("detr")
+                 and not key.startswith("class_labels_classifier")
+                 and not key.startswith("bbox_predictor")
+             ):
+                 val = state_dict.pop(key)
+                 state_dict["detr.model" + key[4:]] = val
+             elif "class_labels_classifier" in key or "bbox_predictor" in key:
+                 val = state_dict.pop(key)
+                 state_dict["detr." + key] = val
+             elif key.startswith("bbox_attention") or key.startswith("mask_head"):
+                 continue
+             else:
+                 val = state_dict.pop(key)
+                 state_dict[prefix + key] = val
+         else:
+             if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
+                 val = state_dict.pop(key)
+                 state_dict[prefix + key] = val
+     # finally, create HuggingFace model and load state dict
+     model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
+     model.load_state_dict(state_dict)
+     model.eval()
+     # verify our conversion
+     original_outputs = detr(pixel_values)
+     outputs = model(pixel_values)
+     assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
+     assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
+     if is_panoptic:
+         assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
+
+     # Save model and image processor
+     logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
+     Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
+     model.save_pretrained(pytorch_dump_folder_path)
+     image_processor.save_pretrained(pytorch_dump_folder_path)
+
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser()
+
+     parser.add_argument(
+         "--model_name", default="detr_resnet50", type=str, help="Name of the DETR model you'd like to convert."
+     )
+     parser.add_argument(
+         "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
+     )
+     args = parser.parse_args()
+     convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
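+     # example invocation (the output folder name is illustrative):
+     # python convert_detr_original_pytorch_checkpoint_to_pytorch.py --model_name detr_resnet50 --pytorch_dump_folder_path ./detr-resnet-50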