applied-ai-018 committed (verified)
Commit 1694a46 · 1 Parent(s): 53b1d90

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .gitattributes +1 -0
  2. ckpts/universal/global_step20/zero/12.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt +3 -0
  3. lm-evaluation-harness/tests/testdata/arc_challenge-v0-res.json +1 -0
  4. lm-evaluation-harness/tests/testdata/blimp_causative-v0-res.json +1 -0
  5. lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_irregular_1-v0-res.json +1 -0
  6. lm-evaluation-harness/tests/testdata/blimp_ellipsis_n_bar_1-v0-loglikelihood +1 -0
  7. lm-evaluation-harness/tests/testdata/blimp_ellipsis_n_bar_1-v0-res.json +1 -0
  8. lm-evaluation-harness/tests/testdata/blimp_irregular_plural_subject_verb_agreement_2-v0-loglikelihood +1 -0
  9. lm-evaluation-harness/tests/testdata/blimp_principle_A_case_1-v0-loglikelihood +1 -0
  10. lm-evaluation-harness/tests/testdata/blimp_principle_A_domain_2-v0-loglikelihood +1 -0
  11. lm-evaluation-harness/tests/testdata/blimp_regular_plural_subject_verb_agreement_1-v0-res.json +1 -0
  12. lm-evaluation-harness/tests/testdata/blimp_sentential_negation_npi_licensor_present-v0-loglikelihood +1 -0
  13. lm-evaluation-harness/tests/testdata/blimp_sentential_negation_npi_scope-v0-loglikelihood +1 -0
  14. lm-evaluation-harness/tests/testdata/blimp_wh_island-v0-loglikelihood +1 -0
  15. lm-evaluation-harness/tests/testdata/blimp_wh_questions_object_gap-v0-res.json +1 -0
  16. lm-evaluation-harness/tests/testdata/blimp_wh_vs_that_no_gap-v0-loglikelihood +1 -0
  17. lm-evaluation-harness/tests/testdata/copa-v0-res.json +1 -0
  18. lm-evaluation-harness/tests/testdata/crows_pairs_french-v0-loglikelihood +1 -0
  19. lm-evaluation-harness/tests/testdata/hendrycksTest-abstract_algebra-v0-res.json +1 -0
  20. lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_biology-v0-res.json +1 -0
  21. lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_psychology-v0-loglikelihood +1 -0
  22. lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_us_history-v0-res.json +1 -0
  23. lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_world_history-v0-res.json +1 -0
  24. lm-evaluation-harness/tests/testdata/hendrycksTest-human_aging-v0-res.json +1 -0
  25. lm-evaluation-harness/tests/testdata/hendrycksTest-marketing-v0-loglikelihood +1 -0
  26. lm-evaluation-harness/tests/testdata/hendrycksTest-professional_law-v0-loglikelihood +1 -0
  27. lm-evaluation-harness/tests/testdata/hendrycksTest-professional_psychology-v0-res.json +1 -0
  28. lm-evaluation-harness/tests/testdata/hendrycksTest-public_relations-v0-res.json +1 -0
  29. lm-evaluation-harness/tests/testdata/lambada_mt_es-v0-loglikelihood +1 -0
  30. lm-evaluation-harness/tests/testdata/lambada_openai_mt_es-v0-loglikelihood +1 -0
  31. lm-evaluation-harness/tests/testdata/math_algebra-v0-res.json +1 -0
  32. lm-evaluation-harness/tests/testdata/mathqa-v0-loglikelihood +1 -0
  33. lm-evaluation-harness/tests/testdata/mrpc-v0-res.json +1 -0
  34. lm-evaluation-harness/tests/testdata/pile_gutenberg-v0-res.json +1 -0
  35. lm-evaluation-harness/tests/testdata/pile_pubmed-abstracts-v1-loglikelihood_rolling +1 -0
  36. lm-evaluation-harness/tests/testdata/pile_pubmed-central-v1-res.json +1 -0
  37. lm-evaluation-harness/tests/testdata/pile_stackexchange-v0-loglikelihood_rolling +1 -0
  38. lm-evaluation-harness/tests/testdata/pile_ubuntu-irc-v0-res.json +1 -0
  39. lm-evaluation-harness/tests/testdata/winogrande-v0-res.json +1 -0
  40. lm-evaluation-harness/tests/testdata/wmt14-en-fr-v0-res.json +1 -0
  41. lm-evaluation-harness/tests/testdata/wmt16-en-de-v0-greedy_until +1 -0
  42. venv/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libnvperf_host.so +3 -0
  43. venv/lib/python3.10/site-packages/transformers/models/clvp/__init__.py +83 -0
  44. venv/lib/python3.10/site-packages/transformers/models/clvp/configuration_clvp.py +456 -0
  45. venv/lib/python3.10/site-packages/transformers/models/dit/__init__.py +0 -0
  46. venv/lib/python3.10/site-packages/transformers/models/dit/__pycache__/__init__.cpython-310.pyc +0 -0
  47. venv/lib/python3.10/site-packages/transformers/models/dit/__pycache__/convert_dit_unilm_to_pytorch.cpython-310.pyc +0 -0
  48. venv/lib/python3.10/site-packages/transformers/models/dit/convert_dit_unilm_to_pytorch.py +231 -0
  49. venv/lib/python3.10/site-packages/transformers/models/groupvit/__pycache__/__init__.cpython-310.pyc +0 -0
  50. venv/lib/python3.10/site-packages/transformers/models/groupvit/__pycache__/configuration_groupvit.cpython-310.pyc +0 -0
.gitattributes CHANGED
@@ -87,3 +87,4 @@ venv/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libcheckpoint.so filter=
  venv/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libcupti.so.12 filter=lfs diff=lfs merge=lfs -text
  venv/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libnvperf_target.so filter=lfs diff=lfs merge=lfs -text
  venv/lib/python3.10/site-packages/nvidia/cufft/lib/libcufftw.so.11 filter=lfs diff=lfs merge=lfs -text
+ venv/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libnvperf_host.so filter=lfs diff=lfs merge=lfs -text
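The added line tells git to route the new libnvperf_host.so through Git LFS, so the repository stores a small text pointer instead of the 28 MB shared library. A minimal sketch (not part of the commit) of verifying that the attribute took effect, assuming git is on PATH and the snippet runs from the repository root:

import subprocess

def is_lfs_tracked(path: str) -> bool:
    # "git check-attr filter -- <path>" prints "<path>: filter: lfs" for LFS-tracked files
    out = subprocess.run(
        ["git", "check-attr", "filter", "--", path],
        capture_output=True, text=True, check=True,
    ).stdout
    return out.strip().endswith(": filter: lfs")

print(is_lfs_tracked("venv/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libnvperf_host.so"))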
ckpts/universal/global_step20/zero/12.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:63eeaae098cc25b501ed9d1533209c6d19ec44b0df8c8458153bc6dc19cf37a0
+ size 33555627
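These three lines are the entire committed file: Git LFS stores this pointer in place of the real 33 MB optimizer-state tensor, whose content is addressed by the sha256 oid. A minimal sketch of parsing such a pointer, assuming only the version/oid/size layout shown above:

def parse_lfs_pointer(text: str) -> dict:
    # each pointer line is "<key> <value>"
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    assert fields["version"].startswith("https://git-lfs.github.com/spec/")
    return {"oid": fields["oid"], "size": int(fields["size"])}

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:63eeaae098cc25b501ed9d1533209c6d19ec44b0df8c8458153bc6dc19cf37a0
size 33555627"""
print(parse_lfs_pointer(pointer))  # {'oid': 'sha256:63eeaae...', 'size': 33555627}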
lm-evaluation-harness/tests/testdata/arc_challenge-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"arc_challenge": {"acc": 0.24488054607508533, "acc_norm": 0.2440273037542662, "acc_norm_stderr": 0.012551447627856257, "acc_stderr": 0.012566273985131354}}, "versions": {"arc_challenge": 0}}
lm-evaluation-harness/tests/testdata/blimp_causative-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_causative": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_causative": 0}}
lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_irregular_1-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_determiner_noun_agreement_irregular_1": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_determiner_noun_agreement_irregular_1": 0}}
lm-evaluation-harness/tests/testdata/blimp_ellipsis_n_bar_1-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ d14e4b7fcdd68991eb39b9cf3ade4b37dee9ddd39b688f861d81a327e47a969f
lm-evaluation-harness/tests/testdata/blimp_ellipsis_n_bar_1-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_ellipsis_n_bar_1": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_ellipsis_n_bar_1": 0}}
lm-evaluation-harness/tests/testdata/blimp_irregular_plural_subject_verb_agreement_2-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 9534751f83a86b6cbe1fb12fb9feb827b0b7836a663108928b4ecc1d70b08871
lm-evaluation-harness/tests/testdata/blimp_principle_A_case_1-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 49d2b8ce6667a6166fdc2a2e5dbe7ff07d9b8415e9f33482aef15956b3ebc24a
lm-evaluation-harness/tests/testdata/blimp_principle_A_domain_2-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ eb5ddf0a97982373ab1a4e58267cfcdebdecdb86c376dfd5ebf46737c9d3ee12
lm-evaluation-harness/tests/testdata/blimp_regular_plural_subject_verb_agreement_1-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_regular_plural_subject_verb_agreement_1": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_regular_plural_subject_verb_agreement_1": 0}}
lm-evaluation-harness/tests/testdata/blimp_sentential_negation_npi_licensor_present-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ e6666c5657215ff4bfd646b8ee3ae6df956e71c0be9ab1c287fb1b68291dd0d1
lm-evaluation-harness/tests/testdata/blimp_sentential_negation_npi_scope-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 32fcbd0a1c6e664af2751bad552587b5ca3911973b07f4fb2cf0a2acd3de5349
lm-evaluation-harness/tests/testdata/blimp_wh_island-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 91a9e4b60b0f3572a7fdbd7648d0e69f36e5eb34db715315b0082558d7ed8b65
lm-evaluation-harness/tests/testdata/blimp_wh_questions_object_gap-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_wh_questions_object_gap": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_wh_questions_object_gap": 0}}
lm-evaluation-harness/tests/testdata/blimp_wh_vs_that_no_gap-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ d1d3e439b2020ef5ed232bfebbcc9634adc5117e9eb61e38fdbbe2c8ea128d54
lm-evaluation-harness/tests/testdata/copa-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"copa": {"acc": 0.48, "acc_stderr": 0.050211673156867795}}, "versions": {"copa": 0}}
lm-evaluation-harness/tests/testdata/crows_pairs_french-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 4fb61dcf4d2c59d6470b297a01d5f429ee442864e225e1760fbf191b2a0901cd
lm-evaluation-harness/tests/testdata/hendrycksTest-abstract_algebra-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"hendrycksTest-abstract_algebra": {"acc": 0.32, "acc_norm": 0.34, "acc_norm_stderr": 0.04760952285695235, "acc_stderr": 0.04688261722621504}}, "versions": {"hendrycksTest-abstract_algebra": 0}}
lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_biology-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"hendrycksTest-high_school_biology": {"acc": 0.23870967741935484, "acc_norm": 0.2709677419354839, "acc_norm_stderr": 0.025284416114900152, "acc_stderr": 0.024251071262208834}}, "versions": {"hendrycksTest-high_school_biology": 0}}
lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_psychology-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 0e4c8d13806d3696167e40544d2d114c557c10c74bc61fcb9c51bbfced0266ef
lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_us_history-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"hendrycksTest-high_school_us_history": {"acc": 0.29901960784313725, "acc_norm": 0.28431372549019607, "acc_norm_stderr": 0.03166009679399814, "acc_stderr": 0.03213325717373618}}, "versions": {"hendrycksTest-high_school_us_history": 0}}
lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_world_history-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"hendrycksTest-high_school_world_history": {"acc": 0.23628691983122363, "acc_norm": 0.24472573839662448, "acc_norm_stderr": 0.02798569938703642, "acc_stderr": 0.027652153144159263}}, "versions": {"hendrycksTest-high_school_world_history": 0}}
lm-evaluation-harness/tests/testdata/hendrycksTest-human_aging-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"hendrycksTest-human_aging": {"acc": 0.21524663677130046, "acc_norm": 0.17937219730941703, "acc_norm_stderr": 0.025749819569192804, "acc_stderr": 0.02758406660220827}}, "versions": {"hendrycksTest-human_aging": 0}}
lm-evaluation-harness/tests/testdata/hendrycksTest-marketing-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ b4fa0681fe54671a80509779d4338d744097a7206687f62977df7145dfa74a66
lm-evaluation-harness/tests/testdata/hendrycksTest-professional_law-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ c38c9d5d84eeb7a5f3c4a34d6e70d7e15847b3c38f26e4b119c982bb935e118f
lm-evaluation-harness/tests/testdata/hendrycksTest-professional_psychology-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"hendrycksTest-professional_psychology": {"acc": 0.27124183006535946, "acc_norm": 0.2826797385620915, "acc_norm_stderr": 0.01821726955205344, "acc_stderr": 0.01798661530403031}}, "versions": {"hendrycksTest-professional_psychology": 0}}
lm-evaluation-harness/tests/testdata/hendrycksTest-public_relations-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"hendrycksTest-public_relations": {"acc": 0.3090909090909091, "acc_norm": 0.2636363636363636, "acc_norm_stderr": 0.04220224692971987, "acc_stderr": 0.044262946482000985}}, "versions": {"hendrycksTest-public_relations": 0}}
lm-evaluation-harness/tests/testdata/lambada_mt_es-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 4a88f4b316c72fe0396c382d6cbb33568ac4d0ad225150d3536635c085359fc9
lm-evaluation-harness/tests/testdata/lambada_openai_mt_es-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 4a88f4b316c72fe0396c382d6cbb33568ac4d0ad225150d3536635c085359fc9
lm-evaluation-harness/tests/testdata/math_algebra-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"math_algebra": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"math_algebra": 0}}
lm-evaluation-harness/tests/testdata/mathqa-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ a45260e49f02c7cb8886b3746db4d388890860b202dd8a9f0267e3c324e0af13
lm-evaluation-harness/tests/testdata/mrpc-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"mrpc": {"acc": 0.5392156862745098, "acc_stderr": 0.024707732873723128, "f1": 0.5982905982905982, "f1_stderr": 0.028928325246283727}}, "versions": {"mrpc": 0}}
lm-evaluation-harness/tests/testdata/pile_gutenberg-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"pile_gutenberg": {"bits_per_byte": 1.2443606332351536e-06, "byte_perplexity": 1.0000012443614075, "word_perplexity": 1.0000072174665404}}, "versions": {"pile_gutenberg": 0}}
lm-evaluation-harness/tests/testdata/pile_pubmed-abstracts-v1-loglikelihood_rolling ADDED
@@ -0,0 +1 @@
+ 66436569a43163afb2caf422d32c5f329899e74c49865d4d13881fd465fd9976
lm-evaluation-harness/tests/testdata/pile_pubmed-central-v1-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"pile_pubmed-central": {"bits_per_byte": 2.2812488135667854e-05, "byte_perplexity": 1.0000158125368497, "word_perplexity": 1.000123107107861}}, "versions": {"pile_pubmed-central": 1}}
lm-evaluation-harness/tests/testdata/pile_stackexchange-v0-loglikelihood_rolling ADDED
@@ -0,0 +1 @@
+ e524bfb3e21cbdaddc117403a50df598520c7bf5b2c60ad8f2372cfa564e79be
lm-evaluation-harness/tests/testdata/pile_ubuntu-irc-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"pile_ubuntu-irc": {"bits_per_byte": 1.6298315496830533e-06, "byte_perplexity": 1.0000016298328778, "word_perplexity": 1.0000108866656874}}, "versions": {"pile_ubuntu-irc": 0}}
lm-evaluation-harness/tests/testdata/winogrande-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"winogrande": {"acc": 0.516179952644041, "acc_stderr": 0.014045126130978606}}, "versions": {"winogrande": 0}}
lm-evaluation-harness/tests/testdata/wmt14-en-fr-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"wmt14-en-fr": {"bleu": 0.0, "bleu_stderr": 0.0, "chrf": 0.011284118461117099, "chrf_stderr": 7.340651275964445e-05, "ter": 1.0, "ter_stderr": 0.0}}, "versions": {"wmt14-en-fr": 0}}
lm-evaluation-harness/tests/testdata/wmt16-en-de-v0-greedy_until ADDED
@@ -0,0 +1 @@
+ d71e2074af3770e9b29ac561caf2e1c29ad6b0dc50ec2e7bcc5501747b11f0da
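The lm-evaluation-harness fixtures above come in two flavors: the *-res.json files hold the expected metrics for a task, while the extension-named files (-loglikelihood, -greedy_until, -loglikelihood_rolling) each appear to hold a single sha256 digest that the harness's regression tests compare against freshly serialized outputs. A hypothetical helper (not part of the harness) sketching how a new run could be checked against a JSON fixture:

import json

def matches_fixture(fixture_path: str, fresh_results: dict, tol: float = 1e-9) -> bool:
    # fixture layout, as shown above: {"results": {task: {metric: value, ...}}, "versions": {...}}
    with open(fixture_path) as f:
        expected = json.load(f)["results"]
    return all(
        abs(fresh_results[task][metric] - value) <= tol
        for task, metrics in expected.items()
        for metric, value in metrics.items()
    )

print(matches_fixture("lm-evaluation-harness/tests/testdata/copa-v0-res.json",
                      {"copa": {"acc": 0.48, "acc_stderr": 0.050211673156867795}}))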
venv/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libnvperf_host.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:95cec42ae770c1f2251d204b03e12d56fdb2e5561e4898c07b40382fe2474589
+ size 28636664
venv/lib/python3.10/site-packages/transformers/models/clvp/__init__.py ADDED
@@ -0,0 +1,83 @@
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ from typing import TYPE_CHECKING
+
+ from ...utils import (
+     OptionalDependencyNotAvailable,
+     _LazyModule,
+     is_torch_available,
+ )
+
+
+ _import_structure = {
+     "configuration_clvp": [
+         "CLVP_PRETRAINED_CONFIG_ARCHIVE_MAP",
+         "ClvpConfig",
+         "ClvpDecoderConfig",
+         "ClvpEncoderConfig",
+     ],
+     "feature_extraction_clvp": ["ClvpFeatureExtractor"],
+     "processing_clvp": ["ClvpProcessor"],
+     "tokenization_clvp": ["ClvpTokenizer"],
+ }
+
+
+ try:
+     if not is_torch_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_clvp"] = [
+         "CLVP_PRETRAINED_MODEL_ARCHIVE_LIST",
+         "ClvpModelForConditionalGeneration",
+         "ClvpForCausalLM",
+         "ClvpModel",
+         "ClvpPreTrainedModel",
+         "ClvpEncoder",
+         "ClvpDecoder",
+     ]
+
+
+ if TYPE_CHECKING:
+     from .configuration_clvp import (
+         CLVP_PRETRAINED_CONFIG_ARCHIVE_MAP,
+         ClvpConfig,
+         ClvpDecoderConfig,
+         ClvpEncoderConfig,
+     )
+     from .feature_extraction_clvp import ClvpFeatureExtractor
+     from .processing_clvp import ClvpProcessor
+     from .tokenization_clvp import ClvpTokenizer
+
+     try:
+         if not is_torch_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_clvp import (
+             CLVP_PRETRAINED_MODEL_ARCHIVE_LIST,
+             ClvpDecoder,
+             ClvpEncoder,
+             ClvpForCausalLM,
+             ClvpModel,
+             ClvpModelForConditionalGeneration,
+             ClvpPreTrainedModel,
+         )
+
+ else:
+     import sys
+
+     sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
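The file follows the transformers lazy-import pattern: at runtime the module object is swapped for a _LazyModule, so the torch-backed submodules load only on first attribute access. A minimal sketch of the observable effect (assuming this transformers build is importable):

import sys

from transformers.models import clvp

print("transformers.models.clvp.tokenization_clvp" in sys.modules)  # False: nothing loaded yet
tokenizer_cls = clvp.ClvpTokenizer  # first attribute access imports tokenization_clvp
print("transformers.models.clvp.tokenization_clvp" in sys.modules)  # True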
venv/lib/python3.10/site-packages/transformers/models/clvp/configuration_clvp.py ADDED
@@ -0,0 +1,456 @@
+ # coding=utf-8
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """ CLVP model configuration"""
+
+
+ import os
+ from typing import TYPE_CHECKING, Union
+
+
+ if TYPE_CHECKING:
+     pass
+
+ from ...configuration_utils import PretrainedConfig
+ from ...utils import logging
+
+
+ logger = logging.get_logger(__name__)
+
+
+ from ..deprecated._archive_maps import CLVP_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402
+
+
+ class ClvpEncoderConfig(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`ClvpEncoder`]. It is used to instantiate a CLVP
+     text or CLVP speech encoder according to the specified arguments. Instantiating a configuration with the defaults
+     will yield a similar configuration to that of the encoder of the CLVP
+     [susnato/clvp_dev](https://huggingface.co/susnato/clvp_dev) architecture.
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+     Args:
+         vocab_size (`int`, *optional*, defaults to 256):
+             Vocabulary size of the CLVP Encoder model.
+         hidden_size (`int`, *optional*, defaults to 768):
+             Dimensionality of the encoder layers and the pooler layer.
+         intermediate_size (`int`, *optional*, defaults to 1536):
+             Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+         projection_dim (`int`, *optional*, defaults to 768):
+             Dimensionality of the projection vector.
+         num_hidden_layers (`int`, *optional*, defaults to 20):
+             Number of hidden layers in the Transformer encoder.
+         num_attention_heads (`int`, *optional*, defaults to 12):
+             Number of attention heads for each attention layer in the Transformer encoder.
+         hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+             The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+             `"relu"`, `"selu"`, `"gelu_new"` and `"quick_gelu"` are supported.
+         layer_norm_eps (`float`, *optional*, defaults to 1e-05):
+             The epsilon used by the layer normalization layers.
+         attention_dropout (`float`, *optional*, defaults to 0.1):
+             The dropout ratio for the attention probabilities.
+         dropout (`float`, *optional*, defaults to 0.1):
+             The dropout ratio for the feed-forward layers in [`ClvpEncoderMLP`].
+         use_rotary_embedding (`bool`, *optional*, defaults to `True`):
+             Whether to use rotary_embedding or not.
+         use_attention_bias (`bool`, *optional*, defaults to `False`):
+             Whether to use bias in Query, Key and Value layers during self attention.
+         summary_type (`str`, *optional*, defaults to `"mean"`):
+             What strategy to use to get pooler_output from the last_hidden_state. `"last"`, `"first"`, `"mean"` and
+             `"cls_index"` are supported.
+         initializer_factor (`float`, *optional*, defaults to 1.0):
+             A factor for initializing all weight matrices (should be kept to 1.0, used internally for initialization
+             testing).
+         bos_token_id (`int`, *optional*, defaults to 255):
+             Beginning of sequence token id.
+         eos_token_id (`int`, *optional*, defaults to 0):
+             End of sequence token id.
+
+     Example:
+
+     ```python
+     >>> from transformers import ClvpEncoderConfig, ClvpEncoder
+
+     >>> # Initializing a ClvpEncoderConfig with susnato/clvp_dev style configuration
+     >>> encoder_configuration = ClvpEncoderConfig()
+
+     >>> # Initializing a ClvpEncoder (with random weights) from the susnato/clvp_dev style configuration
+     >>> model = ClvpEncoder(encoder_configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+
+     model_type = "clvp_encoder"
+
+     def __init__(
+         self,
+         vocab_size=256,
+         hidden_size=768,
+         intermediate_size=1536,
+         projection_dim=768,
+         num_hidden_layers=20,
+         num_attention_heads=12,
+         hidden_act="gelu",
+         layer_norm_eps=1e-5,
+         attention_dropout=0.1,
+         dropout=0.1,
+         use_rotary_embedding=True,
+         use_attention_bias=False,
+         summary_type="mean",
+         initializer_factor=1.0,
+         bos_token_id=255,
+         eos_token_id=0,
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.projection_dim = projection_dim
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+         self.layer_norm_eps = layer_norm_eps
+         self.hidden_act = hidden_act
+         self.initializer_factor = initializer_factor
+         self.attention_dropout = attention_dropout
+         self.dropout = dropout
+         self.use_rotary_embedding = use_rotary_embedding
+         self.use_attention_bias = use_attention_bias
+         self.summary_type = summary_type
+         self.bos_token_id = bos_token_id
+         self.eos_token_id = eos_token_id
+
+         super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
+
+     @classmethod
+     def from_pretrained(
+         cls, pretrained_model_name_or_path: Union[str, os.PathLike], config_type: str = "text_config", **kwargs
+     ) -> "PretrainedConfig":
+         cls._set_token_in_kwargs(kwargs)
+
+         config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
+
+         # make sure to have the config_type be either "text_config" or "speech_config"
+         # this is to make sure that we can load only text or speech configs from the nested ClvpConfig.
+         if config_type not in ["text_config", "speech_config"]:
+             raise ValueError(
+                 f"We can only load either 'text_config' or 'speech_config' but you are trying to load {config_type}"
+             )
+
+         # get the text config dict if we are loading from ClvpConfig
+         if config_dict.get("model_type") == "clvp":
+             config_dict = config_dict[config_type]
+
+         if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
+             logger.warning(
+                 f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
+                 f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
+             )
+
+         return cls.from_dict(config_dict, **kwargs)
+
+
+ class ClvpDecoderConfig(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`ClvpDecoder`]. It is used to instantiate a CLVP
+     Decoder Model according to the specified arguments, defining the model architecture. Instantiating a configuration
+     with the defaults will yield a similar configuration to that of the Decoder part of the CLVP
+     [susnato/clvp_dev](https://huggingface.co/susnato/clvp_dev) architecture.
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+     The architecture is similar to GPT2.
+
+     Args:
+         vocab_size (`int`, *optional*, defaults to 8194):
+             Vocabulary size of the model.
+         max_position_embeddings (`int`, *optional*, defaults to 608):
+             The maximum sequence length of mel tokens that this model might ever be used with. Similar to `n_positions`
+             in `GPT2Config`.
+         max_text_tokens (`int`, *optional*, defaults to 404):
+             The maximum sequence length of text tokens that this model might ever be used with. Similar to
+             `n_positions` in `GPT2Config`.
+         hidden_size (`int`, *optional*, defaults to 1024):
+             Dimensionality of the embeddings and hidden states.
+         num_hidden_layers (`int`, *optional*, defaults to 30):
+             Number of hidden layers in the Transformer encoder.
+         num_attention_heads (`int`, *optional*, defaults to 16):
+             Number of attention heads for each attention layer in the Transformer encoder.
+         n_inner (`int`, *optional*):
+             Dimensionality of the inner feed-forward layers. `None` will set it to 4 times `hidden_size`.
+         num_mel_attn_blocks (`int`, *optional*, defaults to 6):
+             Denotes the number of self attention layers in [`ClvpConditioningEncoder`].
+         activation_function (`str`, *optional*, defaults to `"gelu_new"`):
+             Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new"]`.
+         resid_pdrop (`float`, *optional*, defaults to 0.1):
+             The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+         embd_pdrop (`float`, *optional*, defaults to 0.1):
+             The dropout ratio for the embeddings.
+         attention_dropout (`float`, *optional*, defaults to 0.1):
+             The dropout ratio for the attention.
+         layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
+             The epsilon to use in the layer normalization layers.
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         summary_type (`string`, *optional*, defaults to `"cls_index"`):
+             Argument used when doing sequence summary.
+
+             Has to be one of the following options:
+
+             - `"last"`: Take the last token hidden state (like XLNet).
+             - `"first"`: Take the first token hidden state (like BERT).
+             - `"mean"`: Take the mean of all tokens hidden states.
+             - `"cls_index"`: Supply a Tensor of classification token position (like GPT/GPT-2).
+             - `"attn"`: Not implemented now, use multi-head attention.
+         summary_use_proj (`bool`, *optional*, defaults to `True`):
+             Whether or not to add a projection after the vector extraction.
+         summary_activation (`str`, *optional*):
+             Pass `"tanh"` for a tanh activation to the output, any other value will result in no activation.
+         summary_proj_to_labels (`bool`, *optional*, defaults to `True`):
+             Whether the projection outputs should have `config.num_labels` or `config.hidden_size` classes.
+         summary_first_dropout (`float`, *optional*, defaults to 0.1):
+             The dropout ratio to be used after the projection and activation.
+         use_cache (`bool`, *optional*, defaults to `True`):
+             Whether or not the model should return the last key/values attentions (not used by all models).
+         bos_token_id (`int`, *optional*, defaults to 8192):
+             Beginning of sequence token id, used at the start of the generation.
+         eos_token_id (`int`, *optional*, defaults to 8193):
+             End of sequence token id, used in the method
+             [`ClvpModelForConditionalGeneration.fix_speech_decoder_output()`] to correct decoder outputs.
+         feature_size (`int`, *optional*, defaults to 80):
+             The feature dimension of the extracted mel features. This value is used in [`ClvpConditioningEncoder`].
+         use_attention_bias (`bool`, *optional*, defaults to `True`):
+             Whether to use bias in Query, Key and Value layers during self attention.
+         initializer_factor (`float`, *optional*, defaults to 1.0):
+             A factor for initializing all weight matrices (should be kept to 1.0, used internally for initialization
+             testing).
+         decoder_fixing_codes (`list`, *optional*, defaults to `[83, 45, 45, 248]`):
+             These values are used in the method `fix_speech_decoder_output` to fix decoder generated outputs.
+
+     Example:
+
+     ```python
+     >>> from transformers import ClvpDecoderConfig, ClvpDecoder
+
+     >>> # Initializing a ClvpDecoderConfig with susnato/clvp_dev style configuration
+     >>> decoder_configuration = ClvpDecoderConfig()
+
+     >>> # Initializing a ClvpDecoder (with random weights) from the susnato/clvp_dev style configuration
+     >>> model = ClvpDecoder(decoder_configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+
+     model_type = "clvp_decoder"
+
+     def __init__(
+         self,
+         vocab_size=8194,
+         max_position_embeddings=608,
+         max_text_tokens=404,
+         hidden_size=1024,
+         num_hidden_layers=30,
+         num_attention_heads=16,
+         n_inner=None,
+         num_mel_attn_blocks=6,
+         activation_function="gelu_new",
+         resid_pdrop=0.1,
+         embd_pdrop=0.1,
+         attention_dropout=0.1,
+         layer_norm_epsilon=1e-5,
+         initializer_range=0.02,
+         summary_type="cls_index",
+         summary_use_proj=True,
+         summary_activation=None,
+         summary_proj_to_labels=True,
+         summary_first_dropout=0.1,
+         use_cache=True,
+         bos_token_id=8192,
+         eos_token_id=8193,
+         feature_size=80,
+         use_attention_bias=True,
+         initializer_factor=1.0,
+         decoder_fixing_codes=[83, 45, 45, 248],
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.max_position_embeddings = max_position_embeddings
+         self.max_text_tokens = max_text_tokens
+         self.hidden_size = hidden_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+         self.n_inner = n_inner
+         self.num_mel_attn_blocks = num_mel_attn_blocks
+         self.activation_function = activation_function
+         self.resid_pdrop = resid_pdrop
+         self.embd_pdrop = embd_pdrop
+         self.attention_dropout = attention_dropout
+         self.layer_norm_epsilon = layer_norm_epsilon
+         self.initializer_range = initializer_range
+         self.summary_type = summary_type
+         self.summary_use_proj = summary_use_proj
+         self.summary_activation = summary_activation
+         self.summary_first_dropout = summary_first_dropout
+         self.summary_proj_to_labels = summary_proj_to_labels
+         self.use_cache = use_cache
+         self.feature_size = feature_size
+         self.use_attention_bias = use_attention_bias
+         self.initializer_factor = initializer_factor
+         self.decoder_fixing_codes = decoder_fixing_codes
+
+         self.bos_token_id = bos_token_id
+         self.eos_token_id = eos_token_id
+
+         super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
+
+     @classmethod
+     def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
+         cls._set_token_in_kwargs(kwargs)
+
+         config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
+
+         # get the speech config dict if we are loading from ClvpConfig
+         if config_dict.get("model_type") == "clvp":
+             config_dict = config_dict["decoder_config"]
+
+         if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
+             logger.warning(
+                 f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
+                 f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
+             )
+
+         return cls.from_dict(config_dict, **kwargs)
+
+
+ class ClvpConfig(PretrainedConfig):
+     r"""
+     [`ClvpConfig`] is the configuration class to store the configuration of a [`ClvpModelForConditionalGeneration`]. It
+     is used to instantiate a CLVP model according to the specified arguments, defining the text model, speech model and
+     decoder model configs. Instantiating a configuration with the defaults will yield a similar configuration to that
+     of the CLVP [susnato/clvp_dev](https://huggingface.co/susnato/clvp_dev) architecture.
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+     Args:
+         text_config (`dict`, *optional*):
+             Dictionary of configuration options used to initialize the CLVP text encoder.
+         speech_config (`dict`, *optional*):
+             Dictionary of configuration options used to initialize the CLVP speech encoder.
+         decoder_config (`dict`, *optional*):
+             Dictionary of configuration options used to initialize [`ClvpDecoderConfig`].
+         projection_dim (`int`, *optional*, defaults to 768):
+             Dimensionality of text and speech projection layers.
+         logit_scale_init_value (`float`, *optional*, defaults to 2.6592):
+             The initial value of the *logit_scale* parameter. Default is used as per the original CLVP implementation.
+         initializer_factor (`float`, *optional*, defaults to 1.0):
+             A factor for initializing all weight matrices (should be kept to 1.0, used internally for initialization
+             testing).
+         kwargs (*optional*):
+             Dictionary of keyword arguments.
+
+     Example:
+
+     ```python
+     >>> from transformers import ClvpConfig, ClvpModelForConditionalGeneration
+
+     >>> # Initializing a ClvpConfig with susnato/clvp_dev style configuration
+     >>> configuration = ClvpConfig()
+
+     >>> # Initializing a ClvpModelForConditionalGeneration (with random weights) from the susnato/clvp_dev style configuration
+     >>> model = ClvpModelForConditionalGeneration(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+
+     >>> # We can also initialize a CLVPConfig from a CLVPTextConfig, CLVPSpeechConfig and a CLVPAutoRegressiveConfig
+     >>> from transformers import ClvpEncoderConfig, ClvpDecoderConfig
+
+     >>> # Initializing a CLVP text, CLVP speech and CLVP decoder configuration
+     >>> config_text = ClvpEncoderConfig()
+     >>> config_speech = ClvpEncoderConfig()
+     >>> decoder_config = ClvpDecoderConfig()
+
+     >>> config = ClvpConfig.from_sub_model_configs(config_text, config_speech, decoder_config)
+     ```"""
+
+     model_type = "clvp"
+     is_composition = True
+
+     def __init__(
+         self,
+         text_config=None,
+         speech_config=None,
+         decoder_config=None,
+         projection_dim=768,
+         logit_scale_init_value=2.6592,
+         initializer_factor=1.0,
+         **kwargs,
+     ):
+         super().__init__(**kwargs)
+
+         if text_config is None:
+             text_config = {}
+             logger.info("`text_config` is `None`. Initializing the `ClvpEncoderConfig` with default values.")
+
+         if speech_config is None:
+             speech_config = {}
+             logger.info("`speech_config` is `None`. Initializing the `ClvpEncoderConfig` with default values.")
+
+         if decoder_config is None:
+             decoder_config = {}
+             logger.info("`decoder_config` is `None`. Initializing the `ClvpDecoderConfig` with default values.")
+
+         self.text_config = ClvpEncoderConfig(**text_config)
+         self.speech_config = ClvpEncoderConfig(**speech_config)
+         self.decoder_config = ClvpDecoderConfig(**decoder_config)
+
+         self.projection_dim = projection_dim
+         self.logit_scale_init_value = logit_scale_init_value
+         self.initializer_factor = initializer_factor
+
+     @classmethod
+     def from_sub_model_configs(
+         cls,
+         text_config: ClvpEncoderConfig,
+         speech_config: ClvpEncoderConfig,
+         decoder_config: ClvpDecoderConfig,
+         **kwargs,
+     ):
+         r"""
+         Instantiate a [`ClvpConfig`] (or a derived class) from CLVP text model configuration, CLVP speech model
+         configuration and CLVP decoder model configuration.
+
+         Args:
+             text_config (`ClvpEncoderConfig`):
+                 Text model configuration of type [`ClvpEncoderConfig`].
+             speech_config (`ClvpEncoderConfig`):
+                 Speech model configuration of type [`ClvpEncoderConfig`].
+             decoder_config (`ClvpDecoderConfig`):
+                 Decoder model configuration of type [`ClvpDecoderConfig`].
+
+         Returns:
+             [`ClvpConfig`]: An instance of a configuration object
+         """
+
+         return cls(
+             text_config=text_config.to_dict(),
+             speech_config=speech_config.to_dict(),
+             decoder_config=decoder_config.to_dict(),
+             **kwargs,
+         )
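The docstrings above already sketch the intended usage; condensed into one runnable example (random weights, no downloads; the keyword overrides are illustrative only, not the susnato/clvp_dev values):

from transformers import ClvpConfig, ClvpDecoderConfig, ClvpEncoderConfig

# build the three sub-configs, then compose them into a full ClvpConfig
text_config = ClvpEncoderConfig(summary_type="mean")
speech_config = ClvpEncoderConfig(vocab_size=8192)
decoder_config = ClvpDecoderConfig(num_hidden_layers=30)

config = ClvpConfig.from_sub_model_configs(text_config, speech_config, decoder_config)
print(config.decoder_config.num_hidden_layers)  # 30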
venv/lib/python3.10/site-packages/transformers/models/dit/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/transformers/models/dit/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (191 Bytes).
 
venv/lib/python3.10/site-packages/transformers/models/dit/__pycache__/convert_dit_unilm_to_pytorch.cpython-310.pyc ADDED
Binary file (6.44 kB).
 
venv/lib/python3.10/site-packages/transformers/models/dit/convert_dit_unilm_to_pytorch.py ADDED
@@ -0,0 +1,231 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Inc. team.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Convert DiT checkpoints from the unilm repository."""
+
+
+ import argparse
+ import json
+ from pathlib import Path
+
+ import requests
+ import torch
+ from huggingface_hub import hf_hub_download
+ from PIL import Image
+
+ from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
+ from transformers.image_utils import PILImageResampling
+ from transformers.utils import logging
+
+
+ logging.set_verbosity_info()
+ logger = logging.get_logger(__name__)
+
+
+ # here we list all keys to be renamed (original name on the left, our name on the right)
+ def create_rename_keys(config, has_lm_head=False, is_semantic=False):
+     prefix = "backbone." if is_semantic else ""
+
+     rename_keys = []
+     for i in range(config.num_hidden_layers):
+         # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
+         rename_keys.append((f"{prefix}blocks.{i}.norm1.weight", f"beit.encoder.layer.{i}.layernorm_before.weight"))
+         rename_keys.append((f"{prefix}blocks.{i}.norm1.bias", f"beit.encoder.layer.{i}.layernorm_before.bias"))
+         rename_keys.append(
+             (f"{prefix}blocks.{i}.attn.proj.weight", f"beit.encoder.layer.{i}.attention.output.dense.weight")
+         )
+         rename_keys.append(
+             (f"{prefix}blocks.{i}.attn.proj.bias", f"beit.encoder.layer.{i}.attention.output.dense.bias")
+         )
+         rename_keys.append((f"{prefix}blocks.{i}.norm2.weight", f"beit.encoder.layer.{i}.layernorm_after.weight"))
+         rename_keys.append((f"{prefix}blocks.{i}.norm2.bias", f"beit.encoder.layer.{i}.layernorm_after.bias"))
+         rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.weight", f"beit.encoder.layer.{i}.intermediate.dense.weight"))
+         rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.bias", f"beit.encoder.layer.{i}.intermediate.dense.bias"))
+         rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.weight", f"beit.encoder.layer.{i}.output.dense.weight"))
+         rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.bias", f"beit.encoder.layer.{i}.output.dense.bias"))
+
+     # projection layer + position embeddings
+     rename_keys.extend(
+         [
+             (f"{prefix}cls_token", "beit.embeddings.cls_token"),
+             (f"{prefix}patch_embed.proj.weight", "beit.embeddings.patch_embeddings.projection.weight"),
+             (f"{prefix}patch_embed.proj.bias", "beit.embeddings.patch_embeddings.projection.bias"),
+             (f"{prefix}pos_embed", "beit.embeddings.position_embeddings"),
+         ]
+     )
+
+     if has_lm_head:
+         # mask token + layernorm
+         rename_keys.extend(
+             [
+                 ("mask_token", "beit.embeddings.mask_token"),
+                 ("norm.weight", "layernorm.weight"),
+                 ("norm.bias", "layernorm.bias"),
+             ]
+         )
+     else:
+         # layernorm + classification head
+         rename_keys.extend(
+             [
+                 ("fc_norm.weight", "beit.pooler.layernorm.weight"),
+                 ("fc_norm.bias", "beit.pooler.layernorm.bias"),
+                 ("head.weight", "classifier.weight"),
+                 ("head.bias", "classifier.bias"),
+             ]
+         )
+
+     return rename_keys
+
+
+ # we split up the matrix of each encoder layer into queries, keys and values
+ def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
+     for i in range(config.num_hidden_layers):
+         prefix = "backbone." if is_semantic else ""
+         # queries, keys and values
+         in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
+         q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
+         v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")
+
+         state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
+             : config.hidden_size, :
+         ]
+         state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
+         state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
+             config.hidden_size : config.hidden_size * 2, :
+         ]
+         state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
+             -config.hidden_size :, :
+         ]
+         state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias
+
+         # gamma_1 and gamma_2
+         # we call them lambda because otherwise they are renamed when using .from_pretrained
+         gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
+         gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
+
+         state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
+         state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2
+
+
+ def rename_key(dct, old, new):
+     val = dct.pop(old)
+     dct[new] = val
+
+
+ # We will verify our results on an image of cute cats
+ def prepare_img():
+     url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+     im = Image.open(requests.get(url, stream=True).raw)
+     return im
+
+
+ @torch.no_grad()
+ def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
+     """
+     Copy/paste/tweak model's weights to our BEiT structure.
+     """
+
+     # define default BEiT configuration
+     has_lm_head = False if "rvlcdip" in checkpoint_url else True
+     config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)
+
+     # size of the architecture
+     if "large" in checkpoint_url or "dit-l" in checkpoint_url:
+         config.hidden_size = 1024
+         config.intermediate_size = 4096
+         config.num_hidden_layers = 24
+         config.num_attention_heads = 16
+
+     # labels
+     if "rvlcdip" in checkpoint_url:
+         config.num_labels = 16
+         repo_id = "huggingface/label-files"
+         filename = "rvlcdip-id2label.json"
+         id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
+         id2label = {int(k): v for k, v in id2label.items()}
+         config.id2label = id2label
+         config.label2id = {v: k for k, v in id2label.items()}
+
+     # load state_dict of original model, remove and rename some keys
+     state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
+
+     rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
+     for src, dest in rename_keys:
+         rename_key(state_dict, src, dest)
+     read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)
+
+     # load HuggingFace model
+     model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
+     model.eval()
+     model.load_state_dict(state_dict)
+
+     # Check outputs on an image
+     image_processor = BeitImageProcessor(
+         size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
+     )
+     image = prepare_img()
+
+     encoding = image_processor(images=image, return_tensors="pt")
+     pixel_values = encoding["pixel_values"]
+
+     outputs = model(pixel_values)
+     logits = outputs.logits
+
+     # verify logits
+     expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
+     assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"
+
+     Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
+     print(f"Saving model to {pytorch_dump_folder_path}")
+     model.save_pretrained(pytorch_dump_folder_path)
+     print(f"Saving image processor to {pytorch_dump_folder_path}")
+     image_processor.save_pretrained(pytorch_dump_folder_path)
+
+     if push_to_hub:
+         if has_lm_head:
+             model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
+         else:
+             model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
+         image_processor.push_to_hub(
+             repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
+             organization="nielsr",
+             commit_message="Add image processor",
+             use_temp_dir=True,
+         )
+         model.push_to_hub(
+             repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
+             organization="nielsr",
+             commit_message="Add model",
+             use_temp_dir=True,
+         )
+
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser()
+
+     parser.add_argument(
+         "--checkpoint_url",
+         default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
+         type=str,
+         help="URL to the original PyTorch checkpoint (.pth file).",
+     )
+     parser.add_argument(
+         "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
+     )
+     parser.add_argument(
+         "--push_to_hub",
+         action="store_true",
+     )
+     args = parser.parse_args()
+     convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
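The script is argparse-driven, but the conversion entry point can equally be called directly; a sketch using the script's own default checkpoint URL (the output folder name is a placeholder, and the call downloads the checkpoint plus a test image):

from transformers.models.dit.convert_dit_unilm_to_pytorch import convert_dit_checkpoint

convert_dit_checkpoint(
    checkpoint_url="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
    pytorch_dump_folder_path="./dit-base",  # placeholder output directory
    push_to_hub=False,
)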
venv/lib/python3.10/site-packages/transformers/models/groupvit/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.39 kB).
 
venv/lib/python3.10/site-packages/transformers/models/groupvit/__pycache__/configuration_groupvit.cpython-310.pyc ADDED
Binary file (15.9 kB).